// slabbin — lib.rs
//! This allocator be straight up *slabbin'*.
//!
//! A slab allocator is in theory the perfect one, if it's applicable. It's blazingly fast and
//! avoids the issue of external fragmentation, but unlike the bump allocator it can free
//! individual allocations, and unlike the stack allocator it can free them in an arbitrary order.
//! The tradeoff here is that all allocations must have the same, fixed layout.
//!
//! The allocator in this crate is totally unsafe, and meant specifically for use cases where
//! stable addresses are required: when you allocate a slot, you get a pointer that stays valid
//! until you deallocate it (or drop the allocator). Example use cases include linked structures or
//! self-referential structures. If you don't have this requirement you may consider using [`slab`]
//! or [`typed-arena`] for example as a safe alternative.
//!
//! # Slabs
//!
//! A slab is a pre-allocated contiguous chunk of memory containing *slots*. Each slot can either
//! be free or occupied. A slab always starts out with all slots free, and new slots are given out
//! on each allocation, until they run out, at which point a new slab is allocated. Slots that are
//! deallocated are chained together in a linked list. Due to this, allocation amounts to 3
//! operations in the best case and ~8 in the worst case. Deallocation is always 3 operations.
//!
//! [`slab`]: https://crates.io/crates/slab
//! [`typed-arena`]: https://crates.io/crates/typed-arena
// `inline_always` is used deliberately on the tiny fast paths below; the lint is noise here.
#![allow(clippy::inline_always)]
#![forbid(unsafe_op_in_unsafe_fn)]
#![no_std]

extern crate alloc;

use alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use core::{
    cell::Cell,
    fmt,
    mem::{self, ManuallyDrop, MaybeUninit},
    num::NonZeroUsize,
    ptr::{self, NonNull},
};
/// An efficient slab allocator with stable addresses.
///
/// See also [the crate-level documentation] for more information about slab allocation.
///
/// # Examples
///
/// A doubly linked list that's backed by slabs:
///
/// ```
/// use slabbin::SlabAllocator;
/// use std::ptr::{self, NonNull};
///
/// struct LinkedList<T> {
///     head: Option<NonNull<Node<T>>>,
///     tail: Option<NonNull<Node<T>>>,
///     allocator: SlabAllocator<Node<T>>,
/// }
///
/// impl<T> LinkedList<T> {
///     fn new(slab_capacity: usize) -> Self {
///         LinkedList {
///             head: None,
///             tail: None,
///             allocator: SlabAllocator::new(slab_capacity),
///         }
///     }
///
///     fn push_back(&mut self, value: T) {
///         let node = self.allocator.allocate();
///
///         // SAFETY: `SlabAllocator::allocate` gives out pointers that are valid for writes (but
///         // **not** for reads).
///         unsafe { ptr::write(node.as_ptr(), Node::new(value)) };
///
///         if let Some(tail) = self.tail {
///             unsafe { (*tail.as_ptr()).next = Some(node) };
///             unsafe { (*node.as_ptr()).prev = Some(tail) };
///         } else {
///             self.head = Some(node);
///         }
///
///         self.tail = Some(node);
///     }
///
///     fn pop_back(&mut self) -> Option<T> {
///         if let Some(tail) = self.tail {
///             if let Some(prev) = unsafe { (*tail.as_ptr()).prev } {
///                 unsafe { (*prev.as_ptr()).next = None };
///                 self.tail = Some(prev);
///             } else {
///                 self.head = None;
///                 self.tail = None;
///             }
///
///             // SAFETY: We can move out of the value because the node will be deallocated.
///             let value = unsafe { ptr::read(ptr::addr_of_mut!((*tail.as_ptr()).value)) };
///
///             // SAFETY: We allocated this node, and have just removed all linkage to it so that
///             // it can't be accessed again.
///             unsafe { self.allocator.deallocate(tail) };
///
///             Some(value)
///         } else {
///             None
///         }
///     }
/// }
///
/// struct Node<T> {
///     prev: Option<NonNull<Self>>,
///     next: Option<NonNull<Self>>,
///     value: T,
/// }
///
/// impl<T> Node<T> {
///     fn new(value: T) -> Self {
///         Node {
///             prev: None,
///             next: None,
///             value,
///         }
///     }
/// }
///
/// let mut list = LinkedList::new(64);
/// list.push_back(42);
/// list.push_back(12);
/// list.push_back(69);
///
/// assert_eq!(list.pop_back(), Some(69));
/// assert_eq!(list.pop_back(), Some(12));
/// assert_eq!(list.pop_back(), Some(42));
/// assert_eq!(list.pop_back(), None);
/// ```
///
/// [the crate-level documentation]: self
pub struct SlabAllocator<T> {
    /// Head of the intrusive free-list threaded through deallocated slots.
    free_list_head: Cell<Option<NonNull<Slot<T>>>>,
    /// Head of the intrusive list of all allocated slabs (newest first).
    slab_list_head: Cell<Option<NonNull<Slab<T>>>>,
    /// The next free slab that should be used, if any.
    slab_list_next: Cell<Option<NonNull<Slab<T>>>>,
    /// Points to where the free slots start in a fresh slab. If the slab list is empty then this
    /// is dangling.
    free_start: Cell<NonNull<Slot<T>>>,
    /// Points to where the free slots end in a fresh slab. If the slab list is empty then this is
    /// dangling.
    free_end: Cell<NonNull<Slot<T>>>,
    /// Number of slots per slab; fixed at construction and always non-zero.
    slab_capacity: NonZeroUsize,
}
149
// SAFETY: The pointers we hold are not referencing the stack or TLS or anything like that; they
// are all heap allocations, and therefore sending them to another thread is safe. Note that it is
// safe to do this regardless of whether `T` is `Send`: the allocator itself doesn't own any `T`s;
// the user does and manages their lifetime explicitly by allocating, initializing, deinitializing
// and then deallocating them. It is therefore their responsibility that a type that is `!Send`
// doesn't escape the thread it was created on (just like with any other allocator, it just so
// happens that this one is parametrized, but it doesn't have to be). Pointers are always `!Send`
// so the user would have to use unsafe code to achieve something like that in the first place.
unsafe impl<T> Send for SlabAllocator<T> {}
159
impl<T> SlabAllocator<T> {
    /// Creates a new `SlabAllocator`.
    ///
    /// `slab_capacity` is the number of slots in a [slab].
    ///
    /// No memory is allocated until you call one of the `allocate` methods.
    ///
    /// # Panics
    ///
    /// Panics if `slab_capacity` is zero.
    ///
    /// [slab]: self#slabs
    #[inline]
    #[must_use]
    pub const fn new(slab_capacity: usize) -> Self {
        if let Some(slab_capacity) = NonZeroUsize::new(slab_capacity) {
            let dangling = NonNull::dangling();

            SlabAllocator {
                free_list_head: Cell::new(None),
                slab_list_head: Cell::new(None),
                slab_list_next: Cell::new(None),
                free_start: Cell::new(dangling),
                free_end: Cell::new(dangling),
                slab_capacity,
            }
        } else {
            panic!("`slab_capacity` must be non-zero");
        }
    }

    /// Allocates a new slot for `T`. The memory referred to by the returned pointer needs to be
    /// initialized before creating a reference to it.
    ///
    /// This operation is *O*(1).
    ///
    /// # Panics
    ///
    /// Panics if the size of a slab exceeds `isize::MAX` bytes.
    #[inline(always)]
    #[must_use]
    pub fn allocate(&self) -> NonNull<T> {
        // Try the free-list first, then the current slab's bump region, and only then fall back
        // to the slow path that grabs (or allocates) a whole new slab.
        let ptr = if let Some(ptr) = self.allocate_fast() {
            ptr
        } else if let Some(ptr) = self.allocate_fast2() {
            ptr
        } else {
            self.allocate_slow()
                .unwrap_or_else(|_| handle_alloc_error(self.slab_layout()))
        };

        // We can safely hand the user a pointer to `T`, which is valid for writes of `T`, seeing
        // as `Slot<T>` is a union with `T` as one of its fields. That means that the slot must
        // have a layout that fits `T` as we used `Layout` for the layout calculation of the slots
        // array.
        ptr.cast::<T>()
    }

    /// Allocates a new slot for `T`. The memory referred to by the returned pointer needs to be
    /// initialized before creating a reference to it.
    ///
    /// This operation is *O*(1).
    ///
    /// # Errors
    ///
    /// Returns an error if the global allocator returns an error.
    ///
    /// # Panics
    ///
    /// Panics if the size of a slab exceeds `isize::MAX` bytes.
    #[inline(always)]
    pub fn try_allocate(&self) -> Result<NonNull<T>, AllocError> {
        // Same cascade as `allocate`, except allocation failure is surfaced to the caller
        // instead of aborting via `handle_alloc_error`.
        let ptr = if let Some(ptr) = self.allocate_fast() {
            ptr
        } else if let Some(ptr) = self.allocate_fast2() {
            ptr
        } else {
            self.allocate_slow()?
        };

        Ok(ptr.cast::<T>())
    }

    /// Fast path 1: pops the head of the free-list of previously deallocated slots, if any.
    #[inline(always)]
    fn allocate_fast(&self) -> Option<NonNull<Slot<T>>> {
        let head = self.free_list_head.get()?;

        // SAFETY: Each node in the free-list is, by definition, free and therefore must have been
        // initialized with the `next_free` union field when linking it into the list.
        let next = unsafe { (*head.as_ptr()).next_free };

        self.free_list_head.set(next);

        // Make Miri comprehend that a slot must be initialized before reading it, even in cases
        // where a slot is reallocated and has been initialized before.
        if cfg!(miri) {
            let ptr = head.as_ptr().cast::<MaybeUninit<Slot<T>>>();

            // SAFETY: `MaybeUninit<Slot<T>>` and `Slot<T>` have the same layout.
            unsafe { ptr.write(MaybeUninit::uninit()) };
        }

        // We can safely hand the user a pointer to the head of the free-list, seeing as we removed
        // it from the list so that it cannot be handed out again.
        Some(head)
    }

    /// Fast path 2: bumps `free_start` within the current fresh slab, if it isn't exhausted.
    #[inline(always)]
    fn allocate_fast2(&self) -> Option<NonNull<Slot<T>>> {
        let ptr = self.free_start.get();

        if ptr < self.free_end.get() {
            // SAFETY:
            // * We know the offset must be in bounds of the allocated object because we just
            //   checked that `free_start` doesn't refer to the end of the allocated object yet:
            //   * `free_start` and `free_end` are initialized such that they refer to the start
            //     and end of the slots array respectively.
            //   * If the pointers haven't been initialized yet, then they are both dangling and
            //     equal, which means the above condition trivially wouldn't hold.
            //   * This is the only place where `free_start` is incremented, always by 1.
            //     `free_end` is unchanging until a new slab is allocated.
            // * The computed offset cannot overflow an `isize` because we used `Layout` for the
            //   layout calculation.
            // * The computed offset cannot wrap around the address space for the same reason as
            //   the previous.
            let free_start = unsafe { NonNull::new_unchecked(ptr.as_ptr().add(1)) };

            self.free_start.set(free_start);

            // We can safely hand the user a pointer to the previous free-start, as we incremented
            // it such that the same slot cannot be handed out again.
            Some(ptr)
        } else {
            None
        }
    }

    /// Slow path: takes the next unused slab from the slab list (repopulated by `reset`) or
    /// allocates a brand-new one, then hands out its first slot and primes the bump region.
    #[cold]
    fn allocate_slow(&self) -> Result<NonNull<Slot<T>>, AllocError> {
        let slab = if let Some(slab) = self.slab_list_next.get() {
            // SAFETY: `slab` being in the slab list means it refers to a currently allocated slab
            // and that its header is properly initialized.
            let next = unsafe { (*slab.as_ptr()).next };

            self.slab_list_next.set(next);

            slab
        } else {
            self.add_slab()?
        };

        // SAFETY: We either got an existing slab or successfully allocated a new one above, and a
        // slab always includes at least the slab header, so the offset must be in range.
        let slots = unsafe { NonNull::new_unchecked(ptr::addr_of_mut!((*slab.as_ptr()).slots)) };

        // SAFETY:
        // * We know that the offset must be in bounds of the allocated object because we allocated
        //   `self.slab_capacity` slots and `self.slab_capacity` is non-zero.
        // * The computed offset cannot overflow an `isize` because we used `Layout` for the layout
        //   calculation.
        // * The computed offset cannot wrap around the address space for the same reason as the
        //   previous.
        let free_start = unsafe { NonNull::new_unchecked(slots.as_ptr().add(1)) };

        // SAFETY: Same as the previous.
        let free_end =
            unsafe { NonNull::new_unchecked(slots.as_ptr().add(self.slab_capacity.get())) };

        self.free_start.set(free_start);
        self.free_end.set(free_end);

        // We can safely hand the user a pointer to the first slot, seeing as we set the free-start
        // to the next slot, so that the same slot cannot be handed out again.
        Ok(slots)
    }

    /// Allocates a new slab from the global allocator and links it at the head of the slab list.
    fn add_slab(&self) -> Result<NonNull<Slab<T>>, AllocError> {
        // SAFETY: Slabs always have a non-zero-sized layout.
        let bytes = unsafe { alloc(self.slab_layout()) };

        let slab = NonNull::new(bytes.cast::<Slab<T>>()).ok_or(AllocError)?;

        // SAFETY: We checked that the pointer is non-null, which means allocation succeeded, and
        // we've been given at least the slab header.
        unsafe { (*slab.as_ptr()).next = self.slab_list_head.get() };

        self.slab_list_head.set(Some(slab));

        Ok(slab)
    }

    /// Computes the layout of one slab: the `next` header followed by `slab_capacity` slots.
    /// This must match the `#[repr(C)]` layout of `Slab<T>`.
    fn slab_layout(&self) -> Layout {
        Layout::new::<Option<NonNull<Slab<T>>>>()
            .extend(Layout::array::<Slot<T>>(self.slab_capacity.get()).unwrap())
            .unwrap()
            .0
            .pad_to_align()
    }

    /// Deallocates the slot at the given `ptr`. The `T` is not dropped before deallocating, you
    /// must do so yourself before calling this function if `T` has drop glue (unless you want to
    /// leak).
    ///
    /// This operation is *O*(1).
    ///
    /// # Safety
    ///
    /// `ptr` must refer to a slot that's **currently allocated** by `self`.
    #[inline(always)]
    pub unsafe fn deallocate(&self, ptr: NonNull<T>) {
        // `contains` is O(n); this check only runs in builds with debug assertions enabled.
        debug_assert!(
            self.contains(ptr),
            "attempted to deallocate a slot that does not belong to this allocator",
        );

        let ptr = ptr.cast::<Slot<T>>();

        // SAFETY: The caller must ensure that `ptr` refers to a currently allocated slot, meaning
        // that `ptr` was derived from one of our slabs using `allocate`, making it a valid pointer.
        // We can overwrite whatever was in the slot before, because nothing must access a pointer
        // after its memory block has been deallocated (as that would constitute a Use-After-Free).
        // In our case we reuse the memory for the free-list linkage.
        unsafe { (*ptr.as_ptr()).next_free = self.free_list_head.get() };

        self.free_list_head.set(Some(ptr));
    }

    /// Resets the allocator, deallocating all currently allocated slots at once.
    ///
    /// This operation is *O*(1).
    ///
    /// # Safety
    ///
    /// This function semantically behaves as if [`deallocate`] was called for every currently
    /// allocated slot.
    ///
    /// [`deallocate`]: Self::deallocate
    pub unsafe fn reset(&self) {
        // Existing slabs are kept allocated; they will be reused starting from the list head.
        self.free_list_head.set(None);
        self.slab_list_next.set(self.slab_list_head.get());
        let dangling = NonNull::dangling();
        self.free_start.set(dangling);
        self.free_end.set(dangling);
    }

    /// Returns `true` if `ptr` refers to a currently allocated slot of `self`.
    ///
    /// This operation is *O*(*n*). It's useful for debug assertions but maybe not much else.
    pub fn contains(&self, ptr: NonNull<T>) -> bool {
        let ptr = ptr.cast::<Slot<T>>();

        // SAFETY: `*mut Slot<T>` and `usize` have the same layout.
        #[allow(clippy::transmutes_expressible_as_ptr_casts)]
        let addr = unsafe { mem::transmute::<*mut Slot<T>, usize>(ptr.as_ptr()) };

        // A pointer that isn't aligned for `Slot<T>` can't have come from `allocate`.
        // TODO: Replace with `<*mut Slot<T>>::is_aligned` once we release a breaking version.
        if addr & (mem::align_of::<Slot<T>>() - 1) != 0 {
            return false;
        }

        // First: walk the slab list to check that `ptr` points into one of our slabs at all.
        let mut head = self.slab_list_head.get();

        loop {
            let slab = if let Some(slab) = head {
                slab
            } else {
                return false;
            };

            // Slabs starting from `self.slab_list_next` have no allocated slots, so if we haven't
            // found the slot yet, it's not currently allocated.
            if Some(slab) == self.slab_list_next.get() {
                return false;
            }

            // SAFETY: `slab` being in the slab list means it refers to a currently allocated slab
            // and that contains at least the header, so the offset must be in range.
            let slots_start = unsafe { ptr::addr_of_mut!((*slab.as_ptr()).slots) };

            // SAFETY:
            // * We know that the offset must be in bounds of the allocated object because we
            //   allocated `slab_capacity` slots.
            // * The computed offset cannot overflow an `isize` because we used `Layout` for the
            //   layout calculation.
            // * The computed offset cannot wrap around the address space for the same reason as
            //   the previous.
            let slots_end = unsafe { slots_start.add(self.slab_capacity.get()) };

            if (slots_start..slots_end).contains(&ptr.as_ptr()) {
                break;
            }

            // SAFETY: `slab` being in the slab list means it refers to a currently allocated slab
            // and that its header is properly initialized.
            head = unsafe { (*slab.as_ptr()).next };
        }

        // Second: the slot is inside a slab, but it may not have been handed out yet (it lies in
        // the untouched bump region of the freshest slab)...
        if (self.free_start.get()..self.free_end.get()).contains(&ptr) {
            return false;
        }

        // ...or it may have been deallocated, in which case it's linked into the free-list.
        let mut head = self.free_list_head.get();

        while let Some(slot) = head {
            if slot == ptr {
                return false;
            }

            // SAFETY: Each node in the free-list is, by definition, free and therefore must have
            // been initialized with the `next_free` union field when linking it into the list.
            head = unsafe { (*slot.as_ptr()).next_free };
        }

        true
    }

    /// Counts the slabs currently allocated, by walking the slab list. *O*(*n*).
    fn slab_count(&self) -> usize {
        let mut head = self.slab_list_head.get();
        let mut count = 0;

        while let Some(slab) = head {
            // SAFETY: `slab` being in the slab list means it refers to a currently allocated slab
            // and that its header is properly initialized.
            head = unsafe { (*slab.as_ptr()).next };

            count += 1;
        }

        count
    }
}
491
// The per-slot state (free/occupied) isn't tracked in a way that's cheap to summarize, so only
// the slab count and capacity are reported; hence the allow.
#[allow(clippy::missing_fields_in_debug)]
impl<T> fmt::Debug for SlabAllocator<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SlabAllocator")
            .field("slab_count", &self.slab_count())
            .field("slab_capacity", &self.slab_capacity)
            .finish()
    }
}
501
impl<T> Drop for SlabAllocator<T> {
    /// Frees every slab. Note that the `T`s themselves are never dropped here; the user is
    /// responsible for deinitializing slots before the allocator goes away.
    fn drop(&mut self) {
        let slab_layout = self.slab_layout();

        while let Some(slab) = self.slab_list_head.get() {
            // SAFETY: `slab` being in the slab list means it refers to a currently allocated slab
            // and that its header is properly initialized.
            *self.slab_list_head.get_mut() = unsafe { (*slab.as_ptr()).next };

            // SAFETY:
            // * `slab` being in the slab list means it refers to a currently allocated slab.
            // * `self.slab_layout()` returns the same layout that was used to allocate the slab.
            unsafe { dealloc(slab.as_ptr().cast(), slab_layout) };
        }
    }
}
518
/// Header-plus-first-slot view of a slab allocation. `repr(C)` so that the layout matches the
/// manual `Layout` computation in `SlabAllocator::slab_layout`.
#[repr(C)]
struct Slab<T> {
    /// Intrusive link to the next slab in the allocator's slab list.
    next: Option<NonNull<Self>>,
    /// The actual field type is `[Slot<T>]` except that we want a thin pointer.
    slots: Slot<T>,
}
525
/// One slot: either holds a user `T` (while allocated) or a free-list link (while free).
#[repr(C)]
union Slot<T> {
    /// Link to the next free slot; only valid while the slot is linked into the free-list.
    next_free: Option<NonNull<Self>>,
    /// The user's value; only valid after the user has initialized the slot.
    value: ManuallyDrop<T>,
}
531
/// The error returned when the global allocator fails to allocate memory.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AllocError;

impl fmt::Display for AllocError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "memory allocation failed")
    }
}
541
#[cfg(test)]
mod tests {
    use super::*;
    use core::mem;

    #[test]
    fn basic_usage1() {
        let allocator = SlabAllocator::<i32>::new(2);

        let mut x = allocator.allocate();
        unsafe { x.as_ptr().write(69) };

        let mut y = allocator.allocate();
        unsafe { y.as_ptr().write(42) };

        assert_eq!(allocator.slab_count(), 1);

        mem::swap(unsafe { x.as_mut() }, unsafe { y.as_mut() });

        unsafe { allocator.deallocate(x) };

        let mut x2 = allocator.allocate();
        unsafe { x2.as_ptr().write(12) };

        assert_eq!(allocator.slab_count(), 1);

        mem::swap(unsafe { y.as_mut() }, unsafe { x2.as_mut() });

        unsafe { allocator.deallocate(y) };
        unsafe { allocator.deallocate(x2) };
    }

    #[test]
    fn basic_usage2() {
        let allocator = SlabAllocator::<i32>::new(1);

        let mut x = allocator.allocate();
        unsafe { x.as_ptr().write(1) };

        let mut y = allocator.allocate();
        unsafe { y.as_ptr().write(2) };

        let mut z = allocator.allocate();
        unsafe { z.as_ptr().write(3) };

        assert_eq!(allocator.slab_count(), 3);

        mem::swap(unsafe { x.as_mut() }, unsafe { y.as_mut() });
        mem::swap(unsafe { y.as_mut() }, unsafe { z.as_mut() });
        mem::swap(unsafe { z.as_mut() }, unsafe { x.as_mut() });

        unsafe { allocator.deallocate(y) };

        let mut y2 = allocator.allocate();
        unsafe { y2.as_ptr().write(20) };

        assert_eq!(allocator.slab_count(), 3);

        mem::swap(unsafe { x.as_mut() }, unsafe { y2.as_mut() });

        unsafe { allocator.deallocate(x) };
        unsafe { allocator.deallocate(z) };

        let mut x2 = allocator.allocate();
        unsafe { x2.as_ptr().write(10) };

        mem::swap(unsafe { y2.as_mut() }, unsafe { x2.as_mut() });

        let mut z2 = allocator.allocate();
        unsafe { z2.as_ptr().write(30) };

        assert_eq!(allocator.slab_count(), 3);

        mem::swap(unsafe { x2.as_mut() }, unsafe { z2.as_mut() });

        unsafe { allocator.deallocate(x2) };

        mem::swap(unsafe { z2.as_mut() }, unsafe { y2.as_mut() });

        unsafe { allocator.deallocate(y2) };
        unsafe { allocator.deallocate(z2) };
    }

    #[test]
    fn basic_usage3() {
        let allocator = SlabAllocator::<i32>::new(2);

        let mut x = allocator.allocate();
        unsafe { x.as_ptr().write(1) };

        let mut y = allocator.allocate();
        unsafe { y.as_ptr().write(2) };

        assert_eq!(allocator.slab_count(), 1);

        mem::swap(unsafe { x.as_mut() }, unsafe { y.as_mut() });

        let z = allocator.allocate();
        unsafe { z.as_ptr().write(3) };

        assert_eq!(allocator.slab_count(), 2);

        unsafe { allocator.deallocate(x) };
        unsafe { allocator.deallocate(z) };

        let mut z2 = allocator.allocate();
        unsafe { z2.as_ptr().write(30) };

        let mut x2 = allocator.allocate();
        unsafe { x2.as_ptr().write(10) };

        assert_eq!(allocator.slab_count(), 2);

        mem::swap(unsafe { x2.as_mut() }, unsafe { z2.as_mut() });

        unsafe { allocator.deallocate(x2) };
        unsafe { allocator.deallocate(y) };
        unsafe { allocator.deallocate(z2) };
    }

    #[test]
    fn reusing_slots1() {
        let allocator = SlabAllocator::<i32>::new(2);

        let x = allocator.allocate();
        let y = allocator.allocate();

        unsafe { allocator.deallocate(y) };

        let y2 = allocator.allocate();
        assert_eq!(y2, y);

        unsafe { allocator.deallocate(x) };

        let x2 = allocator.allocate();
        assert_eq!(x2, x);

        unsafe { allocator.deallocate(y2) };
        unsafe { allocator.deallocate(x2) };
    }

    #[test]
    fn reusing_slots2() {
        let allocator = SlabAllocator::<i32>::new(1);

        let x = allocator.allocate();

        unsafe { allocator.deallocate(x) };

        let x2 = allocator.allocate();
        assert_eq!(x, x2);

        let y = allocator.allocate();
        let z = allocator.allocate();

        unsafe { allocator.deallocate(y) };
        unsafe { allocator.deallocate(x2) };

        let x3 = allocator.allocate();
        let y2 = allocator.allocate();
        assert_eq!(x3, x2);
        assert_eq!(y2, y);

        unsafe { allocator.deallocate(x3) };
        unsafe { allocator.deallocate(y2) };
        unsafe { allocator.deallocate(z) };
    }

    #[test]
    fn reusing_slots3() {
        let allocator = SlabAllocator::<i32>::new(2);

        let x = allocator.allocate();
        let y = allocator.allocate();

        unsafe { allocator.deallocate(x) };
        unsafe { allocator.deallocate(y) };

        let y2 = allocator.allocate();
        let x2 = allocator.allocate();
        let z = allocator.allocate();
        assert_eq!(x2, x);
        assert_eq!(y2, y);

        unsafe { allocator.deallocate(x2) };
        unsafe { allocator.deallocate(z) };
        unsafe { allocator.deallocate(y2) };

        let y3 = allocator.allocate();
        let z2 = allocator.allocate();
        let x3 = allocator.allocate();
        assert_eq!(y3, y2);
        assert_eq!(z2, z);
        assert_eq!(x3, x2);

        unsafe { allocator.deallocate(x3) };
        unsafe { allocator.deallocate(y3) };
        unsafe { allocator.deallocate(z2) };
    }

    #[test]
    fn reusing_slots4() {
        let allocator = SlabAllocator::<i32>::new(2);

        let x = allocator.allocate();
        let y = allocator.allocate();

        unsafe { allocator.deallocate(y) };

        let y2 = allocator.allocate();
        assert_eq!(y2, y);

        unsafe { allocator.reset() };

        let x2 = allocator.allocate();
        let y3 = allocator.allocate();
        assert_eq!(x2, x);
        assert_eq!(y3, y2);

        unsafe { allocator.deallocate(y3) };
        unsafe { allocator.deallocate(x2) };
    }

    #[test]
    fn reusing_slots5() {
        let allocator = SlabAllocator::<i32>::new(1);

        let x = allocator.allocate();

        unsafe { allocator.deallocate(x) };

        let x2 = allocator.allocate();
        assert_eq!(x, x2);

        let y = allocator.allocate();
        let z = allocator.allocate();

        unsafe { allocator.reset() };

        let z2 = allocator.allocate();
        let y2 = allocator.allocate();
        assert_eq!(z2, z);
        assert_eq!(y2, y);

        unsafe { allocator.deallocate(z2) };
        unsafe { allocator.deallocate(y2) };
    }

    #[test]
    fn reusing_slots6() {
        let allocator = SlabAllocator::<i32>::new(2);

        let x = allocator.allocate();
        let y = allocator.allocate();

        unsafe { allocator.reset() };

        let x2 = allocator.allocate();
        let y2 = allocator.allocate();
        let z = allocator.allocate();
        assert_eq!(x2, x);
        assert_eq!(y2, y);

        unsafe { allocator.deallocate(x2) };
        unsafe { allocator.deallocate(z) };
        unsafe { allocator.deallocate(y2) };
        unsafe { allocator.reset() };

        let z2 = allocator.allocate();
        let _ = allocator.allocate();
        let x3 = allocator.allocate();
        let y3 = allocator.allocate();
        assert_eq!(z2, z);
        assert_eq!(x3, x2);
        assert_eq!(y3, y2);

        unsafe { allocator.reset() };
    }

    #[test]
    fn same_slab() {
        const MAX_DIFF: usize = 2 * mem::size_of::<Slot<i32>>();

        let allocator = SlabAllocator::<i32>::new(3);

        let x = allocator.allocate();
        let y = allocator.allocate();
        let z = allocator.allocate();

        assert!((x.as_ptr() as usize).abs_diff(y.as_ptr() as usize) <= MAX_DIFF);
        assert!((y.as_ptr() as usize).abs_diff(z.as_ptr() as usize) <= MAX_DIFF);
        assert!((z.as_ptr() as usize).abs_diff(x.as_ptr() as usize) <= MAX_DIFF);
    }

    #[test]
    fn different_slabs() {
        const MIN_DIFF: usize = mem::size_of::<Slab<i32>>();

        let allocator = SlabAllocator::<i32>::new(1);

        let x = allocator.allocate();
        let y = allocator.allocate();
        let z = allocator.allocate();

        assert!((x.as_ptr() as usize).abs_diff(y.as_ptr() as usize) >= MIN_DIFF);
        assert!((y.as_ptr() as usize).abs_diff(z.as_ptr() as usize) >= MIN_DIFF);
        assert!((z.as_ptr() as usize).abs_diff(x.as_ptr() as usize) >= MIN_DIFF);
    }

    #[test]
    #[should_panic]
    fn zero_slab_capacity() {
        let _ = SlabAllocator::<()>::new(0);
    }

    // The following tests exercise the `debug_assert!(self.contains(ptr))` check in
    // `deallocate`, which only exists when debug assertions are enabled.

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn unaligned_ptr() {
        let allocator = SlabAllocator::<i32>::new(1);
        let x = allocator.allocate();
        unsafe { allocator.deallocate(x.byte_add(1)) };
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn foreign_ptr() {
        let allocator = SlabAllocator::<i32>::new(1);
        let mut x = 69;
        unsafe { allocator.deallocate((&mut x).into()) };
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn not_yet_allocated() {
        let allocator = SlabAllocator::<i32>::new(2);
        let x = allocator.allocate();
        // Use `mem::size_of` (not the bare prelude `size_of`) for consistency with the rest of
        // this module and to keep the crate building on toolchains older than Rust 1.80.
        unsafe { allocator.deallocate(x.byte_add(mem::size_of::<Slot<i32>>())) };
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn already_deallocated() {
        let allocator = SlabAllocator::<i32>::new(2);
        let x = allocator.allocate();
        unsafe { allocator.deallocate(x) };
        unsafe { allocator.deallocate(x) };
    }
}
889        let x = allocator.allocate();
890        unsafe { allocator.deallocate(x) };
891        unsafe { allocator.deallocate(x) };
892    }
893}