rts_alloc/allocator.rs

use crate::free_stack::FreeStack;
use crate::global_free_list::GlobalFreeList;
use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
use crate::linked_list_node::LinkedListNode;
use crate::remote_free_list::RemoteFreeList;
use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
use crate::slab_meta::SlabMeta;
use crate::worker_local_list::WorkerLocalList;
use crate::{error::Error, header::Header, size_classes::size_class_index};
use core::mem::offset_of;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use std::path::Path;

pub struct Allocator {
    header: NonNull<Header>,
    file_size: usize,
    worker_index: u32,
}

impl Allocator {
    /// Create a new `Allocator`, backed by a newly created file with the given parameters.
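    ///
    /// # Example
    ///
    /// An illustrative sketch only (marked `ignore`): the path, sizes, and
    /// worker count are placeholders, and the import assumes the type is
    /// re-exported at the crate root.
    ///
    /// ```ignore
    /// use rts_alloc::Allocator;
    ///
    /// // Worker 0 creates and initializes a 64 MiB file with 64 KiB slabs.
    /// let allocator = Allocator::create(
    ///     "/tmp/rts_alloc.bin", // backing file (placeholder path)
    ///     64 * 1024 * 1024,     // file_size
    ///     4,                    // num_workers
    ///     64 * 1024,            // slab_size
    ///     0,                    // worker_index
    /// )
    /// .expect("failed to create allocator");
    /// ```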
    pub fn create(
        path: impl AsRef<Path>,
        file_size: usize,
        num_workers: u32,
        slab_size: u32,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        let header = crate::init::create(path, file_size, num_workers, slab_size)?;

        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }
    }

    /// Join an existing allocator at the given path.
    ///
    /// # Safety
    /// - `worker_index` must be uniquely assigned to this worker thread/process.
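    ///
    /// # Example
    ///
    /// An illustrative sketch only (marked `ignore`): the path is a placeholder
    /// and the import assumes the type is re-exported at the crate root.
    ///
    /// ```ignore
    /// use rts_alloc::Allocator;
    ///
    /// // Worker 1 joins the file that worker 0 created; index 1 is used by
    /// // no other live worker.
    /// let allocator = Allocator::join("/tmp/rts_alloc.bin", 1)
    ///     .expect("failed to join allocator");
    /// ```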
    pub fn join(path: impl AsRef<Path>, worker_index: u32) -> Result<Self, Error> {
        let (header, file_size) = crate::init::join(path)?;

        // Check if the worker index is valid.
        // SAFETY: The header is guaranteed to be valid and initialized.
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }

        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }
    }

    /// Creates a new `Allocator` for the given worker index.
    ///
    /// # Safety
    /// - The `header` must point to a valid header of an initialized allocator.
    unsafe fn new(
        header: NonNull<Header>,
        file_size: usize,
        worker_index: u32,
    ) -> Result<Self, Error> {
        // SAFETY: The header is assumed to be valid and initialized.
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        Ok(Allocator {
            header,
            file_size,
            worker_index,
        })
    }
}

impl Drop for Allocator {
    fn drop(&mut self) {
        #[cfg(test)]
        {
            // In tests, we do not mmap.
            return;
        }

        #[allow(unreachable_code)]
        // SAFETY: The header is guaranteed to be valid and initialized,
        //         and outside of tests the allocator memory is mmapped.
        unsafe {
            libc::munmap(self.header.as_ptr() as *mut libc::c_void, self.file_size);
        }
    }
}

impl Allocator {
    /// Allocates a block of memory of the given size.
    /// If the size is larger than the maximum size class, returns `None`.
    /// If the allocation fails, returns `None`.
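    ///
    /// # Example
    ///
    /// An illustrative sketch only (marked `ignore`); assumes `allocator` was
    /// obtained from `Allocator::create` or `Allocator::join`.
    ///
    /// ```ignore
    /// // A request is served from the smallest size class that fits it.
    /// let ptr = allocator.allocate(100).expect("allocation failed");
    /// // Requests larger than the largest size class return `None`.
    /// assert!(allocator.allocate(u32::MAX).is_none());
    /// ```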
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        // SAFETY: `size_index` is guaranteed to be valid by `size_class_index`.
        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        // SAFETY:
        // - `slab_index` is guaranteed to be valid by `find_allocatable_slab_index`.
        // - `size_index` is guaranteed to be valid by `size_class_index`.
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

    /// Try to find a suitable slab for allocation.
    /// If no partial slab assigned to the worker is found, try to take
    /// a slab from the global free list.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        // SAFETY: `size_index` is guaranteed to be valid by the caller.
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| {
                // SAFETY: `size_index` is guaranteed to be valid by the caller.
                unsafe { self.take_slab(size_index) }
            })
    }

    /// Attempt to allocate memory within a slab.
    /// If the slab is full or the allocation otherwise fails, returns `None`.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        // SAFETY: The slab index is guaranteed to be valid by the caller.
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        // If the free stack is now empty (the slab has no free blocks left),
        // remove the slab from the worker's partial list and move it to the
        // worker's full list.
        if free_stack.is_empty() {
            // SAFETY:
            // - The `slab_index` is guaranteed to be valid by the caller.
            // - The `size_index` is guaranteed to be valid by the caller.
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            // SAFETY:
            // - The `slab_index` is guaranteed to be valid by the caller.
            // - The `size_index` is guaranteed to be valid by the caller.
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            // SAFETY: The `slab_index` is guaranteed to be valid by the caller.
            let slab = unsafe { self.slab(slab_index) };
            // SAFETY: The `size_index` is guaranteed to be valid by the caller.
            let size = unsafe { size_class(size_index) };
            // SAFETY: `index_within_slab` came from the slab's free stack, so the
            // resulting pointer stays within the slab.
            unsafe { slab.byte_add(index_within_slab as usize * size as usize) }
        })
    }

    /// Attempt to take a slab from the global free list.
    /// If the global free list is empty, returns `None`.
    /// If the slab is successfully taken, it will be marked as assigned to the worker.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        // SAFETY: The slab index is guaranteed to be valid by `pop`.
        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        // SAFETY:
        // - The slab index is guaranteed to be valid by `pop`.
        // - The size index is guaranteed to be valid by the caller.
        unsafe {
            let slab_capacity = self.header.as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        // SAFETY: The size index is guaranteed to be valid by the caller.
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        // SAFETY: The slab index is guaranteed to be valid by `pop`.
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}

impl Allocator {
    /// Free a block of memory previously allocated by this allocator.
    ///
    /// # Safety
    /// - The `ptr` must be a valid pointer to a block of memory allocated by this allocator.
    /// - The `ptr` must not have been freed before.
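    ///
    /// # Example
    ///
    /// An illustrative sketch only (marked `ignore`): `allocator_0` and
    /// `allocator_1` are assumed to be two workers attached to the same file.
    /// A block may be freed by a worker other than the one that allocated it;
    /// it then lands on the owning slab's remote free list and is reclaimed
    /// when the owner calls `clean_remote_free_lists`.
    ///
    /// ```ignore
    /// let ptr = allocator_0.allocate(256).expect("allocation failed");
    /// // SAFETY: `ptr` was returned by this allocator and is freed exactly once.
    /// unsafe { allocator_1.free(ptr) };
    /// ```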
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        // SAFETY: The pointer is assumed to be valid and allocated by this allocator.
        let offset = unsafe { self.offset(ptr) };
        let allocation_indexes = self.find_allocation_indexes(offset);

        // Check if the slab is assigned to this worker.
        // SAFETY: The slab index is guaranteed to be valid by `find_allocation_indexes`.
        if self.worker_index
            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                .assigned_worker
                .load(Ordering::Acquire)
        {
            self.local_free(allocation_indexes);
        } else {
            self.remote_free(allocation_indexes);
        }
    }

    fn local_free(&self, allocation_indexes: AllocationIndexes) {
        // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
        let (was_full, is_empty) = unsafe {
            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
            let was_full = free_stack.is_empty();
            free_stack.push(allocation_indexes.index_within_slab);
            // Note on naming:
            // - When the **free** stack is empty, the slab is full of allocations.
            // - When the **free** stack is full, the slab contains no live allocations.
            (was_full, free_stack.is_full())
        };

        match (was_full, is_empty) {
            (true, true) => {
                // The slab was full and is now empty - this cannot happen unless the slab
                // size is equal to the size class.
                unreachable!("slab can only contain one allocation - this is not allowed");
            }
            (true, false) => {
                // SAFETY: The slab index is guaranteed to be valid by the caller.
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                // The slab was full and is now partially full. It must be moved
                // from the worker's full list to the worker's partial list.
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.worker_local_list_full(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .push(allocation_indexes.slab_index);
                }
            }
            (false, true) => {
                // SAFETY: The slab index is guaranteed to be valid by the caller.
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                // The slab was partially full and is now empty.
                // It must be moved from the worker's partial list to the global free list.
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.global_free_list().push(allocation_indexes.slab_index);
                }
            }
            (false, false) => {
                // The slab was partially full and is still partially full.
                // No action is needed.
            }
        }
    }

    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
        // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
        unsafe {
            self.remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

    /// Return the offset of `ptr` from the start of the mapping.
    /// The offset is shareable with other workers mapping the same file.
    ///
    /// # Safety
    /// - The `ptr` must be a valid pointer in the allocator's address space.
    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        // SAFETY: The caller guarantees `ptr` is within the allocator's address
        // space, so both pointers belong to the same mapping.
        unsafe { ptr.byte_offset_from(self.header) } as usize
    }

    /// Return a pointer given a shareable offset, as calculated by `offset`.
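    ///
    /// # Example
    ///
    /// An illustrative sketch only (marked `ignore`): the offset is stable for
    /// every worker mapping the same file, so it can be sent to another worker
    /// and converted back into a pointer there.
    ///
    /// ```ignore
    /// let ptr = allocator.allocate(64).expect("allocation failed");
    /// // SAFETY: `ptr` was returned by this allocator.
    /// let offset = unsafe { allocator.offset(ptr) };
    /// // On this worker (or another worker mapping the same file):
    /// assert_eq!(allocator.ptr_from_offset(offset), ptr);
    /// ```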
    pub fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        // SAFETY: The offset is assumed to have been produced by `offset`, so the
        // result stays within the mapped region.
        unsafe { self.header.byte_add(offset) }.cast()
    }

    /// Find the slab index and index within the slab for a given offset.
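    ///
    /// For example (hypothetical numbers), with `slab_size = 65536` and a slab
    /// carved into 2048-byte blocks, an offset 137216 bytes past `slabs_offset`
    /// resolves to slab index `137216 / 65536 = 2` and index
    /// `(137216 % 65536) / 2048 = 3` within that slab.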
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        let (slab_index, offset_within_slab) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            debug_assert!(offset >= header.slabs_offset as usize);
            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");

            // SAFETY: The slab size is guaranteed to be a power of 2, for a valid header.
            let offset_within_slab =
                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };

            (slab_index, offset_within_slab)
        };

        let index_within_slab = {
            // SAFETY: The slab index is guaranteed to be valid by the above calculations.
            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
                .size_class_index
                .load(Ordering::Acquire);
            // SAFETY: The size class index is guaranteed to be valid by valid slab meta.
            let size_class = unsafe { size_class(size_class_index) };
            (offset_within_slab / size_class) as u16
        };

        AllocationIndexes {
            slab_index,
            index_within_slab,
        }
    }

    /// Return offset within a slab.
    ///
    /// # Safety
    /// - The `slab_size` must be a power of 2.
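    ///
    /// For example, with `slab_size = 65536` the mask is `0xFFFF`, so an offset
    /// of 70000 from the start of the slabs region maps to
    /// `70000 & 0xFFFF = 4464` within its slab.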
    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
        debug_assert!(slab_size.is_power_of_two());
        (offset_from_slab_start & (slab_size as usize - 1)) as u32
    }
}

impl Allocator {
    /// Frees all items in remote free lists.
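    ///
    /// # Example
    ///
    /// An illustrative sketch only (marked `ignore`): a worker would typically
    /// call this periodically so that blocks freed by other workers become
    /// allocatable again.
    ///
    /// ```ignore
    /// // Reclaim blocks that other workers pushed onto this worker's slabs.
    /// allocator.clean_remote_free_lists();
    /// ```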
    pub fn clean_remote_free_lists(&self) {
        // Process partial slabs before full slabs: freeing into a full slab can
        // move it onto the partial list, and handling partial slabs first avoids
        // iterating over those slabs twice.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    /// Frees all remotely freed items for every slab in the given worker local list.
    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            // SAFETY: The slab index is guaranteed to be valid by the iterator.
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free(AllocationIndexes {
                    slab_index,
                    index_within_slab,
                })
            }
        }
    }
}

impl Allocator {
    /// Returns a pointer to the free list elements in the allocator.
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        // SAFETY: The header is assumed to be valid and initialized.
        let offset = unsafe { self.header.as_ref() }.free_list_elements_offset;
        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { self.header.byte_add(offset as usize) }.cast()
    }

    /// Returns a `GlobalFreeList` to interact with the global free list.
    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        // SAFETY: The header is assumed to be valid and initialized.
        let head = &unsafe { self.header.as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        // SAFETY:
        // - `head` is a valid reference to the global free list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { GlobalFreeList::new(head, list) }
    }

    /// Returns a `WorkerLocalList` for the current worker to interact with its
    /// local free list of partially full slabs.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        // SAFETY:
        // - `head` is a valid reference to the worker's local list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { WorkerLocalList::new(head, list) }
    }

    /// Returns a `WorkerLocalList` for the current worker to interact with its
    /// local free list of full slabs.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        // SAFETY:
        // - `head` is a valid reference to the worker's local list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { WorkerLocalList::new(head, list) }
    }

    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        // SAFETY: The header is assumed to be valid and initialized.
        let all_workers_heads = unsafe {
            self.header
                .byte_add(offset_of!(Header, worker_local_list_heads))
                .cast::<WorkerLocalListHeads>()
        };
        // SAFETY: The worker index is guaranteed to be valid by the constructor.
        let worker_heads = unsafe { all_workers_heads.add(self.worker_index as usize) };
        // SAFETY: The size index is guaranteed to be valid by the caller.
        let worker_head = unsafe {
            worker_heads
                .cast::<WorkerLocalListPartialFullHeads>()
                .add(size_index)
                .as_ref()
        };

        worker_head
    }

    /// Returns an instance of `RemoteFreeList` for the given slab.
    ///
    /// # Safety
    /// - `slab_index` must be a valid slab index.
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            // SAFETY: The slab index is guaranteed to be valid by the caller.
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            // SAFETY: The slab meta is guaranteed to be valid by the caller.
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        // SAFETY: The slab index is guaranteed to be valid by the caller.
        let slab = unsafe { self.slab(slab_index) };

        // SAFETY:
        // - `slab_item_size` must be a valid size AND currently assigned to the slab.
        // - `head` must be a valid reference to a `CacheAlignedU16`
        //   that is the head of the remote free list.
        // - `slab` must be a valid pointer to the beginning of the slab.
        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

    /// Returns a pointer to the slab meta for the given slab index.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        // SAFETY: The header is assumed to be valid and initialized.
        let offset = unsafe { self.header.as_ref() }.slab_shared_meta_offset;
        // SAFETY: The header is guaranteed to be valid and initialized.
        let slab_metas = unsafe { self.header.byte_add(offset as usize).cast::<SlabMeta>() };
        // SAFETY: The `slab_index` is guaranteed to be valid by the caller.
        unsafe { slab_metas.add(slab_index as usize) }
    }

    /// Return a `FreeStack` for the given slab index.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        // SAFETY: The `FreeStack` layout is guaranteed to have enough room
        // for top, capacity, and the trailing stack.
        let mut top = unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        let mut capacity = unsafe { top.add(1) };
        let trailing_stack = unsafe { capacity.add(1) };
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

    /// Return a pointer to a slab.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        // SAFETY: The header is guaranteed to be valid and initialized.
        // The slabs are laid out sequentially after the free stacks.
        unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }
}

struct AllocationIndexes {
    slab_index: u32,
    index_within_slab: u16,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        header::layout,
        init::initialize,
        size_classes::{MAX_SIZE, SIZE_CLASSES},
    };

    #[repr(C, align(4096))]
    struct DummyTypeForAlignment([u8; 4096]);
    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024; // 64 MiB
    const TEST_BUFFER_CAPACITY: usize =
        TEST_BUFFER_SIZE / core::mem::size_of::<DummyTypeForAlignment>();

    fn test_buffer() -> Vec<DummyTypeForAlignment> {
        (0..TEST_BUFFER_CAPACITY)
            .map(|_| DummyTypeForAlignment([0; 4096]))
            .collect::<Vec<_>>()
    }

    fn initialize_for_test(
        buffer: *mut u8,
        slab_size: u32,
        num_workers: u32,
        worker_index: u32,
    ) -> Allocator {
        let file_size = TEST_BUFFER_SIZE;

        let layout = layout::offsets(file_size, slab_size, num_workers);

        let header = NonNull::new(buffer as *mut Header).unwrap();
        // SAFETY: The header is valid for any byte pattern, and we are initializing it with the
        //         allocator.
        unsafe {
            initialize::allocator(header, slab_size, num_workers, layout);
        }

        // SAFETY: The header/allocator memory is initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }.unwrap()
    }

    fn join_for_tests(buffer: *mut u8, worker_index: u32) -> Allocator {
        let header = NonNull::new(buffer as *mut Header).unwrap();
        // SAFETY: The header was already initialized by `initialize_for_test` on this buffer.
        unsafe { Allocator::new(header, TEST_BUFFER_SIZE, worker_index) }.unwrap()
    }

    #[test]
    fn test_allocator() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let mut allocations = vec![];

        for size_class in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [size_class - 1, *size_class, size_class + 1] {
                allocations.push(allocator.allocate(size).unwrap());
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
        }
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        // The worker should have local lists for all size classes.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            // SAFETY: ptr is a valid allocation from the allocator.
            unsafe {
                allocator.free(ptr);
            }
        }

        // The worker local lists should be empty after freeing.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }

    #[test]
    fn test_slab_list_transitions() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        // The partial list and full list should begin empty.
        check_worker_list_expectations(&allocator, size_index, false, false);

        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        // The first slab should be partially full and the full list empty.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Allocate one more to fill the slab.
        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        // The first slab should now be full and moved to the full list.
        check_worker_list_expectations(&allocator, size_index, false, true);

        // Allocating again will give a new slab, which will be partially full.
        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        // The second slab should be partially full and the first slab in the full list.
        check_worker_list_expectations(&allocator, size_index, true, true);

        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        // Both slabs should be partially full, and none are full.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Free the rest of the first slab's allocations.
        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        // The first slab is now empty and should be moved to the global free list,
        // but the second slab is still partially full.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Free the second slab allocation.
        unsafe {
            allocator.free(second_slab_allocation);
        }
        // Both slabs should now be empty and moved to the global free list.
        check_worker_list_expectations(&allocator, size_index, false, false);
    }

    #[test]
    fn test_out_of_slabs() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let num_slabs = unsafe { allocator.header.as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        // The next slab allocation should fail, as all slabs are taken.
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }

    #[test]
    fn test_remote_free_lists() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;

        let allocator_0 =
            initialize_for_test(buffer.as_mut_ptr().cast(), slab_size, num_workers, 0);
        let allocator_1 = join_for_tests(buffer.as_mut_ptr().cast(), 1);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Allocate enough to fill the first slab.
        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        // The first slab should be full.
        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        // The slab's remote free list should be empty.
        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        // Free the allocations from the other worker, so they land on the remote free list.
        for ptr in allocations {
            unsafe {
                allocator_1.free(ptr);
            }
        }

        // Allocator 0 can NOT allocate in the same slab yet.
        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        // If we clean the remote free lists, the next allocation should succeed in the same slab.
        allocator_0.clean_remote_free_lists();
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
}
788}