rts_alloc/allocator.rs

use crate::free_stack::FreeStack;
use crate::global_free_list::GlobalFreeList;
use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
use crate::linked_list_node::LinkedListNode;
use crate::remote_free_list::RemoteFreeList;
use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
use crate::slab_meta::SlabMeta;
use crate::worker_local_list::WorkerLocalList;
use crate::{error::Error, header::Header, size_classes::size_class_index};
use core::mem::offset_of;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use std::fs::File;

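/// A worker-local handle to a shared, slab-based allocator that lives in a
/// memory-mapped file.
///
/// Each cooperating worker (thread or process) holds its own `Allocator` with a
/// unique `worker_index`. A minimal usage sketch (illustrative only; assumes a
/// file of the right size and a worker index that is not used elsewhere):
///
/// ```ignore
/// use std::fs::OpenOptions;
///
/// let file = OpenOptions::new()
///     .read(true)
///     .write(true)
///     .create(true)
///     .open("alloc.bin")?;
/// file.set_len(64 * 1024 * 1024)?;
///
/// // file_size, num_workers, slab_size, worker_index
/// let allocator = Allocator::create(&file, 64 * 1024 * 1024, 4, 65536, 0)?;
/// if let Some(ptr) = allocator.allocate(128) {
///     // SAFETY: `ptr` was just allocated by this allocator and is freed exactly once.
///     unsafe { allocator.free(ptr) };
/// }
/// ```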
pub struct Allocator {
    header: NonNull<Header>,
    file_size: usize,
    worker_index: u32,
}

impl Allocator {
    /// Create a new `Allocator` in the provided file with the given parameters.
    pub fn create(
        file: &File,
        file_size: usize,
        num_workers: u32,
        slab_size: u32,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        let header = crate::init::create(file, file_size, num_workers, slab_size)?;

        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }
    }

    /// Join an existing allocator in the provided file.
    ///
    /// # Safety
    /// - `worker_index` must be uniquely assigned to this worker thread/process.
    pub unsafe fn join(file: &File, worker_index: u32) -> Result<Self, Error> {
        let (header, file_size) = crate::init::join(file)?;

        // Check that the worker index is valid.
        // SAFETY: The header is guaranteed to be valid and initialized.
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }

        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }
    }

    /// Creates a new `Allocator` for the given worker index.
    ///
    /// # Safety
    /// - The `header` must point to a valid header of an initialized allocator.
    unsafe fn new(
        header: NonNull<Header>,
        file_size: usize,
        worker_index: u32,
    ) -> Result<Self, Error> {
        // SAFETY: The header is assumed to be valid and initialized.
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        Ok(Allocator {
            header,
            file_size,
            worker_index,
        })
    }
}

unsafe impl Send for Allocator {}

impl Drop for Allocator {
    fn drop(&mut self) {
        #[cfg(test)]
        {
            // In tests, we do not mmap.
            return;
        }

        #[allow(unreachable_code)]
        // SAFETY: The header is guaranteed to be valid and initialized,
        //         and outside of tests the allocator is mmapped.
        unsafe {
            libc::munmap(self.header.as_ptr() as *mut libc::c_void, self.file_size);
        }
    }
}

impl Allocator {
    /// Allocates a block of memory of the given size.
    ///
    /// Returns `None` if `size` exceeds the maximum size class or if no slab
    /// with free space could be obtained.
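    ///
    /// Illustrative sketch (each returned block is at least `size` bytes;
    /// sizes are rounded up to a size class):
    ///
    /// ```ignore
    /// let a = allocator.allocate(100).expect("out of memory");
    /// let b = allocator.allocate(100).expect("out of memory");
    /// assert_ne!(a, b);
    /// // SAFETY: both pointers were allocated above and are freed exactly once.
    /// unsafe {
    ///     allocator.free(a);
    ///     allocator.free(b);
    /// }
    /// ```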
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        // SAFETY: `size_index` is guaranteed to be valid by `size_class_index`.
        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        // SAFETY:
        // - `slab_index` is guaranteed to be valid by `find_allocatable_slab_index`.
        // - `size_index` is guaranteed to be valid by `size_class_index`.
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

    /// Try to find a suitable slab for allocation.
    /// If no partial slab assigned to this worker is found, try to take a slab
    /// from the global free list.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        // SAFETY: `size_index` is guaranteed to be valid by the caller.
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| self.take_slab(size_index))
    }

    /// Attempt to allocate memory within a slab.
    /// If the slab is full or the allocation otherwise fails, returns `None`.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        // SAFETY: The slab index is guaranteed to be valid by the caller.
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        // If the free stack is now empty, the slab has no free blocks left:
        // remove it from the worker's partial list and move it to the
        // worker's full list.
        if free_stack.is_empty() {
            // SAFETY:
            // - The `slab_index` is guaranteed to be valid by the caller.
            // - The `size_index` is guaranteed to be valid by the caller.
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            // SAFETY:
            // - The `slab_index` is guaranteed to be valid by the caller.
            // - The `size_index` is guaranteed to be valid by the caller.
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            // SAFETY: The `slab_index` is guaranteed to be valid by the caller.
            let slab = unsafe { self.slab(slab_index) };
            // SAFETY: The `size_index` is guaranteed to be valid by the caller.
            let size = unsafe { size_class(size_index) };
            slab.byte_add(index_within_slab as usize * size as usize)
        })
    }

    /// Attempt to take a slab from the global free list.
    /// If the global free list is empty, returns `None`.
    /// If a slab is successfully taken, it is marked as assigned to this worker.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        // SAFETY: The slab index is guaranteed to be valid by `pop`.
        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        // SAFETY:
        // - The slab index is guaranteed to be valid by `pop`.
        // - The size index is guaranteed to be valid by the caller.
        unsafe {
            let slab_capacity = self.header.as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        // SAFETY: The size index is guaranteed to be valid by the caller.
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        // SAFETY: The slab index is guaranteed to be valid by `pop`.
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}

impl Allocator {
    /// Free a block of memory previously allocated by this allocator.
    ///
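    /// If the block's slab is currently assigned to this worker, the block is
    /// returned to the slab's free stack right away; otherwise it is pushed onto
    /// the slab's remote free list and reclaimed later by the owning worker via
    /// [`Self::clean_remote_free_lists`].
    ///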
    /// # Safety
    /// - The `ptr` must be a valid pointer to a block of memory allocated by this allocator.
    /// - The `ptr` must not have been freed before.
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        // SAFETY: The pointer is assumed to be valid and allocated by this allocator.
        let offset = unsafe { self.offset(ptr) };
        let allocation_indexes = self.find_allocation_indexes(offset);

        // Check if the slab is assigned to this worker.
        // SAFETY: The slab index produced by `find_allocation_indexes` is valid.
        if self.worker_index
            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                .assigned_worker
                .load(Ordering::Acquire)
        {
            self.local_free(allocation_indexes);
        } else {
            self.remote_free(allocation_indexes);
        }
    }

    fn local_free(&self, allocation_indexes: AllocationIndexes) {
        // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
        let (was_full, is_empty) = unsafe {
            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
            let was_full = free_stack.is_empty();
            free_stack.push(allocation_indexes.index_within_slab);
            // Note on naming:
            // - When the **free** stack is empty, the slab is full of allocations.
            // - When the **free** stack is full, the slab contains no live allocations.
            (was_full, free_stack.is_full())
        };

        match (was_full, is_empty) {
            (true, true) => {
                // The slab was full and is now empty - this cannot happen unless the slab
                // size is equal to the size class.
                unreachable!("slab can only contain one allocation - this is not allowed");
            }
            (true, false) => {
                // SAFETY: The slab index is guaranteed to be valid by the caller.
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                // The slab was full and is now partially full. It must be moved
                // from the worker's full list to the worker's partial list.
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.worker_local_list_full(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .push(allocation_indexes.slab_index);
                }
            }
            (false, true) => {
                // SAFETY: The slab index is guaranteed to be valid by the caller.
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                // The slab was partially full and is now empty.
                // It must be moved from the worker's partial list to the global free list.
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
                unsafe {
                    self.global_free_list().push(allocation_indexes.slab_index);
                }
            }
            (false, false) => {
                // The slab was partially full and is still partially full.
                // No action is needed.
            }
        }
    }

    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
        // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
        unsafe {
            self.remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

    /// Find the offset given a pointer.
    ///
    /// # Safety
    /// - The `ptr` must be a valid pointer in the allocator's address space.
    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        ptr.byte_offset_from(self.header) as usize
    }

    /// Return a pointer given a shareable offset, as calculated by [`Self::offset`].
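    ///
    /// The offset is relative to the start of the mapped file, so it stays
    /// meaningful for any worker that has joined the same allocator.
    /// Round-trip sketch (illustrative only):
    ///
    /// ```ignore
    /// let ptr = allocator.allocate(64).unwrap();
    /// // SAFETY: `ptr` was returned by `allocate` on this allocator.
    /// let offset = unsafe { allocator.offset(ptr) };
    /// assert_eq!(allocator.ptr_from_offset(offset), ptr);
    /// ```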
    pub fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        // SAFETY: The offset is expected to have been produced by `offset`,
        //         and therefore to lie within the mapping.
        unsafe { self.header.byte_add(offset) }.cast()
    }

    /// Find the slab index and index within the slab for a given offset.
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        let (slab_index, offset_within_slab) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            debug_assert!(offset >= header.slabs_offset as usize);
            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");

            // SAFETY: The slab size is guaranteed to be a power of 2 for a valid header.
            let offset_within_slab =
                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };

            (slab_index, offset_within_slab)
        };

        let index_within_slab = {
            // SAFETY: The slab index is guaranteed to be valid by the above calculations.
            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
                .size_class_index
                .load(Ordering::Acquire);
            // SAFETY: The size class index is guaranteed to be valid for a valid slab meta.
            let size_class = unsafe { size_class(size_class_index) };
            (offset_within_slab / size_class) as u16
        };

        AllocationIndexes {
            slab_index,
            index_within_slab,
        }
    }

    /// Return offset within a slab.
    ///
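    /// Because `slab_size` is a power of two, masking with `slab_size - 1` is
    /// equivalent to `offset_from_slab_start % slab_size`. For example, with a
    /// 64 KiB slab, an offset of `65 * 1024` from the start of the slab region
    /// is 1024 bytes into its slab.
    ///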
    /// # Safety
    /// - The `slab_size` must be a power of 2.
    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
        debug_assert!(slab_size.is_power_of_two());
        (offset_from_slab_start & (slab_size as usize - 1)) as u32
    }
}

impl Allocator {
    /// Frees all items in remote free lists.
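    ///
    /// Blocks freed by other workers land on per-slab remote free lists (see
    /// [`Self::free`]); they are returned to this worker's slabs when this
    /// method is called.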
    pub fn clean_remote_free_lists(&self) {
        // Process partial slabs before full slabs: freeing into a full slab may
        // move it to the partial list, which could otherwise cause us to visit
        // the same slab twice.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    /// Frees all items in the remote free list of each slab in the given worker local list.
    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            // SAFETY: The slab index is guaranteed to be valid by the iterator.
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free(AllocationIndexes {
                    slab_index,
                    index_within_slab,
                })
            }
        }
    }
}

impl Allocator {
    /// Returns a pointer to the free list elements in the allocator.
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        // SAFETY: The header is assumed to be valid and initialized.
        let offset = unsafe { self.header.as_ref() }.free_list_elements_offset;
        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { self.header.byte_add(offset as usize) }.cast()
    }

    /// Returns a `GlobalFreeList` to interact with the global free list.
    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        // SAFETY: The header is assumed to be valid and initialized.
        let head = &unsafe { self.header.as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        // SAFETY:
        // - `head` is a valid reference to the global free list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { GlobalFreeList::new(head, list) }
    }

    /// Returns a `WorkerLocalList` for the current worker to interact with its
    /// local list of partially full slabs.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        // SAFETY:
        // - `head` is a valid reference to the worker's local list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { WorkerLocalList::new(head, list) }
    }

    /// Returns a `WorkerLocalList` for the current worker to interact with its
    /// local list of full slabs.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        // SAFETY:
        // - `head` is a valid reference to the worker's local list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { WorkerLocalList::new(head, list) }
    }

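    // Layout note (implied by the pointer arithmetic below): starting at
    // `worker_local_list_heads`, the header stores one `WorkerLocalListHeads` per
    // worker, each of which is a per-size-class array of
    // `WorkerLocalListPartialFullHeads` (a partial head and a full head).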
    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        // SAFETY: The header is assumed to be valid and initialized.
        let all_workers_heads = unsafe {
            self.header
                .byte_add(offset_of!(Header, worker_local_list_heads))
                .cast::<WorkerLocalListHeads>()
        };
        // SAFETY: The worker index is guaranteed to be valid by the constructor.
        let worker_heads = unsafe { all_workers_heads.add(self.worker_index as usize) };
        // SAFETY: The size index is guaranteed to be valid by the caller.
        let worker_head = unsafe {
            worker_heads
                .cast::<WorkerLocalListPartialFullHeads>()
                .add(size_index)
                .as_ref()
        };

        worker_head
    }

    /// Returns an instance of `RemoteFreeList` for the given slab.
    ///
    /// # Safety
    /// - `slab_index` must be a valid slab index.
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            // SAFETY: The slab index is guaranteed to be valid by the caller.
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            // SAFETY: The slab meta is guaranteed to be valid by the caller.
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        // SAFETY: The slab index is guaranteed to be valid by the caller.
        let slab = unsafe { self.slab(slab_index) };

        // SAFETY:
        // - `slab_item_size` must be a valid size AND currently assigned to the slab.
        // - `head` must be a valid reference to a `CacheAlignedU16`
        //   that is the head of the remote free list.
        // - `slab` must be a valid pointer to the beginning of the slab.
        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

    /// Returns a pointer to the slab meta for the given slab index.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        // SAFETY: The header is assumed to be valid and initialized.
        let offset = unsafe { self.header.as_ref() }.slab_shared_meta_offset;
        // SAFETY: The header is guaranteed to be valid and initialized.
        let slab_metas = unsafe { self.header.byte_add(offset as usize).cast::<SlabMeta>() };
        // SAFETY: The `slab_index` is guaranteed to be valid by the caller.
        unsafe { slab_metas.add(slab_index as usize) }
    }

    /// Return a `FreeStack` handle for the given slab index.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        // SAFETY: The `FreeStack` layout is guaranteed to have enough room
        // for the top, the capacity, and the trailing stack.
        let mut top = unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        let mut capacity = unsafe { top.add(1) };
        let trailing_stack = unsafe { capacity.add(1) };
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

    /// Return a pointer to a slab.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        // SAFETY: The header is guaranteed to be valid and initialized.
        // The slabs are laid out sequentially after the free stacks.
        unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }
}

struct AllocationIndexes {
    slab_index: u32,
    index_within_slab: u16,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        header::layout,
        init::initialize,
        size_classes::{MAX_SIZE, SIZE_CLASSES},
    };

    #[repr(C, align(4096))]
    struct DummyTypeForAlignment([u8; 4096]);
    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024; // 64 MiB
    const TEST_BUFFER_CAPACITY: usize =
        TEST_BUFFER_SIZE / core::mem::size_of::<DummyTypeForAlignment>();

    fn test_buffer() -> Vec<DummyTypeForAlignment> {
        (0..TEST_BUFFER_CAPACITY)
            .map(|_| DummyTypeForAlignment([0; 4096]))
            .collect::<Vec<_>>()
    }

    fn initialize_for_test(
        buffer: *mut u8,
        slab_size: u32,
        num_workers: u32,
        worker_index: u32,
    ) -> Allocator {
        let file_size = TEST_BUFFER_SIZE;

        let layout = layout::offsets(file_size, slab_size, num_workers);

        let header = NonNull::new(buffer as *mut Header).unwrap();
        // SAFETY: The header is valid for any byte pattern, and we are initializing it with the
        //         allocator.
        unsafe {
            initialize::allocator(header, slab_size, num_workers, layout);
        }

        // SAFETY: The header/allocator memory is initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }.unwrap()
    }

    fn join_for_tests(buffer: *mut u8, worker_index: u32) -> Allocator {
        let header = NonNull::new(buffer as *mut Header).unwrap();
        // SAFETY: The header is valid because it belongs to an already-initialized allocator.
        unsafe { Allocator::new(header, TEST_BUFFER_SIZE, worker_index) }.unwrap()
    }

    #[test]
    fn test_allocator() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let mut allocations = vec![];

        for size_class in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [size_class - 1, *size_class, size_class + 1] {
                allocations.push(allocator.allocate(size).unwrap());
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
        }
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        // The worker should have local lists for all size classes.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            // SAFETY: `ptr` is a valid allocation from the allocator.
            unsafe {
                allocator.free(ptr);
            }
        }

        // The worker local lists should be empty after freeing.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }

    #[test]
    fn test_slab_list_transitions() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        // The partial list and full list should begin empty.
        check_worker_list_expectations(&allocator, size_index, false, false);

        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        // The first slab should be partially full and the full list empty.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Allocate one more to fill the slab.
        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        // The first slab should now be full and moved to the full list.
        check_worker_list_expectations(&allocator, size_index, false, true);

        // Allocating again will give a new slab, which will be partially full.
        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        // The second slab should be partially full and the first slab in the full list.
        check_worker_list_expectations(&allocator, size_index, true, true);

        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        // Both slabs should be partially full, and none are full.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Free the remaining first-slab allocations.
        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        // The first slab is now empty and should be moved to the global free list,
        // but the second slab is still partially full.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Free the second slab allocation.
        unsafe {
            allocator.free(second_slab_allocation);
        }
        // Both slabs should now be empty and moved to the global free list.
        check_worker_list_expectations(&allocator, size_index, false, false);
    }

    #[test]
    fn test_out_of_slabs() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let num_slabs = unsafe { allocator.header.as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        // The next slab allocation should fail, as all slabs are taken.
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }

    #[test]
    fn test_remote_free_lists() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;

        let allocator_0 =
            initialize_for_test(buffer.as_mut_ptr().cast(), slab_size, num_workers, 0);
        let allocator_1 = join_for_tests(buffer.as_mut_ptr().cast(), 1);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Allocate enough to fill the first slab.
        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        // The first slab should be full.
        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        // The slab's remote free list should be empty.
        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        // Free the allocations to the remote free list.
        for ptr in allocations {
            unsafe {
                allocator_1.free(ptr);
            }
        }

        // Allocator 0 can NOT allocate in the same slab yet.
        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        // After cleaning the remote free lists, the next allocation should land in the same slab.
        allocator_0.clean_remote_free_lists();
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
}