// rts_alloc/allocator.rs
1use crate::free_stack::FreeStack;
2use crate::global_free_list::GlobalFreeList;
3use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
4use crate::linked_list_node::LinkedListNode;
5use crate::remote_free_list::RemoteFreeList;
6use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
7use crate::slab_meta::SlabMeta;
8use crate::worker_local_list::WorkerLocalList;
9use crate::{error::Error, header::Header, size_classes::size_class_index};
10use core::mem::offset_of;
11use core::ptr::NonNull;
12use core::sync::atomic::Ordering;
13use std::fs::File;
14
/// A per-worker handle to a shared slab allocator that lives inside a
/// memory-mapped file. All bookkeeping (header, slab metadata, free lists)
/// is addressed relative to `header`.
pub struct Allocator {
    // Pointer to the start of the mapping; the `Header` sits at offset 0 and
    // every other region is reached via byte offsets stored in the header.
    header: NonNull<Header>,
    // Total size of the mapping in bytes; passed to `munmap` on drop.
    file_size: usize,
    // Index identifying this worker; must be unique per live worker
    // (see `join`'s safety contract).
    worker_index: u32,
}
20
21impl Allocator {
22    /// Create a new `Allocator` in the provided file with the given parameters.
23    pub fn create(
24        file: &File,
25        file_size: usize,
26        num_workers: u32,
27        slab_size: u32,
28        worker_index: u32,
29    ) -> Result<Self, Error> {
30        if worker_index >= num_workers {
31            return Err(Error::InvalidWorkerIndex);
32        }
33        let header = crate::init::create(file, file_size, num_workers, slab_size)?;
34
35        // SAFETY: The header is guaranteed to be valid and initialized.
36        unsafe { Allocator::new(header, file_size, worker_index) }
37    }
38
39    /// Join an existing allocator in the provided file.
40    ///
41    /// # Safety
42    /// - `worker_index` must be uniquely assigned to this worker thread/process.
43    pub unsafe fn join(file: &File, worker_index: u32) -> Result<Self, Error> {
44        let (header, file_size) = crate::init::join(file)?;
45
46        // Check if the worker index is valid.
47        // SAFETY: The header is guaranteed to be valid and initialized.
48        if worker_index >= unsafe { header.as_ref() }.num_workers {
49            return Err(Error::InvalidWorkerIndex);
50        }
51
52        // SAFETY: The header is guaranteed to be valid and initialized.
53        unsafe { Allocator::new(header, file_size, worker_index) }
54    }
55
56    /// Creates a new `Allocator` for the given worker index.
57    ///
58    /// # Safety
59    /// - The `header` must point to a valid header of an initialized allocator.
60    unsafe fn new(
61        header: NonNull<Header>,
62        file_size: usize,
63        worker_index: u32,
64    ) -> Result<Self, Error> {
65        // SAFETY: The header is assumed to be valid and initialized.
66        if worker_index >= unsafe { header.as_ref() }.num_workers {
67            return Err(Error::InvalidWorkerIndex);
68        }
69        Ok(Allocator {
70            header,
71            file_size,
72            worker_index,
73        })
74    }
75}
76
// SAFETY: `Allocator` only holds a raw pointer into the shared mapping plus
// plain integers. NOTE(review): soundness of sending the handle relies on
// each `worker_index` being held by at most one thread at a time (per
// `join`'s safety contract) — confirm cross-thread use is intended.
unsafe impl Send for Allocator {}
78
impl Drop for Allocator {
    fn drop(&mut self) {
        #[cfg(test)]
        {
            // In tests, we do not mmap - the backing memory is an ordinary
            // in-process buffer owned by the test, so there is nothing to unmap.
            return;
        }

        // In test builds the early return above makes the `munmap` below
        // unreachable, hence the allow; in non-test builds the cfg block is
        // compiled out entirely and this runs normally.
        #[allow(unreachable_code)]
        // SAFETY: The header is guaranteed to be valid and initialized.
        //         And outside of tests the allocator is mmaped.
        unsafe {
            libc::munmap(self.header.as_ptr() as *mut libc::c_void, self.file_size);
        }
    }
}
95
impl Allocator {
    /// Allocates a block of memory of the given size.
    /// If the size is larger than the maximum size class, returns `None`.
    /// If the allocation fails, returns `None`.
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        // SAFETY: `size_index` is guaranteed to be valid by `size_class_index`.
        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        // SAFETY:
        // - `slab_index` is guaranteed to be valid by `find_allocatable_slab_index`.
        // - `size_index` is guaranteed to be valid by `size_class_index`.
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

    /// Try to find a suitable slab for allocation.
    /// If a partial slab assigned to the worker is not found, then try to find
    /// a slab from the global free list.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        // Prefer a slab this worker already owns (no cross-worker traffic);
        // only fall back to the global free list when none is available.
        // SAFETY: `size_index` is guaranteed to be valid by the caller.
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| self.take_slab(size_index))
    }

    /// Attempt to allocate memory within a slab.
    /// If the slab is full or the allocation otherwise fails, returns `None`.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        // SAFETY: The slab index is guaranteed to be valid by the caller.
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        // If the slab is empty - remove it from the worker's partial list,
        // and move it to the worker's full list.
        // NOTE(review): this branch also runs when `pop()` returned `None`
        // (stack already empty); that appears to assume slabs on the partial
        // list always have a free slot — confirm that invariant holds.
        if free_stack.is_empty() {
            // SAFETY:
            // - The `slab_index` is guaranteed to be valid by the caller.
            // - The `size_index` is guaranteed to be valid by the caller.
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            // SAFETY:
            // - The `slab_index` is guaranteed to be valid by the caller.
            // - The `size_index` is guaranteed to be valid by the caller.
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            // The returned pointer is slab start + slot index * slot size.
            // SAFETY: The `slab_index` is guaranteed to be valid by the caller.
            let slab = unsafe { self.slab(slab_index) };
            // SAFETY: The `size_index` is guaranteed to be valid by the caller.
            let size = unsafe { size_class(size_index) };
            slab.byte_add(index_within_slab as usize * size as usize)
        })
    }

    /// Attempt to take a slab from the global free list.
    /// If the global free list is empty, returns `None`.
    /// If the slab is successfully taken, it will be marked as assigned to the worker.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        // Mark the slab as owned by this worker for the chosen size class.
        // SAFETY: The slab index is guaranteed to be valid by `pop`.
        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        // SAFETY:
        // - The slab index is guaranteed to be valid by `pop`.
        // - The size index is guaranteed to be valid by the caller.
        unsafe {
            // NOTE(review): capacity is truncated to u16 — assumes
            // slab_size / smallest size class fits in u16; confirm.
            let slab_capacity = self.header.as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        // SAFETY: The size index is guaranteed to be valid by caller.
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        // SAFETY: The slab index is guaranteed to be valid by `pop`.
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}
191
192impl Allocator {
193    /// Free a block of memory previously allocated by this allocator.
194    ///
195    /// # Safety
196    /// - The `ptr` must be a valid pointer to a block of memory allocated by this allocator.
197    /// - The `ptr` must not have been freed before.
198    pub unsafe fn free(&self, ptr: NonNull<u8>) {
199        // SAFETY: The pointer is assumed to be valid and allocated by this allocator.
200        let offset = unsafe { self.offset(ptr) };
201        self.free_offset(offset);
202    }
203
204    /// Free a block of memory previously allocated by this allocator.
205    ///
206    /// # Safety
207    /// - The `offset` must be a valid offset within the memory allocated by this allocator.
208    /// - The `offset` must not have been freed before.
209    pub unsafe fn free_offset(&self, offset: usize) {
210        let allocation_indexes = self.find_allocation_indexes(offset);
211
212        // Check if the slab is assigned to this worker.
213        if self.worker_index
214            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
215                .assigned_worker
216                .load(Ordering::Acquire)
217        {
218            self.local_free(allocation_indexes);
219        } else {
220            self.remote_free(allocation_indexes);
221        }
222    }
223
224    fn local_free(&self, allocation_indexes: AllocationIndexes) {
225        // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
226        let (was_full, is_empty) = unsafe {
227            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
228            let was_full = free_stack.is_empty();
229            free_stack.push(allocation_indexes.index_within_slab);
230            // Names confusing:
231            // - When the **free** stack is empty, the slab is full of allocations.
232            // - When the **free** stack is full, the slab has no allocations available.
233            (was_full, free_stack.is_full())
234        };
235
236        match (was_full, is_empty) {
237            (true, true) => {
238                // The slab was full and is now empty - this cannot happen unless the slab
239                // size is equal to the size class.
240                unreachable!("slab can only contain one allocation - this is not allowed");
241            }
242            (true, false) => {
243                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
244                    .size_class_index
245                    .load(Ordering::Relaxed);
246                // The slab was full and is now partially full. It must be moved
247                // from the worker's full list to the worker's partial list.
248                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
249                unsafe {
250                    self.worker_local_list_full(size_index)
251                        .remove(allocation_indexes.slab_index);
252                }
253                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
254                unsafe {
255                    self.worker_local_list_partial(size_index)
256                        .push(allocation_indexes.slab_index);
257                }
258            }
259            (false, true) => {
260                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
261                    .size_class_index
262                    .load(Ordering::Relaxed);
263                // The slab was partially full and is now empty.
264                // It must be moved from the worker's partial list to the global free list.
265                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
266                unsafe {
267                    self.worker_local_list_partial(size_index)
268                        .remove(allocation_indexes.slab_index);
269                }
270                // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
271                unsafe {
272                    self.global_free_list().push(allocation_indexes.slab_index);
273                }
274            }
275            (false, false) => {
276                // The slab was partially full and is still partially full.
277                // No action is needed, just return.
278            }
279        }
280    }
281
282    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
283        // SAFETY: The allocation indexes are guaranteed to be valid by the caller.
284        unsafe {
285            self.remote_free_list(allocation_indexes.slab_index)
286                .push(allocation_indexes.index_within_slab);
287        }
288    }
289
290    /// Find the offset given a pointer.
291    ///
292    /// # Safety
293    /// - The `ptr` must be a valid pointer in the allocator's address space.
294    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
295        ptr.byte_offset_from(self.header) as usize
296    }
297
298    /// Return a ptr given a shareable offset - calculated by `offset`.
299    ///
300    /// # Safety
301    ///
302    /// - Caller must ensure the offset is valid for this allocator.
303    pub unsafe fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
304        unsafe { self.header.byte_add(offset) }.cast()
305    }
306
307    /// Find the slab index and index within the slab for a given offset.
308    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
309        let (slab_index, offset_within_slab) = {
310            // SAFETY: The header is assumed to be valid and initialized.
311            let header = unsafe { self.header.as_ref() };
312            debug_assert!(offset >= header.slabs_offset as usize);
313            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
314            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
315            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");
316
317            // SAFETY: The slab size is guaranteed to be a power of 2, for a valid header.
318            let offset_within_slab =
319                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };
320
321            (slab_index, offset_within_slab)
322        };
323
324        let index_within_slab = {
325            // SAFETY: The slab index is guaranteed to be valid by the above calculations.
326            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
327                .size_class_index
328                .load(Ordering::Acquire);
329            // SAFETY: The size class index is guaranteed to be valid by valid slab meta.
330            let size_class = unsafe { size_class(size_class_index) };
331            (offset_within_slab / size_class) as u16
332        };
333
334        AllocationIndexes {
335            slab_index,
336            index_within_slab,
337        }
338    }
339
340    /// Return offset within a slab.
341    ///
342    /// # Safety
343    /// - The `slab_size` must be a power of 2.
344    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
345        debug_assert!(slab_size.is_power_of_two());
346        (offset_from_slab_start & (slab_size as usize - 1)) as u32
347    }
348}
349
impl Allocator {
    /// Frees all items in remote free lists.
    pub fn clean_remote_free_lists(&self) {
        // Do partial slabs before full slabs, because the act of freeing within
        // the full slabs may move them to partial slabs list, which would lead
        // to us double-iterating.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    /// Frees all items in the remote free list for the given worker local list.
    ///
    /// NOTE(review): `local_free` can remove the slab currently being visited
    /// from this very list (e.g. partial -> global free list when it empties),
    /// so this relies on `WorkerLocalList::iterate` tolerating removal of the
    /// yielded node — confirm against the iterator's implementation.
    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            // SAFETY: The slab index is guaranteed to be valid by the iterator.
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free(AllocationIndexes {
                    slab_index,
                    index_within_slab,
                })
            }
        }
    }
}
381
impl Allocator {
    /// Returns a pointer to the free list elements in allocator.
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        // SAFETY: The header is assumed to be valid and initialized.
        let offset = unsafe { self.header.as_ref() }.free_list_elements_offset;
        // SAFETY: The header is guaranteed to be valid and initialized.
        unsafe { self.header.byte_add(offset as usize) }.cast()
    }

    /// Returns a `GlobalFreeList` to interact with the global free list.
    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        // SAFETY: The header is assumed to be valid and initialized.
        let head = &unsafe { self.header.as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        // SAFETY:
        // - `head` is a valid reference to the global free list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { GlobalFreeList::new(head, list) }
    }

    /// Returns a `WorkerLocalList` for the current worker to interact with its
    /// local free list of partially full slabs.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        // SAFETY:
        // - `head` is a valid reference to the worker's local list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { WorkerLocalList::new(head, list) }
    }

    /// Returns a `WorkerLocalList` for the current worker to interact with its
    /// local free list of full slabs.
    ///
    /// # Safety
    /// - The `size_index` must be a valid index for the size classes.
    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        // SAFETY:
        // - `head` is a valid reference to the worker's local list head.
        // - `list` is guaranteed to be valid with sufficient capacity.
        unsafe { WorkerLocalList::new(head, list) }
    }

    /// Returns the partial/full list heads for this worker and size class.
    ///
    /// Layout: the header is followed by one `WorkerLocalListHeads` per worker,
    /// each of which is a contiguous array of per-size-class head pairs.
    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        // SAFETY: The header is assumed to be valid and initialized.
        let all_workers_heads = unsafe {
            self.header
                .byte_add(offset_of!(Header, worker_local_list_heads))
                .cast::<WorkerLocalListHeads>()
        };
        // SAFETY: The worker index is guaranteed to be valid by the constructor.
        let worker_heads = unsafe { all_workers_heads.add(self.worker_index as usize) };
        // SAFETY: The size index is guaranteed to be valid by the caller.
        let worker_head = unsafe {
            worker_heads
                .cast::<WorkerLocalListPartialFullHeads>()
                .add(size_index)
                .as_ref()
        };

        worker_head
    }

    /// Returns an instance of `RemoteFreeList` for the given slab.
    ///
    /// # Safety
    /// - `slab_index` must be a valid slab index.
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            // SAFETY: The slab index is guaranteed to be valid by the caller.
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            // SAFETY: The slab meta is guaranteed to be valid by the caller.
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        // SAFETY: The slab index is guaranteed to be valid by the caller.
        let slab = unsafe { self.slab(slab_index) };

        // SAFETY:
        // - `slab_item_size` must be a valid size AND currently assigned to the slab.
        // - `head` must be a valid reference to a `CacheAlignedU16`
        //   that is the head of the remote free list.
        // - `slab` must be a valid pointer to the beginning of the slab.
        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

    /// Returns a pointer to the slab meta for the given slab index.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        // SAFETY: The header is assumed to be valid and initialized.
        let offset = unsafe { self.header.as_ref() }.slab_shared_meta_offset;
        // SAFETY: The header is guaranteed to be valid and initialized.
        let slab_metas = unsafe { self.header.byte_add(offset as usize).cast::<SlabMeta>() };
        // SAFETY: The `slab_index` is guaranteed to be valid by the caller.
        unsafe { slab_metas.add(slab_index as usize) }
    }

    /// Return a mutable reference to a free stack for the given slab index.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        // SAFETY: The `FreeStack` layout is guaranteed to have enough room
        // for top, capacity, and the trailing stack.
        let mut top = unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        // SAFETY: `top` points into the free stack region, which holds at
        // least the top and capacity words before the trailing stack.
        let mut capacity = unsafe { top.add(1) };
        // SAFETY: The trailing stack immediately follows the capacity word.
        let trailing_stack = unsafe { capacity.add(1) };
        // SAFETY: All three pointers are in-bounds per the layout above.
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

    /// Return a pointer to a slab.
    ///
    /// # Safety
    /// - The `slab_index` must be a valid index for the slabs.
    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            // SAFETY: The header is assumed to be valid and initialized.
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        // SAFETY: The header is guaranteed to be valid and initialized.
        // The slabs are laid out sequentially after the free stacks.
        unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }
}
534
/// Location of a single allocation: which slab it lives in and which
/// fixed-size slot it occupies within that slab.
struct AllocationIndexes {
    // Index of the slab within the slabs region.
    slab_index: u32,
    // Slot index within the slab (offset within slab / size class).
    index_within_slab: u16,
}
539
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        header::layout,
        init::initialize,
        size_classes::{MAX_SIZE, SIZE_CLASSES},
    };

    // Page-aligned chunk used to build an aligned in-process buffer that
    // stands in for the mmapped file.
    #[repr(C, align(4096))]
    struct DummyTypeForAlignment([u8; 4096]);
    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024; // 64 MiB
    const TEST_BUFFER_CAPACITY: usize =
        TEST_BUFFER_SIZE / core::mem::size_of::<DummyTypeForAlignment>();

    // Allocates a zeroed, page-aligned buffer of TEST_BUFFER_SIZE bytes.
    fn test_buffer() -> Vec<DummyTypeForAlignment> {
        (0..TEST_BUFFER_CAPACITY)
            .map(|_| DummyTypeForAlignment([0; 4096]))
            .collect::<Vec<_>>()
    }

    // Initializes the allocator structures inside `buffer` and returns a
    // handle for `worker_index`.
    fn initialize_for_test(
        buffer: *mut u8,
        slab_size: u32,
        num_workers: u32,
        worker_index: u32,
    ) -> Allocator {
        let file_size = TEST_BUFFER_SIZE;

        let layout = layout::offsets(file_size, slab_size, num_workers);

        let header = NonNull::new(buffer as *mut Header).unwrap();
        // SAFETY: The header is valid for any byte pattern, and we are initializing it with the
        //         allocator.
        unsafe {
            initialize::allocator(header, slab_size, num_workers, layout);
        }

        // SAFETY: The header/allocator memory is initialized.
        unsafe { Allocator::new(header, file_size, worker_index) }.unwrap()
    }

    // Attaches a second worker handle to a buffer already initialized by
    // `initialize_for_test`.
    fn join_for_tests(buffer: *mut u8, worker_index: u32) -> Allocator {
        let header = NonNull::new(buffer as *mut Header).unwrap();
        // SAFETY: The header is valid if joining an existing allocator.
        unsafe { Allocator::new(header, TEST_BUFFER_SIZE, worker_index) }.unwrap()
    }

    #[test]
    fn test_allocator() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let mut allocations = vec![];

        // Allocate around every size-class boundary (below, at, above).
        for size_class in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [size_class - 1, *size_class, size_class + 1] {
                allocations.push(allocator.allocate(size).unwrap());
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
        }
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        // The worker should have local lists for all size classes.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            // SAFETY: ptr is valid allocation from the allocator.
            unsafe {
                allocator.free(ptr);
            }
        }

        // The worker local lists should be empty after freeing.
        for size_index in 0..NUM_SIZE_CLASSES {
            // SAFETY: The size index is guaranteed to be valid by the loop.
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }

    #[test]
    fn test_slab_list_transitions() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Asserts whether the worker's partial and full lists are non-empty.
        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        // The partial list and full list should begin empty.
        check_worker_list_expectations(&allocator, size_index, false, false);

        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        // The first slab should be partially full and the full list empty.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Allocate one more to fill the slab.
        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        // The first slab should now be full and moved to the full list.
        check_worker_list_expectations(&allocator, size_index, false, true);

        // Allocating again will give a new slab, which will be partially full.
        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        // The second slab should be partially full and the first slab in the full list.
        check_worker_list_expectations(&allocator, size_index, true, true);

        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        // Both slabs should be partially full, and none are full.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Free the remaining first-slab allocations.
        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        // The first slab is now empty and should be moved to the global free list,
        // but the second slab is still partially full.
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Free the second slab allocation.
        unsafe {
            allocator.free(second_slab_allocation);
        }
        // Both slabs should now be empty and moved to the global free list.
        check_worker_list_expectations(&allocator, size_index, false, false);
    }

    #[test]
    fn test_out_of_slabs() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        // Drain the global free list: slabs come back in index order.
        let num_slabs = unsafe { allocator.header.as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        // The next slab allocation should fail, as all slabs are taken.
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }

    #[test]
    fn test_remote_free_lists() {
        let mut buffer = test_buffer();
        let slab_size = 65536; // 64 KiB
        let num_workers = 4;

        // Two workers sharing the same buffer: 0 allocates, 1 frees remotely.
        let allocator_0 =
            initialize_for_test(buffer.as_mut_ptr().cast(), slab_size, num_workers, 0);
        let allocator_1 = join_for_tests(buffer.as_mut_ptr().cast(), 1);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Allocate enough to fill the first slab.
        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        // The first slab should be full.
        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        // The slab's remote list should be empty.
        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        // Free the allocations to the remote free list.
        for ptr in allocations {
            unsafe {
                allocator_1.free(ptr);
            }
        }

        // Allocator 0 can NOT allocate in the same slab.
        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        // If we clean the remote free lists, the next allocation should succeed in the same slab.
        allocator_0.clean_remote_free_lists();
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
}
803}