1use crate::free_stack::FreeStack;
2use crate::global_free_list::GlobalFreeList;
3use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
4use crate::linked_list_node::LinkedListNode;
5use crate::remote_free_list::RemoteFreeList;
6use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
7use crate::slab_meta::SlabMeta;
8use crate::sync::Ordering;
9use crate::worker_local_list::WorkerLocalList;
10use crate::{error::Error, header::Header, size_classes::size_class_index};
11use core::mem::offset_of;
12use core::ptr::NonNull;
13use std::fs::File;
14use std::sync::Arc;
15
/// A per-worker handle to the shared allocator.
///
/// Holds the shared mapping (`base`) plus the worker slot claimed on
/// construction; the slot is released again when the handle is dropped.
pub struct Allocator {
    base: AllocatorBase,
    // Index of the worker slot this handle claimed; released in `Drop`.
    worker_index: u32,
}
20
/// A handle that can only free allocations (by pushing onto per-slab remote
/// free lists); it claims no worker slot and cannot allocate.
pub struct FreeOnlyAllocator {
    base: AllocatorBase,
}
24
/// Owns the file-backed memory mapping; unmapped when the last
/// `Arc<MappedRegion>` is dropped.
struct MappedRegion {
    // Base of the mapping; the mapping starts with the `Header`.
    header: NonNull<Header>,
    // Total mapped size in bytes, as passed back to `unmap_file`.
    file_size: usize,
}
29
impl Drop for MappedRegion {
    fn drop(&mut self) {
        // Best-effort unmap; an error cannot be meaningfully handled in drop,
        // so it is deliberately discarded.
        let _ = crate::memory_map::unmap_file(self.header.as_ptr().cast(), self.file_size);
    }
}
36
// SAFETY: `MappedRegion` only stores the mapping's base pointer and size; the
// mapped data itself is shared-memory state coordinated through atomics.
// NOTE(review): soundness relies on that crate-wide invariant — confirm.
unsafe impl Send for MappedRegion {}
// SAFETY: see `Send` above — `MappedRegion` exposes no `&mut` access of its own.
unsafe impl Sync for MappedRegion {}
45
/// Cheaply-clonable shared state: an `Arc` over the mapped region, so every
/// handle cloned from it keeps the mapping alive.
#[derive(Clone)]
struct AllocatorBase {
    region: Arc<MappedRegion>,
}
50
51impl Allocator {
52 pub unsafe fn create(
59 file: &File,
60 file_size: usize,
61 min_workers: u32,
62 slab_size: u32,
63 ) -> Result<Self, Error> {
64 let header = crate::init::create(file, file_size, min_workers, slab_size)?;
65 let base = unsafe { AllocatorBase::from_mapping(header, file_size) };
68 let worker_index = match unsafe { claim_any_worker_index(base.header()) } {
70 Some(worker_index) => worker_index,
71 None => return Err(Error::NoAvailableWorkers),
72 };
73
74 Allocator::new(base, worker_index)
75 }
76
77 pub fn join(file: &File) -> Result<Self, Error> {
85 let (header, file_size) = crate::init::join(file)?;
86 let base = unsafe { AllocatorBase::from_mapping(header, file_size) };
89 let worker_index = match unsafe { claim_any_worker_index(base.header()) } {
91 Some(worker_index) => worker_index,
92 None => return Err(Error::NoAvailableWorkers),
93 };
94
95 Allocator::new(base, worker_index)
96 }
97
98 pub fn join_from_existing(existing: &Allocator) -> Result<Self, Error> {
101 Self::join_from_base(&existing.base)
102 }
103
104 pub fn join_from_existing_free_only(existing: &FreeOnlyAllocator) -> Result<Self, Error> {
107 Self::join_from_base(&existing.base)
108 }
109
110 fn join_from_base(base: &AllocatorBase) -> Result<Self, Error> {
113 let worker_index = match unsafe { claim_any_worker_index(base.header()) } {
115 Some(worker_index) => worker_index,
116 None => return Err(Error::NoAvailableWorkers),
117 };
118 Allocator::new(base.clone(), worker_index)
119 }
120
121 fn new(base: AllocatorBase, worker_index: u32) -> Result<Self, Error> {
123 if worker_index >= unsafe { base.header().as_ref() }.num_workers {
125 return Err(Error::InvalidWorkerIndex);
126 }
127 Ok(Allocator { base, worker_index })
128 }
129}
130
// SAFETY: an `Allocator` exclusively owns its claimed worker slot, and all
// cross-handle state lives in the mapped region behind atomics.
// NOTE(review): soundness relies on that crate-wide invariant — confirm.
unsafe impl Send for Allocator {}
// SAFETY: a `FreeOnlyAllocator` only pushes onto remote free lists, which are
// shared structures in the mapping. NOTE(review): same invariant as above.
unsafe impl Send for FreeOnlyAllocator {}
133
impl Drop for Allocator {
    fn drop(&mut self) {
        // Return our worker slot so another handle can claim it.
        self.release_worker();
    }
}
139
140impl FreeOnlyAllocator {
141 pub fn join(file: &File) -> Result<Self, Error> {
148 let (header, file_size) = crate::init::join(file)?;
149 Ok(FreeOnlyAllocator {
152 base: unsafe { AllocatorBase::from_mapping(header, file_size) },
153 })
154 }
155
156 pub fn join_from_existing(existing: &Allocator) -> Self {
158 Self::from_base(&existing.base)
159 }
160
161 pub fn join_from_existing_free_only(existing: &FreeOnlyAllocator) -> Self {
163 Self::from_base(&existing.base)
164 }
165
166 fn from_base(base: &AllocatorBase) -> Self {
167 Self { base: base.clone() }
168 }
169}
170
impl Allocator {
    /// Marks this worker slot as unclaimed so another handle may take it.
    fn release_worker(&self) {
        self.worker_meta().claimed.store(0, Ordering::Release);
    }

    /// Allocates `size` bytes, rounded up to the matching size class.
    ///
    /// Returns `None` when `size` has no size class (too large) or when no
    /// slab with free space can be obtained.
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        // SAFETY: `size_index` came from `size_class_index`, so it is valid.
        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

    /// Finds a slab with free space for `size_index`: first the head of this
    /// worker's partial list, otherwise a fresh slab from the global free list.
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| self.take_slab(size_index))
    }

    /// Pops one free slot from `slab_index`'s free stack and returns its
    /// address. If the stack is empty afterwards the slab is moved from the
    /// partial list to the full list. On success, bumps this worker's
    /// outstanding-bytes counter by the class size.
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        if free_stack.is_empty() {
            // No free slots remain: partial -> full. This branch also runs
            // when `pop` returned `None` (stack was already empty).
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            let slab = unsafe { self.slab(slab_index) };
            let size = unsafe { size_class(size_index) };
            self.worker_meta()
                .outstanding_allocation_bytes
                .fetch_add(size as u64, Ordering::Relaxed);
            // Allocation address = slab base + slot index * class size.
            slab.byte_add(index_within_slab as usize * size as usize)
        })
    }

    /// Pops a slab off the global free list, assigns it to this worker and
    /// size class, resets its free stack to full capacity, and pushes it onto
    /// this worker's partial list.
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        unsafe {
            // Number of class-sized slots that fit in one slab.
            let slab_capacity = self.base.header().as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}
273
impl Allocator {
    /// Frees a pointer previously returned by [`Self::allocate`].
    ///
    /// # Safety
    /// `ptr` must be a live allocation obtained from this allocator's mapping.
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        let offset = unsafe { self.offset(ptr) };
        self.free_offset(offset);
    }

    /// Frees an allocation identified by its byte offset from the mapping base.
    ///
    /// If the containing slab is assigned to this worker, the slot is returned
    /// to the slab's free stack immediately; otherwise it is pushed onto the
    /// slab's remote free list for the owning worker to drain later.
    ///
    /// # Safety
    /// `offset` must identify a live allocation within this mapping.
    pub unsafe fn free_offset(&self, offset: usize) {
        let allocation_indexes = self.find_allocation_indexes(offset);

        if self.worker_index
            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                .assigned_worker
                .load(Ordering::Acquire)
        {
            let (size_index, size) = unsafe { self.slab_size_class(allocation_indexes.slab_index) };
            // Only local frees adjust this worker's counter; remote frees are
            // accounted for when the owner drains its remote free lists.
            self.worker_meta()
                .outstanding_allocation_bytes
                .fetch_sub(size as u64, Ordering::Relaxed);
            self.local_free_with_size_index(allocation_indexes, size_index);
        } else {
            self.remote_free(allocation_indexes);
        }
    }

    /// Returns one slot to the slab's free stack and fixes list membership:
    /// full -> partial when the slab gains its first free slot, and partial ->
    /// global free list when every slot becomes free again.
    fn local_free_with_size_index(&self, allocation_indexes: AllocationIndexes, size_index: usize) {
        let (was_full, is_empty) = unsafe {
            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
            // `was_full`: the slab had no free slots before this push.
            let was_full = free_stack.is_empty();
            free_stack.push(allocation_indexes.index_within_slab);
            // `is_empty`: the slab has no live allocations after this push
            // (i.e. its free stack is now at full capacity).
            (was_full, free_stack.is_full())
        };

        match (was_full, is_empty) {
            (true, true) => {
                // Would imply a slab with exactly one slot in total.
                unreachable!("slab can only contain one allocation - this is not allowed");
            }
            (true, false) => {
                // First slot freed: the slab is allocatable again.
                unsafe {
                    self.worker_local_list_full(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .push(allocation_indexes.slab_index);
                }
            }
            (false, true) => {
                // Last slot freed: hand the slab back to the global pool.
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.global_free_list().push(allocation_indexes.slab_index);
                }
            }
            (false, false) => {
                // Still partially used: stays on the partial list.
            }
        }
    }

    /// Pushes the slot onto the owning slab's remote free list.
    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
        unsafe {
            self.base
                .remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

    /// Byte offset of `ptr` from the mapping base.
    ///
    /// # Safety
    /// `ptr` must point into this allocator's mapping.
    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        self.base.offset(ptr)
    }

    /// Pointer at byte `offset` from the mapping base.
    ///
    /// # Safety
    /// `offset` must lie within this allocator's mapping.
    pub unsafe fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        self.base.ptr_from_offset(offset)
    }

    /// Resolves an offset to its (slab, slot) coordinates.
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        self.base.find_allocation_indexes(offset)
    }
}
394
impl FreeOnlyAllocator {
    /// Frees a pointer that was allocated through this mapping.
    ///
    /// # Safety
    /// `ptr` must be a live allocation from this mapping.
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        let offset = unsafe { self.offset(ptr) };
        self.free_offset(offset);
    }

    /// Frees an allocation by offset. With no worker identity, every free is a
    /// remote free: the slot is pushed onto the slab's remote free list for
    /// the owning worker to drain.
    ///
    /// # Safety
    /// `offset` must identify a live allocation within this mapping.
    pub unsafe fn free_offset(&self, offset: usize) {
        let allocation_indexes = self.find_allocation_indexes(offset);
        unsafe {
            self.base
                .remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

    /// Byte offset of `ptr` from the mapping base.
    ///
    /// # Safety
    /// `ptr` must point into this mapping.
    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        self.base.offset(ptr)
    }

    /// Pointer at byte `offset` from the mapping base.
    ///
    /// # Safety
    /// `offset` must lie within this mapping.
    pub unsafe fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        self.base.ptr_from_offset(offset)
    }

    /// Resolves an offset to its (slab, slot) coordinates.
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        self.base.find_allocation_indexes(offset)
    }
}
444
impl AllocatorBase {
    /// Wraps an existing mapping; the region is unmapped when the last clone
    /// of the resulting base is dropped.
    ///
    /// # Safety
    /// `header` must be the base of a live mapping of `file_size` bytes.
    unsafe fn from_mapping(header: NonNull<Header>, file_size: usize) -> Self {
        Self {
            region: Arc::new(MappedRegion { header, file_size }),
        }
    }

    /// Base pointer of the mapping (which starts with the `Header`).
    #[inline]
    fn header(&self) -> NonNull<Header> {
        self.region.header
    }

    /// Byte offset of `ptr` from the mapping base.
    ///
    /// # Safety
    /// `ptr` must point into this mapping (at or after the header).
    unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        ptr.byte_offset_from(self.header()) as usize
    }

    /// Pointer at byte `offset` from the mapping base.
    ///
    /// # Safety
    /// `offset` must lie within this mapping.
    unsafe fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        unsafe { self.header().byte_add(offset) }.cast()
    }

    /// Maps a byte offset to its (slab index, slot index) coordinates using
    /// the slab geometry from the header and the slab's recorded size class.
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        let (slab_index, offset_within_slab) = {
            let header = unsafe { self.header().as_ref() };
            debug_assert!(offset >= header.slabs_offset as usize);
            // wrapping_sub: underflow is guarded by the debug_assert above in
            // debug builds; release builds trust the caller's offset.
            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");

            let offset_within_slab =
                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };

            (slab_index, offset_within_slab)
        };

        let index_within_slab = {
            // Slot index = offset within the slab divided by the class size
            // recorded in the slab's shared metadata.
            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
                .size_class_index
                .load(Ordering::Acquire);
            let size_class = unsafe { size_class(size_class_index) };
            (offset_within_slab / size_class) as u16
        };

        AllocationIndexes {
            slab_index,
            index_within_slab,
        }
    }

    /// Offset modulo the slab size, computed with a mask (slab size is a
    /// power of two).
    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
        debug_assert!(slab_size.is_power_of_two());
        (offset_from_slab_start & (slab_size as usize - 1)) as u32
    }

    /// Builds a view over `slab_index`'s remote free list: its head lives in
    /// the slab's shared metadata, its nodes inside the slab's own slots.
    ///
    /// # Safety
    /// `slab_index` must be in bounds and the slab assigned a size class.
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        let slab = unsafe { self.slab(slab_index) };

        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

    /// Pointer to the shared metadata of `slab_index` (an array indexed by
    /// slab, located at `slab_shared_meta_offset` in the mapping).
    ///
    /// # Safety
    /// `slab_index` must be in bounds.
    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        let offset = unsafe { self.header().as_ref() }.slab_shared_meta_offset;
        let slab_metas = unsafe { self.header().byte_add(offset as usize).cast::<SlabMeta>() };
        unsafe { slab_metas.add(slab_index as usize) }
    }

    /// Base pointer of slab `slab_index` (slabs are laid out contiguously
    /// starting at `slabs_offset`).
    ///
    /// # Safety
    /// `slab_index` must be in bounds.
    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            let header = unsafe { self.header().as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        unsafe {
            self.header()
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }

    /// Pointer to the array of linked-list nodes backing the slab lists
    /// (global free list and worker-local lists).
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        let offset = unsafe { self.header().as_ref() }.free_list_elements_offset;
        unsafe { self.header().byte_add(offset as usize) }.cast()
    }
}
583
impl Allocator {
    /// Bytes currently allocated and accounted to this worker (sums class
    /// sizes, not requested sizes).
    pub fn outstanding_allocation_bytes(&self) -> u64 {
        self.worker_meta()
            .outstanding_allocation_bytes
            .load(Ordering::Relaxed)
    }

    /// Drains the remote free lists of every slab this worker owns (both
    /// partial and full lists, for every size class), applying the frees
    /// locally.
    pub fn clean_remote_free_lists(&self) {
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    /// Drains remote frees for each slab in `worker_local_list` and deducts
    /// the drained bytes from this worker's counter in one batch.
    //
    // NOTE(review): `local_free_with_size_index` can move a slab off the list
    // being iterated (full -> partial, or partial -> global). Safety of
    // mutating during `iterate()` depends on `WorkerLocalList`'s iterator
    // contract — confirm.
    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            let (size_index, size) = unsafe { self.slab_size_class(slab_index) };
            let mut drained_items = 0u64;
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free_with_size_index(
                    AllocationIndexes {
                        slab_index,
                        index_within_slab,
                    },
                    size_index,
                );
                drained_items += 1;
            }
            if drained_items != 0 {
                // Remote frees were not deducted at free time; settle them now.
                self.worker_meta()
                    .outstanding_allocation_bytes
                    .fetch_sub(drained_items * size as u64, Ordering::Relaxed);
            }
        }
    }
}
633
impl Allocator {
    /// Linked-list node array shared by the global and worker-local lists.
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        self.base.free_list_elements()
    }

    /// View over the global free list of unassigned slabs.
    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        let head = &unsafe { self.base.header().as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        unsafe { GlobalFreeList::new(head, list) }
    }

    /// This worker's partial-slab list for `size_index`.
    ///
    /// # Safety
    /// `size_index` must be a valid size-class index.
    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    /// This worker's full-slab list for `size_index`.
    ///
    /// # Safety
    /// `size_index` must be a valid size-class index.
    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    /// This worker's metadata record in the mapping.
    fn worker_meta(&self) -> &WorkerLocalListHeads {
        unsafe { worker_meta_ptr(self.base.header(), self.worker_index).as_ref() }
    }

    /// The (partial, full) list heads for `size_index` within this worker's
    /// metadata.
    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        &self.worker_meta().heads[size_index]
    }

    /// Reads the slab's size class: returns (class index, class size in bytes).
    ///
    /// # Safety
    /// `slab_index` must be in bounds and assigned a size class.
    unsafe fn slab_size_class(&self, slab_index: u32) -> (usize, u32) {
        let size_index = unsafe { self.slab_meta(slab_index).as_ref() }
            .size_class_index
            .load(Ordering::Relaxed);
        let size = unsafe { size_class(size_index) };
        (size_index, size)
    }

    /// See [`AllocatorBase::remote_free_list`].
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        self.base.remote_free_list(slab_index)
    }

    /// See [`AllocatorBase::slab_meta`].
    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        self.base.slab_meta(slab_index)
    }

    /// Builds a view over slab `slab_index`'s free stack.
    ///
    /// The per-slab stack region appears to be laid out as: top counter,
    /// then capacity, then the trailing stack entries — the element types are
    /// fixed by `FreeStack::new` (NOTE(review): confirm against `FreeStack`).
    ///
    /// # Safety
    /// `slab_index` must be in bounds; the caller must not hold another live
    /// view over the same stack.
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            let header = unsafe { self.base.header().as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        // Per-slab stride of the free-stack region.
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        let mut top = unsafe {
            self.base
                .header()
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        let mut capacity = unsafe { top.add(1) };
        let trailing_stack = unsafe { capacity.add(1) };
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

    /// See [`AllocatorBase::slab`].
    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        self.base.slab(slab_index)
    }
}
754
/// Pointer to worker `worker_index`'s metadata record: the header embeds an
/// array of `WorkerLocalListHeads` at `worker_local_list_heads`, indexed by
/// worker.
///
/// # Safety
/// `header` must point at a valid mapping and `worker_index` must be less
/// than the header's `num_workers`.
unsafe fn worker_meta_ptr(
    header: NonNull<Header>,
    worker_index: u32,
) -> NonNull<WorkerLocalListHeads> {
    let all_workers_heads = unsafe {
        header
            .byte_add(offset_of!(Header, worker_local_list_heads))
            .cast::<WorkerLocalListHeads>()
    };
    unsafe { all_workers_heads.add(worker_index as usize) }
}
767
768unsafe fn claim_any_worker_index(header: NonNull<Header>) -> Option<u32> {
769 let num_workers = unsafe { header.as_ref() }.num_workers;
770 for worker_index in 0..num_workers {
771 let claimed = unsafe { &worker_meta_ptr(header, worker_index).as_ref().claimed };
772 if claimed
773 .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire)
774 .is_ok()
775 {
776 return Some(worker_index);
777 }
778 }
779 None
780}
781
/// Location of one allocation: which slab it lives in, and which class-sized
/// slot inside that slab.
struct AllocationIndexes {
    slab_index: u32,
    index_within_slab: u16,
}
786
787#[cfg(test)]
788mod tests {
789 use super::*;
790 use crate::size_classes::{MAX_SIZE, SIZE_CLASSES};
791
    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024;

    /// Creates an anonymous temporary file to back the test mapping. On unix
    /// the path is unlinked immediately (the open handle keeps it alive); on
    /// windows, delete-on-close flags achieve the same effect.
    fn create_temp_shmem_file() -> Result<File, Error> {
        use std::fs::OpenOptions;
        use std::sync::atomic::{AtomicU64, Ordering};

        // Unique per-process suffix so concurrently running tests don't
        // collide on the same path.
        static COUNTER: AtomicU64 = AtomicU64::new(0);
        let temp_dir = std::env::temp_dir();
        let n = COUNTER.fetch_add(1, Ordering::Relaxed);
        let path = temp_dir.join(format!("rts-alloc-{n}.tmp"));

        let mut open_options = OpenOptions::new();
        open_options.read(true).write(true).create_new(true);

        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            use windows_sys::Win32::Storage::FileSystem::{
                FILE_ATTRIBUTE_TEMPORARY, FILE_FLAG_DELETE_ON_CLOSE,
            };

            open_options
                .attributes(FILE_ATTRIBUTE_TEMPORARY)
                .custom_flags(FILE_FLAG_DELETE_ON_CLOSE);
        }

        let open_result = open_options.open(&path);

        match open_result {
            Ok(file) => {
                #[cfg(unix)]
                {
                    // Unlink now; the handle keeps the file alive until close.
                    std::fs::remove_file(&path)?;
                }
                Ok(file)
            }
            Err(err) => Err(Error::IoError(err)),
        }
    }
831
    /// Creates a temp file and a freshly-initialized allocator over it; the
    /// file is returned so callers can `join` additional handles.
    fn initialize_for_test(slab_size: u32, num_workers: u32) -> (File, Allocator) {
        let file = create_temp_shmem_file().unwrap();
        let allocator =
            unsafe { Allocator::create(&file, TEST_BUFFER_SIZE, num_workers, slab_size).unwrap() };
        (file, allocator)
    }
839
    /// End-to-end allocate/free cycle: the outstanding-bytes counter tracks
    /// class sizes exactly, oversized requests fail, and after freeing
    /// everything the partial lists are empty again.
    #[test]
    fn test_allocator() {
        let slab_size = 65536;
        let num_workers = 4;
        let (_file, allocator) = initialize_for_test(slab_size, num_workers);
        assert_eq!(allocator.outstanding_allocation_bytes(), 0);

        let mut allocations = vec![];
        let mut total_allocated_bytes = 0u64;

        // For each class (except the last), allocate just below, at, and just
        // above the class boundary; accounting uses the rounded class size.
        for class_size in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [class_size - 1, *class_size, class_size + 1] {
                allocations.push(allocator.allocate(size).unwrap());
                total_allocated_bytes += size_class_index(size)
                    .map(|i| unsafe { size_class(i) as u64 })
                    .unwrap();
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
            total_allocated_bytes += size_class_index(size)
                .map(|i| unsafe { size_class(i) as u64 })
                .unwrap();
        }
        assert_eq!(
            allocator.outstanding_allocation_bytes(),
            total_allocated_bytes
        );
        // Beyond the largest class there is no size class to serve from.
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        // Every class allocated at least once, so each has a partial slab.
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        assert_eq!(allocator.outstanding_allocation_bytes(), 0);

        // Fully-freed slabs return to the global pool, emptying the partials.
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }
892
    /// Walks a slab through every list transition: empty -> partial -> full,
    /// spilling into a second slab, then back down to fully free.
    #[test]
    fn test_slab_list_transitions() {
        let slab_size = 65536;
        let num_workers = 4;
        let (_file, allocator) = initialize_for_test(slab_size, num_workers);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Asserts whether the partial/full lists for `size_index` are
        // non-empty, printing the offending head on failure.
        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        check_worker_list_expectations(&allocator, size_index, false, false);

        // Fill the first slab up to one slot short of capacity: stays partial.
        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        check_worker_list_expectations(&allocator, size_index, true, false);

        // Last slot: the slab moves partial -> full.
        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        check_worker_list_expectations(&allocator, size_index, false, true);

        // Next allocation opens a second slab on the partial list.
        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        check_worker_list_expectations(&allocator, size_index, true, true);

        // Freeing one slot moves the first slab full -> partial.
        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Fully freeing the first slab returns it to the global pool; the
        // second slab keeps the partial list non-empty.
        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Freeing the second slab's only allocation empties both lists.
        unsafe {
            allocator.free(second_slab_allocation);
        }
        check_worker_list_expectations(&allocator, size_index, false, false);
    }
975
    /// Exhausts the global free list: slabs are handed out in index order and
    /// taking one more than `num_slabs` fails.
    #[test]
    fn test_out_of_slabs() {
        let slab_size = 65536;
        let num_workers = 4;
        let (_file, allocator) = initialize_for_test(slab_size, num_workers);

        let num_slabs = unsafe { allocator.base.header().as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }
990
    /// Frees made by a different worker land on the slab's remote free list
    /// and only reduce the owner's counter once it drains them; afterwards the
    /// reclaimed slab is reused for new allocations.
    #[test]
    fn test_remote_free_lists() {
        let slab_size = 65536;
        let num_workers = 4;
        let (file, allocator_0) = initialize_for_test(slab_size, num_workers);
        let file_for_join = file.try_clone().unwrap();
        let allocator_1 = Allocator::join(&file_for_join).unwrap();

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Fill exactly one slab from worker 0.
        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        // The filled slab should now be the sole entry on the full list.
        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        // Worker 1 frees worker 0's allocations: these go remote.
        for ptr in allocations {
            unsafe {
                let offset = allocator_0.offset(ptr);
                allocator_1.free_offset(offset);
            }
        }
        // Worker 0 hasn't drained yet, so its counter is unchanged.
        assert_eq!(
            allocator_0.outstanding_allocation_bytes(),
            allocations_per_slab as u64 * allocation_size as u64
        );

        // Remote-freed slots are not immediately reusable: a new allocation
        // must come from a different slab.
        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        // Draining settles the counter and makes the original slab reusable.
        allocator_0.clean_remote_free_lists();
        assert_eq!(allocator_0.outstanding_allocation_bytes(), 0);
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
1051
    /// Handles joined from an existing handle share the same mapping (same
    /// header pointer) but claim distinct worker slots.
    #[test]
    fn test_join_from_existing_reuses_mapping() {
        let slab_size = 65536;
        let num_workers = 4;
        let (_file, allocator_0) = initialize_for_test(slab_size, num_workers);

        let allocator_1 = Allocator::join_from_existing(&allocator_0).unwrap();
        assert_ne!(allocator_0.worker_index, allocator_1.worker_index);
        assert_eq!(
            allocator_0.base.header().as_ptr(),
            allocator_1.base.header().as_ptr()
        );

        let free_only_allocator = FreeOnlyAllocator::join_from_existing(&allocator_0);
        assert_eq!(
            allocator_0.base.header().as_ptr(),
            free_only_allocator.base.header().as_ptr()
        );
    }
1071
    /// Dropping the handle that created the mapping must not unmap it while a
    /// joined handle is still alive (the `Arc<MappedRegion>` keeps it mapped).
    #[test]
    fn test_drop_original_mapping_stays_alive() {
        let slab_size = 65536;
        let num_workers = 4;
        let (_file, allocator_0) = initialize_for_test(slab_size, num_workers);

        let allocator_1 = Allocator::join_from_existing(&allocator_0).unwrap();

        drop(allocator_0);

        // Touch the mapping through the surviving handle: would fault if the
        // region had been unmapped.
        let allocation_size = 2048;
        let allocation = allocator_1.allocate(allocation_size).unwrap();
        unsafe {
            allocation
                .as_ptr()
                .write_bytes(0xAB, allocation_size as usize);
            assert_eq!(allocation.as_ptr().read(), 0xAB);
            allocator_1.free(allocation);
        }
    }
1095
    /// Worker slots are a finite pool: joins fail once all are claimed, and a
    /// dropped handle's slot becomes claimable again. A free-only handle never
    /// consumes a slot.
    #[test]
    fn test_worker_reuse_with_free_only() {
        let slab_size = 65536;
        let num_workers = 4;
        let (_file, allocator_0) = initialize_for_test(slab_size, num_workers);
        let num_workers = unsafe { allocator_0.base.header().as_ref() }.num_workers;

        let free_only_allocator = FreeOnlyAllocator::join_from_existing(&allocator_0);

        // allocator_0 holds one slot, so only num_workers - 1 more joins fit.
        let mut allocators = Vec::new();
        for _ in 0..(num_workers - 1) {
            allocators.push(Allocator::join_from_existing_free_only(&free_only_allocator).unwrap());
        }
        assert!(Allocator::join_from_existing_free_only(&free_only_allocator).is_err());

        // Dropping a handle frees exactly one slot.
        drop(allocator_0);
        allocators.push(Allocator::join_from_existing_free_only(&free_only_allocator).unwrap());
        assert!(Allocator::join_from_existing_free_only(&free_only_allocator).is_err());

        drop(allocators);

        // After releasing everything, the full pool is claimable again.
        let mut allocators = Vec::new();
        for _ in 0..num_workers {
            allocators.push(Allocator::join_from_existing_free_only(&free_only_allocator).unwrap());
        }
        assert!(Allocator::join_from_existing_free_only(&free_only_allocator).is_err());

        // The re-joined handles are fully functional.
        let allocation_size = 2048u32;
        let allocation = allocators[0].allocate(allocation_size).unwrap();
        unsafe {
            allocation
                .as_ptr()
                .write_bytes(0xCD, allocation_size as usize);
            assert_eq!(allocation.as_ptr().read(), 0xCD);
            allocators[0].free(allocation);
        }
    }
1139
    /// A `FreeOnlyAllocator` free lands on the owning slab's remote free list
    /// with the correct slot index.
    #[test]
    fn test_free_only_allocator() {
        let slab_size = 65536;
        let num_workers = 4;
        let (file, allocator) = initialize_for_test(slab_size, num_workers);
        let file_for_join = file.try_clone().unwrap();
        let free_only_allocator = FreeOnlyAllocator::join(&file_for_join).unwrap();

        let allocation_size = 2048;
        let allocation = allocator.allocate(allocation_size).unwrap();

        let allocation_indexes =
            unsafe { allocator.find_allocation_indexes(allocator.offset(allocation)) };

        unsafe {
            let offset = allocator.offset(allocation);
            free_only_allocator.free_offset(offset);
        }

        // The freed slot must be visible on the slab's remote free list.
        assert_eq!(
            unsafe { allocator.remote_free_list(allocation_indexes.slab_index) }
                .iterate()
                .next()
                .unwrap(),
            allocation_indexes.index_within_slab
        );
    }
1169}