use crate::free_stack::FreeStack;
use crate::global_free_list::GlobalFreeList;
use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
use crate::linked_list_node::LinkedListNode;
use crate::remote_free_list::RemoteFreeList;
use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
use crate::slab_meta::SlabMeta;
use crate::worker_local_list::WorkerLocalList;
use crate::{error::Error, header::Header, size_classes::size_class_index};
use core::mem::offset_of;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use std::path::Path;

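/// Per-worker handle to the allocator state stored in the memory-mapped
/// allocator file.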
pub struct Allocator {
    header: NonNull<Header>,
    file_size: usize,
    worker_index: u32,
}

impl Allocator {
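    /// Creates and initializes the allocator file at `path`, then returns a
    /// handle for `worker_index`.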
    pub fn create(
        path: impl AsRef<Path>,
        file_size: usize,
        num_workers: u32,
        slab_size: u32,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        let header = crate::init::create(path, file_size, num_workers, slab_size)?;

        unsafe { Allocator::new(header, file_size, worker_index) }
    }

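    /// Opens an allocator file that was already created at `path` and returns
    /// a handle for `worker_index`.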
    pub fn join(path: impl AsRef<Path>, worker_index: u32) -> Result<Self, Error> {
        let (header, file_size) = crate::init::join(path)?;

        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }

        unsafe { Allocator::new(header, file_size, worker_index) }
    }

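    /// # Safety
    ///
    /// `header` must point to an initialized allocator region of `file_size`
    /// bytes that remains valid for the lifetime of the returned `Allocator`.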
    unsafe fn new(
        header: NonNull<Header>,
        file_size: usize,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        Ok(Allocator {
            header,
            file_size,
            worker_index,
        })
    }
}

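/// Unmaps the shared region when the handle is dropped. Tests back the region
/// with a plain `Vec` rather than an mmap, so the unmap is skipped under
/// `cfg(test)`.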
impl Drop for Allocator {
    fn drop(&mut self) {
        #[cfg(test)]
        {
            return;
        }

        #[allow(unreachable_code)]
        unsafe {
            libc::munmap(self.header.as_ptr() as *mut libc::c_void, self.file_size);
        }
    }
}

impl Allocator {
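    /// Allocates `size` bytes, returning `None` if `size` exceeds the largest
    /// size class or no slab with free space is available.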
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

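    /// Finds a slab with free space for `size_index`: the head of the worker's
    /// partial list if one exists, otherwise a fresh slab taken from the
    /// global free list.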
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| self.take_slab(size_index))
    }

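    /// Pops one free slot from the slab's free stack. If that empties the
    /// stack, the slab is moved from the partial list to the full list.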
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        if free_stack.is_empty() {
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            let slab = unsafe { self.slab(slab_index) };
            let size = unsafe { size_class(size_index) };
            unsafe { slab.byte_add(index_within_slab as usize * size as usize) }
        })
    }

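    /// Takes a slab from the global free list, assigns it to this worker for
    /// `size_index`, resets its free stack to the slab's capacity, and pushes
    /// it onto the partial list.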
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        unsafe {
            let slab_capacity = self.header.as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}

impl Allocator {
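    /// Frees an allocation. If this worker owns the slab, the slot is returned
    /// directly to the slab's free stack; otherwise it is pushed onto the
    /// slab's remote free list for the owner to reclaim.
    ///
    /// # Safety
    ///
    /// `ptr` must be a live allocation returned by `allocate` on an allocator
    /// backed by the same shared region.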
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        let offset = unsafe { self.offset(ptr) };
        let allocation_indexes = self.find_allocation_indexes(offset);

        if self.worker_index
            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                .assigned_worker
                .load(Ordering::Acquire)
        {
            self.local_free(allocation_indexes);
        } else {
            self.remote_free(allocation_indexes);
        }
    }

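    /// Returns a slot to a slab owned by this worker and moves the slab
    /// between lists as needed: full to partial on the first free, partial to
    /// the global free list once every slot is free again.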
    fn local_free(&self, allocation_indexes: AllocationIndexes) {
        let (was_full, is_empty) = unsafe {
            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
            let was_full = free_stack.is_empty();
            free_stack.push(allocation_indexes.index_within_slab);
            (was_full, free_stack.is_full())
        };

        match (was_full, is_empty) {
            (true, true) => {
                unreachable!("a slab that holds only a single allocation is not supported");
            }
            (true, false) => {
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                unsafe {
                    self.worker_local_list_full(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .push(allocation_indexes.slab_index);
                }
            }
            (false, true) => {
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.global_free_list().push(allocation_indexes.slab_index);
                }
            }
            (false, false) => {}
        }
    }

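    /// Pushes the slot onto the slab's remote free list; the owning worker
    /// reclaims it later via `clean_remote_free_lists`.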
    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
        unsafe {
            self.remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

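    /// Converts a pointer inside the mapped region into a byte offset from the
    /// start of the region.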
    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        unsafe { ptr.byte_offset_from(self.header) as usize }
    }

    pub fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        unsafe { self.header.byte_add(offset) }.cast()
    }

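    /// Maps a byte offset within the region to the slab containing it and the
    /// allocation's slot index within that slab.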
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        let (slab_index, offset_within_slab) = {
            let header = unsafe { self.header.as_ref() };
            debug_assert!(offset >= header.slabs_offset as usize);
            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");

            let offset_within_slab =
                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };

            (slab_index, offset_within_slab)
        };

        let index_within_slab = {
            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
                .size_class_index
                .load(Ordering::Acquire);
            let size_class = unsafe { size_class(size_class_index) };
            (offset_within_slab / size_class) as u16
        };

        AllocationIndexes {
            slab_index,
            index_within_slab,
        }
    }

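    /// Byte offset of `offset_from_slab_start` within its slab. `slab_size`
    /// must be a power of two.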
    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
        debug_assert!(slab_size.is_power_of_two());
        (offset_from_slab_start & (slab_size as usize - 1)) as u32
    }
}

impl Allocator {
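    /// Drains the remote free list of every slab this worker owns, applying
    /// each remotely freed slot as a local free.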
    pub fn clean_remote_free_lists(&self) {
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free(AllocationIndexes {
                    slab_index,
                    index_within_slab,
                })
            }
        }
    }
}

impl Allocator {
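    /// Pointer to the array of linked-list nodes shared by the global free
    /// list and the worker-local lists.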
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        let offset = unsafe { self.header.as_ref() }.free_list_elements_offset;
        unsafe { self.header.byte_add(offset as usize) }.cast()
    }

    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        let head = &unsafe { self.header.as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        unsafe { GlobalFreeList::new(head, list) }
    }

    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

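    /// Returns this worker's (partial, full) list heads for `size_index`,
    /// located in the per-worker head table inside the header.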
    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        let all_workers_heads = unsafe {
            self.header
                .byte_add(offset_of!(Header, worker_local_list_heads))
                .cast::<WorkerLocalListHeads>()
        };
        let worker_heads = unsafe { all_workers_heads.add(self.worker_index as usize) };
        let worker_head = unsafe {
            worker_heads
                .cast::<WorkerLocalListPartialFullHeads>()
                .add(size_index)
                .as_ref()
        };

        worker_head
    }

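    /// View of the slab's remote free list, built from the slab's size class,
    /// the remote head stored in its shared metadata, and the slab base
    /// pointer.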
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        let slab = unsafe { self.slab(slab_index) };

        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

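    /// Pointer to the shared metadata entry for `slab_index`.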
    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        let offset = unsafe { self.header.as_ref() }.slab_shared_meta_offset;
        let slab_metas = unsafe { self.header.byte_add(offset as usize).cast::<SlabMeta>() };
        unsafe { slab_metas.add(slab_index as usize) }
    }

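    /// View of the slab's free stack, stored as a `top` field, a `capacity`
    /// field, and the stack entries laid out immediately after them.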
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        let mut top = unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        let mut capacity = unsafe { top.add(1) };
        let trailing_stack = unsafe { capacity.add(1) };
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

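    /// Pointer to the start of slab `slab_index` within the slabs region.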
    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }
}

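/// Identifies an allocation by its slab and its slot index within that slab.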
struct AllocationIndexes {
    slab_index: u32,
    index_within_slab: u16,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        header::layout,
        init::initialize,
        size_classes::{MAX_SIZE, SIZE_CLASSES},
    };

    #[repr(C, align(4096))]
    struct DummyTypeForAlignment([u8; 4096]);

    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024;
    const TEST_BUFFER_CAPACITY: usize =
        TEST_BUFFER_SIZE / core::mem::size_of::<DummyTypeForAlignment>();

    fn test_buffer() -> Vec<DummyTypeForAlignment> {
        (0..TEST_BUFFER_CAPACITY)
            .map(|_| DummyTypeForAlignment([0; 4096]))
            .collect::<Vec<_>>()
    }

    fn initialize_for_test(
        buffer: *mut u8,
        slab_size: u32,
        num_workers: u32,
        worker_index: u32,
    ) -> Allocator {
        let file_size = TEST_BUFFER_SIZE;

        let layout = layout::offsets(file_size, slab_size, num_workers);

        let header = NonNull::new(buffer as *mut Header).unwrap();
        unsafe {
            initialize::allocator(header, slab_size, num_workers, layout);
        }

        unsafe { Allocator::new(header, file_size, worker_index) }.unwrap()
    }

    fn join_for_tests(buffer: *mut u8, worker_index: u32) -> Allocator {
        let header = NonNull::new(buffer as *mut Header).unwrap();
        unsafe { Allocator::new(header, TEST_BUFFER_SIZE, worker_index) }.unwrap()
    }

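    // Allocates around each size-class boundary, verifies the partial lists
    // are populated, then frees everything and verifies they drain.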
    #[test]
    fn test_allocator() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let mut allocations = vec![];

        for size_class in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [size_class - 1, *size_class, size_class + 1] {
                allocations.push(allocator.allocate(size).unwrap());
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
        }
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            unsafe {
                allocator.free(ptr);
            }
        }

        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }

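    // Walks a single slab through empty -> partial -> full and back again,
    // checking the worker-local lists after each transition.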
    #[test]
    fn test_slab_list_transitions() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        check_worker_list_expectations(&allocator, size_index, false, false);

        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        check_worker_list_expectations(&allocator, size_index, true, false);

        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        check_worker_list_expectations(&allocator, size_index, false, true);

        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        check_worker_list_expectations(&allocator, size_index, true, true);

        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        unsafe {
            allocator.free(second_slab_allocation);
        }
        check_worker_list_expectations(&allocator, size_index, false, false);
    }

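    // The global free list must run out cleanly: taking more slabs than exist
    // returns None.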
    #[test]
    fn test_out_of_slabs() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let num_slabs = unsafe { allocator.header.as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }

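    // Frees issued by a non-owning worker land on the remote free list and
    // only become reusable by the owner after clean_remote_free_lists.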
    #[test]
    fn test_remote_free_lists() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;

        let allocator_0 =
            initialize_for_test(buffer.as_mut_ptr().cast(), slab_size, num_workers, 0);
        let allocator_1 = join_for_tests(buffer.as_mut_ptr().cast(), 1);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        for ptr in allocations {
            unsafe {
                allocator_1.free(ptr);
            }
        }

        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        allocator_0.clean_remote_free_lists();
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
}