use crate::free_stack::FreeStack;
use crate::global_free_list::GlobalFreeList;
use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
use crate::linked_list_node::LinkedListNode;
use crate::remote_free_list::RemoteFreeList;
use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
use crate::slab_meta::SlabMeta;
use crate::worker_local_list::WorkerLocalList;
use crate::{error::Error, header::Header, size_classes::size_class_index};
use core::mem::offset_of;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use std::fs::File;

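/// A slab allocator over a shared, memory-mapped file.
///
/// Each worker holds its own `Allocator` handle (identified by `worker_index`)
/// onto the same mapping. Allocations are served from fixed-size slabs split
/// into size-class slots; every worker keeps per-size-class lists of the slabs
/// it owns (partial and full), takes fresh slabs from a global free list, and
/// receives frees from other workers through per-slab remote free lists (see
/// [`Allocator::clean_remote_free_lists`]).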
pub struct Allocator {
    header: NonNull<Header>,
    file_size: usize,
    worker_index: u32,
}

impl Allocator {
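    /// Initializes the allocator state in `file` and returns a handle for
    /// `worker_index`. Fails with [`Error::InvalidWorkerIndex`] if
    /// `worker_index >= num_workers`.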
    pub fn create(
        file: &File,
        file_size: usize,
        num_workers: u32,
        slab_size: u32,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        let header = crate::init::create(file, file_size, num_workers, slab_size)?;

        unsafe { Allocator::new(header, file_size, worker_index) }
    }

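    /// Attaches to a file that was already initialized by [`Allocator::create`].
    ///
    /// # Safety
    ///
    /// The file's contents must be a valid, initialized allocator header; this
    /// function trusts the header it maps.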
    pub unsafe fn join(file: &File, worker_index: u32) -> Result<Self, Error> {
        let (header, file_size) = crate::init::join(file)?;

        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }

        unsafe { Allocator::new(header, file_size, worker_index) }
    }

    unsafe fn new(
        header: NonNull<Header>,
        file_size: usize,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        Ok(Allocator {
            header,
            file_size,
            worker_index,
        })
    }
}

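// SAFETY: `Allocator` only stores a pointer into the shared mapping plus plain
// integers; the cross-worker state it touches in the mapping is accessed through
// atomics and the free-list types, so the handle may move to another thread.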
unsafe impl Send for Allocator {}

impl Drop for Allocator {
    fn drop(&mut self) {
        #[cfg(test)]
        {
            // Tests back the allocator with an in-process buffer (see `test_buffer`
            // below) rather than an mmap, so there is nothing to unmap.
            return;
        }

        #[allow(unreachable_code)]
        unsafe {
            libc::munmap(self.header.as_ptr() as *mut libc::c_void, self.file_size);
        }
    }
}

impl Allocator {
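    /// Allocates `size` bytes from the smallest size class that fits.
    ///
    /// Returns `None` if `size` is larger than the biggest size class or if no
    /// slab with free space can be obtained.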
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

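    /// Returns a slab owned by this worker with room for the given size class:
    /// the head of the partial list if there is one, otherwise a fresh slab
    /// taken from the global free list.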
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| self.take_slab(size_index))
    }

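    /// Pops a free slot from the slab's free stack; if that empties the stack,
    /// the slab is moved from the partial list to the full list.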
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        if free_stack.is_empty() {
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            let slab = unsafe { self.slab(slab_index) };
            let size = unsafe { size_class(size_index) };
            slab.byte_add(index_within_slab as usize * size as usize)
        })
    }

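    /// Takes a slab from the global free list, assigns it to this worker and
    /// size class, resets its free stack, and pushes it onto the partial list.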
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        unsafe {
            let slab_capacity = self.header.as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}

impl Allocator {
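    /// Frees an allocation previously returned by [`Allocator::allocate`].
    ///
    /// If the slab is owned by this worker the slot is returned directly;
    /// otherwise it is pushed onto the slab's remote free list for the owning
    /// worker to reclaim.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by `allocate` on an allocator backed by the
    /// same mapping and must not have been freed already.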
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        let offset = unsafe { self.offset(ptr) };
        let allocation_indexes = self.find_allocation_indexes(offset);

        if self.worker_index
            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                .assigned_worker
                .load(Ordering::Acquire)
        {
            self.local_free(allocation_indexes);
        } else {
            self.remote_free(allocation_indexes);
        }
    }

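    /// Returns a slot to its slab's free stack and updates list membership:
    /// a full slab gaining a free slot moves back to the partial list, and a
    /// slab that becomes completely empty is returned to the global free list.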
    fn local_free(&self, allocation_indexes: AllocationIndexes) {
        let (was_full, is_empty) = unsafe {
            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
            let was_full = free_stack.is_empty();
            free_stack.push(allocation_indexes.index_within_slab);
            (was_full, free_stack.is_full())
        };

        match (was_full, is_empty) {
            (true, true) => {
                unreachable!("slab would only hold a single allocation - this is not allowed");
            }
            (true, false) => {
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                unsafe {
                    self.worker_local_list_full(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .push(allocation_indexes.slab_index);
                }
            }
            (false, true) => {
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.global_free_list().push(allocation_indexes.slab_index);
                }
            }
            (false, false) => {
                // The slab still has other allocations and still has free slots;
                // it stays on the partial list.
            }
        }
    }

    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
        unsafe {
            self.remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        ptr.byte_offset_from(self.header) as usize
    }

    pub fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        unsafe { self.header.byte_add(offset) }.cast()
    }

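    /// Maps a byte offset within the mapping to the slab it falls into and the
    /// slot index inside that slab.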
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        let (slab_index, offset_within_slab) = {
            let header = unsafe { self.header.as_ref() };
            debug_assert!(offset >= header.slabs_offset as usize);
            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");

            let offset_within_slab =
                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };

            (slab_index, offset_within_slab)
        };

        let index_within_slab = {
            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
                .size_class_index
                .load(Ordering::Acquire);
            let size_class = unsafe { size_class(size_class_index) };
            (offset_within_slab / size_class) as u16
        };

        AllocationIndexes {
            slab_index,
            index_within_slab,
        }
    }

    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
        debug_assert!(slab_size.is_power_of_two());
        (offset_from_slab_start & (slab_size as usize - 1)) as u32
    }
}

impl Allocator {
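    /// Drains the remote free lists of every slab owned by this worker (both
    /// partial and full, across all size classes) and applies the frees locally.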
    pub fn clean_remote_free_lists(&self) {
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free(AllocationIndexes {
                    slab_index,
                    index_within_slab,
                })
            }
        }
    }
}

impl Allocator {
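    // Accessors that resolve structures laid out in the shared mapping,
    // relative to the header.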
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        let offset = unsafe { self.header.as_ref() }.free_list_elements_offset;
        unsafe { self.header.byte_add(offset as usize) }.cast()
    }

    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        let head = &unsafe { self.header.as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        unsafe { GlobalFreeList::new(head, list) }
    }

    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        let all_workers_heads = unsafe {
            self.header
                .byte_add(offset_of!(Header, worker_local_list_heads))
                .cast::<WorkerLocalListHeads>()
        };
        let worker_heads = unsafe { all_workers_heads.add(self.worker_index as usize) };
        let worker_head = unsafe {
            worker_heads
                .cast::<WorkerLocalListPartialFullHeads>()
                .add(size_index)
                .as_ref()
        };

        worker_head
    }

    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        let slab = unsafe { self.slab(slab_index) };

        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        let offset = unsafe { self.header.as_ref() }.slab_shared_meta_offset;
        let slab_metas = unsafe { self.header.byte_add(offset as usize).cast::<SlabMeta>() };
        unsafe { slab_metas.add(slab_index as usize) }
    }

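    /// Resolves the free stack of a slab. The stack is stored in the mapping as
    /// its `top` counter, followed by its `capacity`, followed by the stack
    /// entries themselves.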
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        let mut top = unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        let mut capacity = unsafe { top.add(1) };
        let trailing_stack = unsafe { capacity.add(1) };
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }
}

struct AllocationIndexes {
    slab_index: u32,
    index_within_slab: u16,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        header::layout,
        init::initialize,
        size_classes::{MAX_SIZE, SIZE_CLASSES},
    };

    #[repr(C, align(4096))]
    struct DummyTypeForAlignment([u8; 4096]);
    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024;
    const TEST_BUFFER_CAPACITY: usize =
        TEST_BUFFER_SIZE / core::mem::size_of::<DummyTypeForAlignment>();

    fn test_buffer() -> Vec<DummyTypeForAlignment> {
        (0..TEST_BUFFER_CAPACITY)
            .map(|_| DummyTypeForAlignment([0; 4096]))
            .collect::<Vec<_>>()
    }

    fn initialize_for_test(
        buffer: *mut u8,
        slab_size: u32,
        num_workers: u32,
        worker_index: u32,
    ) -> Allocator {
        let file_size = TEST_BUFFER_SIZE;

        let layout = layout::offsets(file_size, slab_size, num_workers);

        let header = NonNull::new(buffer as *mut Header).unwrap();
        unsafe {
            initialize::allocator(header, slab_size, num_workers, layout);
        }

        unsafe { Allocator::new(header, file_size, worker_index) }.unwrap()
    }

    fn join_for_tests(buffer: *mut u8, worker_index: u32) -> Allocator {
        let header = NonNull::new(buffer as *mut Header).unwrap();
        unsafe { Allocator::new(header, TEST_BUFFER_SIZE, worker_index) }.unwrap()
    }

    #[test]
    fn test_allocator() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let mut allocations = vec![];

        for size_class in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [size_class - 1, *size_class, size_class + 1] {
                allocations.push(allocator.allocate(size).unwrap());
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
        }
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            unsafe {
                allocator.free(ptr);
            }
        }

        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }

    #[test]
    fn test_slab_list_transitions() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        check_worker_list_expectations(&allocator, size_index, false, false);

        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        check_worker_list_expectations(&allocator, size_index, true, false);

        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        check_worker_list_expectations(&allocator, size_index, false, true);

        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        check_worker_list_expectations(&allocator, size_index, true, true);

        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        unsafe {
            allocator.free(second_slab_allocation);
        }
        check_worker_list_expectations(&allocator, size_index, false, false);
    }

    #[test]
    fn test_out_of_slabs() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let num_slabs = unsafe { allocator.header.as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }

    #[test]
    fn test_remote_free_lists() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;

        let allocator_0 =
            initialize_for_test(buffer.as_mut_ptr().cast(), slab_size, num_workers, 0);
        let allocator_1 = join_for_tests(buffer.as_mut_ptr().cast(), 1);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        for ptr in allocations {
            unsafe {
                allocator_1.free(ptr);
            }
        }

        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        allocator_0.clean_remote_free_lists();
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
}