use crate::free_stack::FreeStack;
use crate::global_free_list::GlobalFreeList;
use crate::header::{self, WorkerLocalListHeads, WorkerLocalListPartialFullHeads};
use crate::linked_list_node::LinkedListNode;
use crate::remote_free_list::RemoteFreeList;
use crate::size_classes::{size_class, NUM_SIZE_CLASSES};
use crate::slab_meta::SlabMeta;
use crate::worker_local_list::WorkerLocalList;
use crate::{error::Error, header::Header, size_classes::size_class_index};
use core::mem::offset_of;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use std::fs::File;

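/// Per-worker handle to a slab allocator that lives in a shared, file-backed
/// memory region. `header` points at the start of the mapped region, `file_size`
/// is the length of the mapping (needed for `munmap` on drop), and `worker_index`
/// selects this worker's local free lists inside the shared header.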
pub struct Allocator {
    header: NonNull<Header>,
    file_size: usize,
    worker_index: u32,
}

impl Allocator {
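    /// Initializes a new allocator region in `file` and returns a handle for
    /// `worker_index`. Fails if `worker_index` is out of range or if
    /// `crate::init::create` rejects the parameters.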
    pub fn create(
        file: &File,
        file_size: usize,
        num_workers: u32,
        slab_size: u32,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        let header = crate::init::create(file, file_size, num_workers, slab_size)?;

        unsafe { Allocator::new(header, file_size, worker_index) }
    }

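    /// Attaches to an allocator region that has already been initialized in `file`
    /// and returns a handle for `worker_index`.
    ///
    /// # Safety
    ///
    /// The file must contain a region previously set up by [`Allocator::create`];
    /// the exact requirements are those of `crate::init::join`.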
    pub unsafe fn join(file: &File, worker_index: u32) -> Result<Self, Error> {
        let (header, file_size) = crate::init::join(file)?;

        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }

        unsafe { Allocator::new(header, file_size, worker_index) }
    }

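    /// Builds a handle from an already-mapped header pointer, validating
    /// `worker_index` against the worker count recorded in the header.
    ///
    /// # Safety
    ///
    /// `header` must point to a valid, initialized `Header` at the start of a
    /// mapping of `file_size` bytes.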
    unsafe fn new(
        header: NonNull<Header>,
        file_size: usize,
        worker_index: u32,
    ) -> Result<Self, Error> {
        if worker_index >= unsafe { header.as_ref() }.num_workers {
            return Err(Error::InvalidWorkerIndex);
        }
        Ok(Allocator {
            header,
            file_size,
            worker_index,
        })
    }
}

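// `NonNull<Header>` makes `Allocator` `!Send` by default, so `Send` is asserted
// manually; the shared region itself is built to be used from multiple workers.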
unsafe impl Send for Allocator {}

impl Drop for Allocator {
    fn drop(&mut self) {
        // In tests the region is backed by an ordinary `Vec` (see the tests
        // module below), so there is nothing to unmap.
        #[cfg(test)]
        {
            return;
        }

        // The early return above makes this unreachable in test builds.
        #[allow(unreachable_code)]
        unsafe {
            libc::munmap(self.header.as_ptr() as *mut libc::c_void, self.file_size);
        }
    }
}

impl Allocator {
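    /// Allocates `size` bytes, returning `None` if `size` exceeds the largest size
    /// class or if no slab with a free slot is available.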
    pub fn allocate(&self, size: u32) -> Option<NonNull<u8>> {
        let size_index = size_class_index(size)?;

        let slab_index = unsafe { self.find_allocatable_slab_index(size_index) }?;
        unsafe { self.allocate_within_slab(slab_index, size_index) }
    }

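    /// Finds a slab with room for an allocation of this size class: the head of
    /// the worker's partial list if there is one, otherwise a fresh slab taken
    /// from the global free list.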
    unsafe fn find_allocatable_slab_index(&self, size_index: usize) -> Option<u32> {
        unsafe { self.worker_local_list_partial(size_index) }
            .head()
            .or_else(|| self.take_slab(size_index))
    }

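    /// Pops one slot from the slab's free stack and returns a pointer to it. If
    /// the free stack is empty after the pop, the slab is moved from the worker's
    /// partial list to its full list.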
    unsafe fn allocate_within_slab(
        &self,
        slab_index: u32,
        size_index: usize,
    ) -> Option<NonNull<u8>> {
        let mut free_stack = unsafe { self.slab_free_stack(slab_index) };
        let maybe_index_within_slab = free_stack.pop();

        if free_stack.is_empty() {
            unsafe {
                self.worker_local_list_partial(size_index)
                    .remove(slab_index);
            }
            unsafe {
                self.worker_local_list_full(size_index).push(slab_index);
            }
        }

        maybe_index_within_slab.map(|index_within_slab| {
            let slab = unsafe { self.slab(slab_index) };
            let size = unsafe { size_class(size_index) };
            slab.byte_add(index_within_slab as usize * size as usize)
        })
    }

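    /// Takes a slab off the global free list, assigns it to this worker and size
    /// class, resets its free stack to the slab's slot capacity, and pushes it
    /// onto the worker's partial list.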
    unsafe fn take_slab(&self, size_index: usize) -> Option<u32> {
        let slab_index = self.global_free_list().pop()?;

        unsafe { self.slab_meta(slab_index).as_ref() }.assign(self.worker_index, size_index);
        unsafe {
            let slab_capacity = self.header.as_ref().slab_size / size_class(size_index);
            self.slab_free_stack(slab_index).reset(slab_capacity as u16);
        };
        let mut worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
        unsafe { worker_local_list.push(slab_index) };
        Some(slab_index)
    }
}

impl Allocator {
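    /// Frees the allocation at `ptr` by converting it to an offset from the start
    /// of the region and delegating to [`Self::free_offset`].
    ///
    /// # Safety
    ///
    /// `ptr` must point to a live allocation previously returned by
    /// [`Allocator::allocate`] on the same shared region.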
    pub unsafe fn free(&self, ptr: NonNull<u8>) {
        let offset = unsafe { self.offset(ptr) };
        self.free_offset(offset);
    }

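    /// Frees the allocation `offset` bytes from the start of the region. If the
    /// containing slab is assigned to this worker the slot is released directly;
    /// otherwise it is pushed onto the slab's remote free list for the owning
    /// worker to reclaim via [`Self::clean_remote_free_lists`].
    ///
    /// # Safety
    ///
    /// `offset` must identify a live allocation made from this region.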
    pub unsafe fn free_offset(&self, offset: usize) {
        let allocation_indexes = self.find_allocation_indexes(offset);

        if self.worker_index
            == unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                .assigned_worker
                .load(Ordering::Acquire)
        {
            self.local_free(allocation_indexes);
        } else {
            self.remote_free(allocation_indexes);
        }
    }

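    /// Releases a slot in a slab owned by this worker and moves the slab between
    /// the full list, the partial list, and the global free list when its
    /// occupancy crosses a boundary.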
    fn local_free(&self, allocation_indexes: AllocationIndexes) {
        let (was_full, is_empty) = unsafe {
            let mut free_stack = self.slab_free_stack(allocation_indexes.slab_index);
            // An empty free stack means every slot was handed out, i.e. the slab was full.
            let was_full = free_stack.is_empty();
            free_stack.push(allocation_indexes.index_within_slab);
            // A full free stack means every slot is free again, i.e. the slab is now empty.
            (was_full, free_stack.is_full())
        };

        match (was_full, is_empty) {
            (true, true) => {
                unreachable!(
                    "slab was full before the free and empty after it, which would mean it holds only one allocation - this is not allowed"
                );
            }
            (true, false) => {
                // Full -> partial: the slab has free space again.
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                unsafe {
                    self.worker_local_list_full(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .push(allocation_indexes.slab_index);
                }
            }
            (false, true) => {
                // Partial -> empty: return the slab to the global free list.
                let size_index = unsafe { self.slab_meta(allocation_indexes.slab_index).as_ref() }
                    .size_class_index
                    .load(Ordering::Relaxed);
                unsafe {
                    self.worker_local_list_partial(size_index)
                        .remove(allocation_indexes.slab_index);
                }
                unsafe {
                    self.global_free_list().push(allocation_indexes.slab_index);
                }
            }
            (false, false) => {
                // Still partially occupied: the slab stays on the partial list.
            }
        }
    }

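    /// Pushes the freed slot onto the owning slab's remote free list; the worker
    /// that owns the slab applies it later via [`Self::clean_remote_free_lists`].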
    fn remote_free(&self, allocation_indexes: AllocationIndexes) {
        unsafe {
            self.remote_free_list(allocation_indexes.slab_index)
                .push(allocation_indexes.index_within_slab);
        }
    }

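    /// Converts a pointer inside the mapped region into its byte offset from the
    /// header, a representation that is independent of where the region happens
    /// to be mapped.
    ///
    /// # Safety
    ///
    /// `ptr` must point into the region mapped at `self.header`.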
    pub unsafe fn offset(&self, ptr: NonNull<u8>) -> usize {
        ptr.byte_offset_from(self.header) as usize
    }

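    /// Converts a byte offset from the start of the region back into a pointer.
    ///
    /// # Safety
    ///
    /// `offset` must lie within the mapped region.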
    pub unsafe fn ptr_from_offset(&self, offset: usize) -> NonNull<u8> {
        unsafe { self.header.byte_add(offset) }.cast()
    }

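    /// Maps a byte offset into the `(slab_index, index_within_slab)` pair that
    /// identifies the allocation, using the slab area offset and slab size
    /// recorded in the header plus the slab's size class.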
    fn find_allocation_indexes(&self, offset: usize) -> AllocationIndexes {
        let (slab_index, offset_within_slab) = {
            let header = unsafe { self.header.as_ref() };
            debug_assert!(offset >= header.slabs_offset as usize);
            let offset_from_slab_start = offset.wrapping_sub(header.slabs_offset as usize);
            let slab_index = (offset_from_slab_start / header.slab_size as usize) as u32;
            debug_assert!(slab_index < header.num_slabs, "slab index out of bounds");

            let offset_within_slab =
                unsafe { Self::offset_within_slab(header.slab_size, offset_from_slab_start) };

            (slab_index, offset_within_slab)
        };

        let index_within_slab = {
            let size_class_index = unsafe { self.slab_meta(slab_index).as_ref() }
                .size_class_index
                .load(Ordering::Acquire);
            let size_class = unsafe { size_class(size_class_index) };
            (offset_within_slab / size_class) as u16
        };

        AllocationIndexes {
            slab_index,
            index_within_slab,
        }
    }

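    /// Byte offset of the allocation within its slab. Relies on `slab_size` being
    /// a power of two so the remainder can be computed with a mask.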
    const unsafe fn offset_within_slab(slab_size: u32, offset_from_slab_start: usize) -> u32 {
        debug_assert!(slab_size.is_power_of_two());
        (offset_from_slab_start & (slab_size as usize - 1)) as u32
    }
}

impl Allocator {
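    /// Drains the remote free lists of every slab on this worker's partial and
    /// full lists, applying each remotely freed slot as a local free. This is how
    /// the owning worker reclaims slots freed on its behalf by other workers.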
    pub fn clean_remote_free_lists(&self) {
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { self.worker_local_list_partial(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);

            let worker_local_list = unsafe { self.worker_local_list_full(size_index) };
            self.clean_remote_free_lists_for_list(worker_local_list);
        }
    }

    fn clean_remote_free_lists_for_list(&self, worker_local_list: WorkerLocalList) {
        for slab_index in worker_local_list.iterate() {
            let remote_free_list = unsafe { self.remote_free_list(slab_index) };
            for index_within_slab in remote_free_list.drain() {
                self.local_free(AllocationIndexes {
                    slab_index,
                    index_within_slab,
                })
            }
        }
    }
}

impl Allocator {
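    // The accessors below construct typed views over sections of the shared
    // region, using the offsets recorded in the header.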
    fn free_list_elements(&self) -> NonNull<LinkedListNode> {
        let offset = unsafe { self.header.as_ref() }.free_list_elements_offset;
        unsafe { self.header.byte_add(offset as usize) }.cast()
    }

    fn global_free_list<'a>(&'a self) -> GlobalFreeList<'a> {
        let head = &unsafe { self.header.as_ref() }.global_free_list_head;
        let list = self.free_list_elements();
        unsafe { GlobalFreeList::new(head, list) }
    }

    unsafe fn worker_local_list_partial<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).partial;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    unsafe fn worker_local_list_full<'a>(&'a self, size_index: usize) -> WorkerLocalList<'a> {
        let head = &self.worker_head(size_index).full;
        let list = self.free_list_elements();

        unsafe { WorkerLocalList::new(head, list) }
    }

    fn worker_head(&self, size_index: usize) -> &WorkerLocalListPartialFullHeads {
        let all_workers_heads = unsafe {
            self.header
                .byte_add(offset_of!(Header, worker_local_list_heads))
                .cast::<WorkerLocalListHeads>()
        };
        let worker_heads = unsafe { all_workers_heads.add(self.worker_index as usize) };
        let worker_head = unsafe {
            worker_heads
                .cast::<WorkerLocalListPartialFullHeads>()
                .add(size_index)
                .as_ref()
        };

        worker_head
    }

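    /// Builds a view over a slab's remote free list from the list head stored in
    /// the slab's metadata, the slab's base pointer, and its size class.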
    unsafe fn remote_free_list<'a>(&'a self, slab_index: u32) -> RemoteFreeList<'a> {
        let (head, slab_item_size) = {
            let slab_meta = unsafe { self.slab_meta(slab_index).as_ref() };
            let size_class =
                unsafe { size_class(slab_meta.size_class_index.load(Ordering::Acquire)) };
            (&slab_meta.remote_free_stack_head, size_class)
        };
        let slab = unsafe { self.slab(slab_index) };

        unsafe { RemoteFreeList::new(slab_item_size, head, slab) }
    }

    unsafe fn slab_meta(&self, slab_index: u32) -> NonNull<SlabMeta> {
        let offset = unsafe { self.header.as_ref() }.slab_shared_meta_offset;
        let slab_metas = unsafe { self.header.byte_add(offset as usize).cast::<SlabMeta>() };
        unsafe { slab_metas.add(slab_index as usize) }
    }

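    /// Builds a view over a slab's free stack. The stacks are packed contiguously
    /// at `slab_free_stacks_offset`, one per slab; each consists of a `top`
    /// counter, a `capacity` counter, and the stack entries that follow them.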
    unsafe fn slab_free_stack<'a>(&'a self, slab_index: u32) -> FreeStack<'a> {
        let (slab_size, offset) = {
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slab_free_stacks_offset)
        };
        let free_stack_size = header::layout::single_free_stack_size(slab_size);

        let mut top = unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * free_stack_size)
                .cast()
        };
        let mut capacity = unsafe { top.add(1) };
        let trailing_stack = unsafe { capacity.add(1) };
        unsafe { FreeStack::new(top.as_mut(), capacity.as_mut(), trailing_stack) }
    }

    unsafe fn slab(&self, slab_index: u32) -> NonNull<u8> {
        let (slab_size, offset) = {
            let header = unsafe { self.header.as_ref() };
            (header.slab_size, header.slabs_offset)
        };
        unsafe {
            self.header
                .byte_add(offset as usize)
                .byte_add(slab_index as usize * slab_size as usize)
                .cast()
        }
    }
}

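/// Identifies an allocation by its slab and by its slot index within that slab.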
struct AllocationIndexes {
    slab_index: u32,
    index_within_slab: u16,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        header::layout,
        init::initialize,
        size_classes::{MAX_SIZE, SIZE_CLASSES},
    };

    #[repr(C, align(4096))]
    struct DummyTypeForAlignment([u8; 4096]);
    const TEST_BUFFER_SIZE: usize = 64 * 1024 * 1024;
    const TEST_BUFFER_CAPACITY: usize =
        TEST_BUFFER_SIZE / core::mem::size_of::<DummyTypeForAlignment>();

    fn test_buffer() -> Vec<DummyTypeForAlignment> {
        (0..TEST_BUFFER_CAPACITY)
            .map(|_| DummyTypeForAlignment([0; 4096]))
            .collect::<Vec<_>>()
    }

    fn initialize_for_test(
        buffer: *mut u8,
        slab_size: u32,
        num_workers: u32,
        worker_index: u32,
    ) -> Allocator {
        let file_size = TEST_BUFFER_SIZE;

        let layout = layout::offsets(file_size, slab_size, num_workers);

        let header = NonNull::new(buffer as *mut Header).unwrap();
        unsafe {
            initialize::allocator(header, slab_size, num_workers, layout);
        }

        unsafe { Allocator::new(header, file_size, worker_index) }.unwrap()
    }

    fn join_for_tests(buffer: *mut u8, worker_index: u32) -> Allocator {
        let header = NonNull::new(buffer as *mut Header).unwrap();
        unsafe { Allocator::new(header, TEST_BUFFER_SIZE, worker_index) }.unwrap()
    }

    #[test]
    fn test_allocator() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let mut allocations = vec![];

        // Allocate around every size-class boundary.
        for size_class in SIZE_CLASSES[..NUM_SIZE_CLASSES - 1].iter() {
            for size in [size_class - 1, *size_class, size_class + 1] {
                allocations.push(allocator.allocate(size).unwrap());
            }
        }
        for size in [MAX_SIZE - 1, MAX_SIZE] {
            allocations.push(allocator.allocate(size).unwrap());
        }
        assert!(allocator.allocate(MAX_SIZE + 1).is_none());

        // Every size class should now have a slab on its partial list.
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert!(worker_local_list.head().is_some());
        }

        for ptr in allocations {
            unsafe {
                allocator.free(ptr);
            }
        }

        // After freeing everything, the partial lists should be empty again.
        for size_index in 0..NUM_SIZE_CLASSES {
            let worker_local_list = unsafe { allocator.worker_local_list_partial(size_index) };
            assert_eq!(worker_local_list.head(), None);
        }
    }

    #[test]
    fn test_slab_list_transitions() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        fn check_worker_list_expectations(
            allocator: &Allocator,
            size_index: usize,
            expect_partial: bool,
            expect_full: bool,
        ) {
            unsafe {
                let partial_list = allocator.worker_local_list_partial(size_index);
                assert_eq!(
                    partial_list.head().is_some(),
                    expect_partial,
                    "{:?}",
                    partial_list.head()
                );

                let full_list = allocator.worker_local_list_full(size_index);
                assert_eq!(
                    full_list.head().is_some(),
                    expect_full,
                    "{:?}",
                    full_list.head()
                );
            }
        }

        check_worker_list_expectations(&allocator, size_index, false, false);

        // Fill the first slab up to its last free slot.
        let mut first_slab_allocations = vec![];
        for _ in 0..allocations_per_slab - 1 {
            first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());
        }

        check_worker_list_expectations(&allocator, size_index, true, false);

        // Allocating the last slot moves the slab to the full list.
        first_slab_allocations.push(allocator.allocate(allocation_size).unwrap());

        check_worker_list_expectations(&allocator, size_index, false, true);

        // The next allocation has to come from a second slab.
        let second_slab_allocation = allocator.allocate(allocation_size).unwrap();

        check_worker_list_expectations(&allocator, size_index, true, true);

        // Freeing one slot moves the first slab back to the partial list.
        let mut first_slab_allocations = first_slab_allocations.drain(..);
        unsafe {
            allocator.free(first_slab_allocations.next().unwrap());
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Freeing the rest empties the first slab and returns it to the global
        // free list; the second slab keeps the partial list non-empty.
        for ptr in first_slab_allocations {
            unsafe {
                allocator.free(ptr);
            }
        }
        check_worker_list_expectations(&allocator, size_index, true, false);

        // Freeing the last allocation empties the second slab as well.
        unsafe {
            allocator.free(second_slab_allocation);
        }
        check_worker_list_expectations(&allocator, size_index, false, false);
    }

    #[test]
    fn test_out_of_slabs() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;
        let worker_index = 0;
        let allocator = initialize_for_test(
            buffer.as_mut_ptr().cast(),
            slab_size,
            num_workers,
            worker_index,
        );

        let num_slabs = unsafe { allocator.header.as_ref() }.num_slabs;
        for index in 0..num_slabs {
            let slab_index = unsafe { allocator.take_slab(0) }.unwrap();
            assert_eq!(slab_index, index);
        }
        assert!(unsafe { allocator.take_slab(0) }.is_none());
    }

    #[test]
    fn test_remote_free_lists() {
        let mut buffer = test_buffer();
        let slab_size = 65536;
        let num_workers = 4;

        let allocator_0 =
            initialize_for_test(buffer.as_mut_ptr().cast(), slab_size, num_workers, 0);
        let allocator_1 = join_for_tests(buffer.as_mut_ptr().cast(), 1);

        let allocation_size = 2048;
        let size_index = size_class_index(allocation_size).unwrap();
        let allocations_per_slab = slab_size / allocation_size;

        // Worker 0 fills one slab completely.
        let mut allocations = vec![];
        for _ in 0..allocations_per_slab {
            allocations.push(allocator_0.allocate(allocation_size).unwrap());
        }

        let slab_index = unsafe {
            let worker_local_list = allocator_0.worker_local_list_partial(size_index);
            assert!(worker_local_list.head().is_none());
            let worker_local_list = allocator_0.worker_local_list_full(size_index);
            assert!(worker_local_list.head().is_some());
            worker_local_list.head().unwrap()
        };

        // No remote frees yet.
        let remote_free_list = unsafe { allocator_0.remote_free_list(slab_index) };
        assert!(remote_free_list.iterate().next().is_none());

        // Worker 1 frees every allocation remotely; the slots land on the slab's
        // remote free list.
        for ptr in allocations {
            unsafe {
                allocator_1.free(ptr);
            }
        }

        // The remote frees are not reclaimed yet, so this allocation has to come
        // from a different slab.
        let different_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(different_slab_allocation))
        };
        assert_ne!(allocation_indexes.slab_index, slab_index);
        unsafe { allocator_0.free(different_slab_allocation) };

        // After reclaiming the remote frees, the next allocation is served from
        // the original slab again.
        allocator_0.clean_remote_free_lists();
        let same_slab_allocation = allocator_0.allocate(allocation_size).unwrap();
        let allocation_indexes = unsafe {
            allocator_0.find_allocation_indexes(allocator_0.offset(same_slab_allocation))
        };
        assert_eq!(allocation_indexes.slab_index, slab_index);
    }
}