use std::sync::atomic::Ordering;

use crate::{
    block::Block, globals::IMMIX_BLOCK_SIZE, internal::block_list::AtomicBlockList, mmap::Mmap,
};
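
/// Allocates IMMIX_BLOCK_SIZE blocks out of a single memory mapping: fresh
/// blocks are bump-allocated from `data` up to `data_bound`, and returned
/// blocks are recycled through the atomic `free_blocks` list.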
pub struct BlockAllocator {
    free_blocks: AtomicBlockList,

    pub data_bound: *mut u8,
    pub data: *mut u8,
    pub mmap: Mmap,
}

impl BlockAllocator {
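    /// Total number of blocks the aligned mapping can hold.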
    pub fn total_blocks(&self) -> usize {
        (self.mmap.end() as usize - self.mmap.aligned() as usize) / IMMIX_BLOCK_SIZE
    }

    pub fn start(&self) -> *mut u8 {
        self.mmap.aligned()
    }

    pub fn end(&self) -> *mut u8 {
        self.mmap.end()
    }

    pub fn size(&self) -> usize {
        self.end() as usize - self.start() as usize
    }
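
    /// Reserves `size` bytes of address space and commits only the first
    /// block; the rest is committed lazily as blocks are handed out.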
    pub fn new(size: usize) -> BlockAllocator {
        let map = Mmap::new(size);

        let this = Self {
            data: map.aligned(),
            data_bound: map.end(),
            free_blocks: AtomicBlockList::new(),

            mmap: map,
        };
        debug_assert!(this.data as usize % IMMIX_BLOCK_SIZE == 0);
        this.mmap.commit(this.mmap.start(), IMMIX_BLOCK_SIZE);
        this
    }
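
    /// Returns a ready-to-use block: a recycled one from the free list when
    /// available, otherwise a fresh block bump-allocated from the mapping.
    /// Recycled blocks are recommitted and reinitialized via `Block::new`
    /// before being handed back out.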
    pub fn get_block(&self) -> *mut Block {
        match self.free_blocks.take_free() {
            x if x.is_null() => self.build_block().expect("Out of memory"),
            x => {
                self.mmap.commit(x as _, IMMIX_BLOCK_SIZE);
                Block::new(x as _);
                x
            }
        }
    }
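
    /// Returns true when `object` points into the managed mapping, i.e. the
    /// address range that blocks are carved from.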
    pub fn is_in_space(&self, object: *const u8) -> bool {
        self.mmap.start() < object as *mut u8 && object as *mut u8 <= self.data_bound
    }
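
    /// Carves a fresh block out of the mapping by atomically bumping `data`
    /// by one IMMIX_BLOCK_SIZE; returns `None` once `data_bound` is reached.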
    #[allow(unused_unsafe)]
    fn build_block(&self) -> Option<*mut Block> {
        unsafe {
            let data = as_atomic!(&self.data;AtomicUsize);
            let mut old = data.load(Ordering::Relaxed);
            let mut new;
            loop {
                new = old + IMMIX_BLOCK_SIZE;
                if new > self.data_bound as usize {
                    return None;
                }
                let res =
                    data.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed);
                match res {
                    Ok(_) => break,
                    Err(x) => old = x,
                }
            }
            debug_assert!(
                old % IMMIX_BLOCK_SIZE == 0,
                "block is not aligned for block_size"
            );
            self.mmap.commit(old as *mut u8, IMMIX_BLOCK_SIZE);
            Some(old as *mut Block)
        }
    }
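
    /// Releases a batch of blocks back to the allocator: each block's
    /// `allocated` counter is reset, its pages are returned to the OS with
    /// `dontneed`, and it is pushed onto the free list for reuse.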
    pub fn return_blocks(&mut self, blocks: impl Iterator<Item = *mut Block>) {
        blocks.for_each(|block| unsafe {
            (*block).allocated = 0;
            self.mmap.dontneed(block as *mut u8, IMMIX_BLOCK_SIZE);
            self.free_blocks.add_free(block);
        });
    }
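
    /// Single-block variant of `return_blocks`.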
    pub fn return_block(&mut self, block: *mut Block) {
        unsafe {
            (*block).allocated = 0;
        }
        self.mmap.dontneed(block as *mut u8, IMMIX_BLOCK_SIZE);
        unsafe {
            self.free_blocks.add_free(block);
        }
    }
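
    /// Number of blocks still obtainable: the untouched tail of the mapping
    /// between `data` and `data_bound` plus everything on the free list.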
    pub fn available_blocks(&self) -> usize {
        let nblocks = ((self.data_bound as usize) - (self.data as usize)) / IMMIX_BLOCK_SIZE;

        nblocks + self.free_blocks.count()
    }
}
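
// A minimal usage sketch, assuming `Mmap::new(size)` can reserve a mapping of
// a few blocks and that `IMMIX_BLOCK_SIZE` is importable from `crate::globals`
// as above; it only exercises the public API defined in this file, and the
// reservation size is illustrative, not prescriptive.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::globals::IMMIX_BLOCK_SIZE;

    #[test]
    fn block_round_trip() {
        // Reserve room for a handful of blocks (illustrative size).
        let mut alloc = BlockAllocator::new(8 * IMMIX_BLOCK_SIZE);
        let before = alloc.available_blocks();

        // Fresh allocator: the free list is empty, so this bump-allocates.
        let block = alloc.get_block();
        assert!(!block.is_null());
        assert_eq!(alloc.available_blocks(), before - 1);

        // Returning the block puts it on the free list, so the total of
        // bump-allocatable plus free blocks is back where it started.
        alloc.return_block(block);
        assert_eq!(alloc.available_blocks(), before);
    }
}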