use std::sync::atomic::{AtomicUsize, Ordering};
use crate::{
block::Block, globals::IMMIX_BLOCK_SIZE, internal::block_list::AtomicBlockList, mmap::Mmap,
};
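/// Block allocator for the Immix heap: hands out `IMMIX_BLOCK_SIZE`-sized blocks,
/// either by recycling entries from a lock-free free list or by bumping a cursor
/// through a single `mmap`'d region.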
pub struct BlockAllocator {
    /// Blocks that have been returned and are ready for reuse.
    free_blocks: AtomicBlockList,
    /// Upper bound of the bump region (one past the last usable byte).
    pub data_bound: *mut u8,
    /// Bump cursor: address of the next block to carve out of the mapping.
    pub data: *mut u8,
    /// The memory mapping backing the whole heap.
    pub mmap: Mmap,
}
impl BlockAllocator {
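    /// Total number of blocks that fit between the aligned start and the end of the mapping.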
pub fn total_blocks(&self) -> usize {
(self.mmap.end() as usize - self.mmap.aligned() as usize) / IMMIX_BLOCK_SIZE
}
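    /// First usable, block-aligned address of the heap.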
pub fn start(&self) -> *mut u8 {
self.mmap.aligned()
}
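    /// One-past-the-end address of the mapping.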
pub fn end(&self) -> *mut u8 {
self.mmap.end()
}
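    /// Size in bytes of the usable heap region.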
pub fn size(&self) -> usize {
self.end() as usize - self.start() as usize
}
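    /// Creates an allocator backed by a fresh mapping of `size` bytes, with the
    /// bump cursor starting at the first block-aligned address; the first
    /// `IMMIX_BLOCK_SIZE` bytes of the mapping are committed up front.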
pub fn new(size: usize) -> BlockAllocator {
let map = Mmap::new(size);
let this = Self {
data: map.aligned(),
data_bound: map.end(),
free_blocks: AtomicBlockList::new(),
mmap: map,
};
debug_assert!(this.data as usize % IMMIX_BLOCK_SIZE == 0);
this.mmap.commit(this.mmap.start(), IMMIX_BLOCK_SIZE);
this
}
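    /// Hands out a block ready for use: a recycled block from the free list is
    /// re-committed and re-initialized; otherwise a fresh block is bump-allocated
    /// from the mapping. Panics with "Out of memory" when the mapping is exhausted.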
    pub fn get_block(&self) -> *mut Block {
        match self.free_blocks.take_free() {
            // Free list is empty: bump-allocate a fresh block from the mapping.
            x if x.is_null() => self.build_block().expect("Out of memory"),
            // Reuse a returned block: re-commit its pages and reset its header.
            x => {
                self.mmap.commit(x as *mut u8, IMMIX_BLOCK_SIZE);
                Block::new(x as *mut u8);
                x
            }
        }
    }
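    /// Returns `true` if `object` points into this allocator's heap range.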
    pub fn is_in_space(&self, object: *const u8) -> bool {
        let object = object as *mut u8;
        self.mmap.start() < object && object <= self.data_bound
    }
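    /// Carves a fresh block out of the mapping by atomically bumping the `data`
    /// cursor; returns `None` once the cursor would pass `data_bound`. The claimed
    /// block is committed before being returned.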
#[allow(unused_unsafe)]
fn build_block(&self) -> Option<*mut Block> {
        unsafe {
            // Treat the `data` bump cursor as an atomic word so concurrent
            // allocators can claim blocks without a lock.
            let data = as_atomic!(&self.data; AtomicUsize);
            let mut old = data.load(Ordering::Relaxed);
            let mut new;
            loop {
                new = old + IMMIX_BLOCK_SIZE;
                if new > self.data_bound as usize {
                    // The mapping is exhausted; the caller decides how to react.
                    return None;
                }
                // Claim [old, new) by advancing the cursor; on contention, retry
                // with the value another thread installed.
                match data.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
                    Ok(_) => break,
                    Err(x) => old = x,
                }
            }
            debug_assert!(
                old % IMMIX_BLOCK_SIZE == 0,
                "block is not aligned to IMMIX_BLOCK_SIZE"
            );
self.mmap.commit(old as *mut u8, IMMIX_BLOCK_SIZE);
Some(old as *mut Block)
}
}
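    /// Returns a batch of blocks to the free list, marking each as empty and
    /// advising the OS that its pages are no longer needed.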
pub fn return_blocks(&mut self, blocks: impl Iterator<Item = *mut Block>) {
blocks.for_each(|block| unsafe {
(*block).allocated = 0;
self.mmap.dontneed(block as *mut u8, IMMIX_BLOCK_SIZE);
self.free_blocks.add_free(block);
});
}
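    /// Returns a single block to the free list; see [`Self::return_blocks`].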
pub fn return_block(&mut self, block: *mut Block) {
unsafe {
(*block).allocated = 0;
}
self.mmap.dontneed(block as *mut u8, IMMIX_BLOCK_SIZE);
unsafe {
self.free_blocks.add_free(block);
}
}
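    /// Blocks still available: untouched blocks remaining in the bump region plus
    /// blocks currently sitting on the free list.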
pub fn available_blocks(&self) -> usize {
let nblocks = ((self.data_bound as usize) - (self.data as usize)) / IMMIX_BLOCK_SIZE;
nblocks + self.free_blocks.count()
}
}
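
// A minimal usage sketch of the allocator's round trip, assuming the crate's
// `Mmap`, `Block`, and `AtomicBlockList` behave as their use above implies (in
// particular, that `Mmap::new` accepts an arbitrary byte size). Illustrative
// only; not part of the upstream test suite.
#[cfg(test)]
mod block_allocator_tests {
    use super::*;
    use crate::globals::IMMIX_BLOCK_SIZE;

    #[test]
    fn block_round_trip() {
        // Map enough space for a handful of blocks (size chosen arbitrarily).
        let mut alloc = BlockAllocator::new(8 * IMMIX_BLOCK_SIZE);
        let before = alloc.available_blocks();

        // First request falls through to the bump path.
        let block = alloc.get_block();
        assert!(!block.is_null());
        assert_eq!(alloc.available_blocks(), before - 1);

        // Returning the block puts it on the free list for reuse.
        alloc.return_block(block);
        assert_eq!(alloc.available_blocks(), before);

        // The next request is served from the free list and yields the same block.
        assert_eq!(alloc.get_block(), block);
    }
}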