libblobd_direct/allocator/mod.rs

#[cfg(test)]
pub mod tests;

use crate::metrics::BlobdMetrics;
use crate::pages::Pages;
use crate::util::div_pow2;
use crate::util::mod_pow2;
use crate::util::mul_pow2;
use off64::u32;
use off64::u8;
use off64::usz;
use roaring::RoaringBitmap;
use std::cmp::max;
use std::error::Error;
use std::fmt;
use std::fmt::Display;
use std::sync::atomic::Ordering::Relaxed;
use struct_name::StructName;
use struct_name_macro::StructName;

#[derive(PartialEq, Eq, Clone, Copy, Debug, StructName)]
pub(crate) struct OutOfSpaceError;

impl Display for OutOfSpaceError {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    f.write_str(Self::struct_name())
  }
}

impl Error for OutOfSpaceError {}

#[derive(PartialEq, Eq, Clone, Copy)]
pub(crate) enum AllocDir {
  Left,
  Right,
}

type PageNum = u32;

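/// Binary buddy allocator over the heap region. Free pages are tracked with one
/// `RoaringBitmap` of page numbers per page size between spage and lpage.
/// `AllocDir` controls whether allocations are packed towards the start of the
/// heap (`Left`, lowest free page number) or the end (`Right`, highest).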
pub(crate) struct Allocator {
  base_dev_offset: u64,
  dir: AllocDir,
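  // Free page numbers, one bitmap per page size: index 0 is spage, the last is lpage.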
  free: Vec<RoaringBitmap>,
  pages: Pages,
  metrics: BlobdMetrics,
}

impl Allocator {
  pub fn new(
    heap_dev_offset: u64,
    heap_size: u64,
    pages: Pages,
    dir: AllocDir,
    metrics: BlobdMetrics,
  ) -> Self {
    assert_eq!(mod_pow2(heap_dev_offset, pages.lpage_size_pow2), 0);
    assert_eq!(mod_pow2(heap_size, pages.lpage_size_pow2), 0);
    Self {
      base_dev_offset: heap_dev_offset,
      dir,
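      // Initially only the lpage bitmap is populated; smaller free pages only
      // appear when a larger page is split.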
      free: (pages.spage_size_pow2..=pages.lpage_size_pow2)
        .map(|sz| {
          let page_count = u32!(div_pow2(heap_size, sz));
          let mut map = RoaringBitmap::new();
          if sz == pages.lpage_size_pow2 {
            map.insert_range(..page_count);
          };
          map
        })
        .collect(),
      pages,
      metrics,
    }
  }

  fn bitmap(&self, page_size_pow2: u8) -> &RoaringBitmap {
    &self.free[usz!(page_size_pow2 - self.pages.spage_size_pow2)]
  }

  fn bitmap_mut(&mut self, page_size_pow2: u8) -> &mut RoaringBitmap {
    &mut self.free[usz!(page_size_pow2 - self.pages.spage_size_pow2)]
  }

  fn to_page_num(&self, page_dev_offset: u64, page_size_pow2: u8) -> PageNum {
    u32!(div_pow2(
      page_dev_offset - self.base_dev_offset,
      page_size_pow2
    ))
  }

  fn to_page_dev_offset(&self, page_num: u32, page_size_pow2: u8) -> u64 {
    mul_pow2(page_num.into(), page_size_pow2) + self.base_dev_offset
  }

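  /// Removes `page_num` from the free bitmap of its size. If it isn't free at
  /// that size, it must currently be part of a larger free page, so the parent
  /// is (recursively) marked as allocated and split.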
  fn _mark_as_allocated(&mut self, page_num: PageNum, page_size_pow2: u8) {
    assert!(
      page_size_pow2 >= self.pages.spage_size_pow2 && page_size_pow2 <= self.pages.lpage_size_pow2
    );
    if !self.bitmap_mut(page_size_pow2).remove(page_num) {
      self._mark_as_allocated(page_num / 2, page_size_pow2 + 1);
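      // The parent has now been split; the other half (our buddy) becomes free.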
      self.bitmap_mut(page_size_pow2).insert(page_num ^ 1);
    };
  }

  pub fn mark_as_allocated(&mut self, page_dev_offset: u64, page_size_pow2: u8) {
    assert_eq!(mod_pow2(page_dev_offset, page_size_pow2), 0);

    self._mark_as_allocated(
      self.to_page_num(page_dev_offset, page_size_pow2),
      page_size_pow2,
    );

    self
      .metrics
      .0
      .allocated_bytes
      .fetch_add(1 << page_size_pow2, Relaxed);
  }

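  /// Acquires a free page of size `1 << page_size_pow2` and returns its page
  /// number. `Left` takes the lowest-numbered free page, `Right` the highest.
  /// If none is free at this size, a larger page is allocated recursively and
  /// split in two, with the unused half recorded as free.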
  fn _allocate(&mut self, page_size_pow2: u8) -> Result<PageNum, OutOfSpaceError> {
    assert!(
      page_size_pow2 >= self.pages.spage_size_pow2 && page_size_pow2 <= self.pages.lpage_size_pow2
    );
    match if self.dir == AllocDir::Left {
      self.bitmap(page_size_pow2).min()
    } else {
      self.bitmap(page_size_pow2).max()
    } {
      Some(page_num) => {
        assert!(self.bitmap_mut(page_size_pow2).remove(page_num));
        Ok(page_num)
      }
      None if page_size_pow2 == self.pages.lpage_size_pow2 => Err(OutOfSpaceError),
      None => {
        let larger_page_num = self._allocate(page_size_pow2 + 1)?;
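        // Split the larger page into two buddies: hand out the half nearest the
        // allocation direction and leave the other half free at this size.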
        Ok(match self.dir {
          AllocDir::Left => {
            assert!(self
              .bitmap_mut(page_size_pow2)
              .insert(larger_page_num * 2 + 1));
            larger_page_num * 2
          }
          AllocDir::Right => {
            assert!(self.bitmap_mut(page_size_pow2).insert(larger_page_num * 2));
            larger_page_num * 2 + 1
          }
        })
      }
    }
  }

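  /// Allocates space for `size` bytes and returns the device offset of the
  /// allocated page. The size is rounded up to the next power of two, and to at
  /// least the spage size; it must not exceed the lpage size.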
  pub fn allocate(&mut self, size: u64) -> Result<u64, OutOfSpaceError> {
    let pow2 = max(
      self.pages.spage_size_pow2,
      u8!(size.next_power_of_two().ilog2()),
    );
    assert!(pow2 <= self.pages.lpage_size_pow2);
    let page_num = self._allocate(pow2)?;
    // Only count the allocation once it has actually succeeded.
    self.metrics.0.allocated_bytes.fetch_add(1 << pow2, Relaxed);
    Ok(self.to_page_dev_offset(page_num, pow2))
  }

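  /// Returns a page to the free bitmap of its size. If its buddy is already
  /// free, the two are merged and the parent page is released instead, cascading
  /// upwards until a buddy is still in use or the lpage size is reached.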
  fn _release(&mut self, page_num: PageNum, page_size_pow2: u8) {
    if page_size_pow2 < self.pages.lpage_size_pow2 {
      // Check whether our buddy is free too; if so, merge into the parent page.
      let buddy_page_num = page_num ^ 1;
      if self.bitmap(page_size_pow2).contains(buddy_page_num) {
        assert!(self.bitmap_mut(page_size_pow2).remove(buddy_page_num));
        // The page being released must not already be marked as free.
        assert!(!self.bitmap(page_size_pow2).contains(page_num));
        let parent_page_num = page_num / 2;
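        // Both halves are now free, so release the merged parent page instead.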
        self._release(parent_page_num, page_size_pow2 + 1);
        return;
      };
    };
    assert!(self.bitmap_mut(page_size_pow2).insert(page_num));
  }

  pub fn release(&mut self, page_dev_offset: u64, page_size_pow2: u8) {
    let page_num = self.to_page_num(page_dev_offset, page_size_pow2);
    self
      .metrics
      .0
      .allocated_bytes
      .fetch_sub(1 << page_size_pow2, Relaxed);
    self._release(page_num, page_size_pow2);
  }
}
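
// Illustrative walkthrough (sketch only; the concrete spage/lpage sizes below are
// assumptions for the example, not values taken from this crate):
//
// Assume spage_size_pow2 = 12 (4 KiB) and lpage_size_pow2 = 24 (16 MiB).
// - allocate(5000): 5000u64.next_power_of_two() == 8192, whose ilog2() is 13, so a
//   2^13 = 8 KiB page is requested. If no 8 KiB page is free, _allocate(14) runs
//   recursively (up to the 16 MiB lpage level), each level splitting one page in two
//   and leaving the unused buddy free.
// - release(dev_offset, 13): if the 8 KiB buddy (page_num ^ 1) is also free, the two
//   merge and the 16 KiB parent is released instead, cascading upwards while possible.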