vm_memory/bitmap/backend/atomic_bitmap.rs

use std::num::NonZeroUsize;
use std::sync::atomic::{AtomicU64, Ordering};

use crate::bitmap::{Bitmap, RefSlice, WithBitmapSlice};

#[cfg(feature = "backend-mmap")]
use crate::mmap::NewBitmap;

/// `AtomicBitmap` implements a simple bitmap on the page level with test
/// and set operations. It is page-size aware, so it converts addresses to
/// page numbers before setting or clearing the bits.
#[derive(Debug)]
pub struct AtomicBitmap {
    map: Vec<AtomicU64>,
    size: usize,
    byte_size: usize,
    page_size: NonZeroUsize,
}

#[allow(clippy::len_without_is_empty)]
impl AtomicBitmap {
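    /// Create a new bitmap of `byte_size` bytes, with one bit per
    /// `page_size`-sized page; the page count is rounded up, and the backing
    /// vector covers a whole number of `u64` words.
    ///
    /// # Examples
    ///
    /// A minimal usage sketch, assuming this type is re-exported as
    /// `vm_memory::bitmap::AtomicBitmap` (behind the crate's bitmap backend
    /// feature):
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// // Track dirty state for 8 KiB of memory at 4 KiB page granularity.
    /// let bitmap = AtomicBitmap::new(0x2000, NonZeroUsize::new(0x1000).unwrap());
    /// assert_eq!(bitmap.len(), 2);
    /// ```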
    pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self {
        let num_pages = byte_size.div_ceil(page_size.get());
        let map_size = num_pages.div_ceil(u64::BITS as usize);
        let map: Vec<AtomicU64> = (0..map_size).map(|_| AtomicU64::new(0)).collect();

        AtomicBitmap {
            map,
            size: num_pages,
            byte_size,
            page_size,
        }
    }

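    /// Enlarge this bitmap with `additional_size` bytes, preserving any bits
    /// that are already set.
    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the same `vm_memory::bitmap::AtomicBitmap`
    /// re-export as above:
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let mut bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_bit(7);
    /// bitmap.enlarge(1024);
    /// // The bitmap now covers 16 pages, and previously set bits survive.
    /// assert_eq!(bitmap.len(), 16);
    /// assert!(bitmap.is_bit_set(7));
    /// ```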
    pub fn enlarge(&mut self, additional_size: usize) {
        self.byte_size += additional_size;
        self.size = self.byte_size.div_ceil(self.page_size.get());
        let map_size = self.size.div_ceil(u64::BITS as usize);
        self.map.resize_with(map_size, Default::default);
    }

    /// Is bit `index` set? Bits outside the range of the bitmap are always
    /// unset.
    pub fn is_bit_set(&self, index: usize) -> bool {
        if index < self.size {
            (self.map[index >> 6].load(Ordering::Acquire) & (1 << (index & 63))) != 0
        } else {
            // Out-of-range bits are always unset.
            false
        }
    }

    /// Is the bit for the page containing address `addr` set?
    pub fn is_addr_set(&self, addr: usize) -> bool {
        self.is_bit_set(addr / self.page_size)
    }

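    /// Set a range of `len` bytes starting at `start_addr`. The first bit set
    /// in the bitmap is for the page corresponding to `start_addr`, and the
    /// last bit set corresponds to address `start_addr + len - 1`.
    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the same re-export as above:
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let bitmap = AtomicBitmap::new(0x1000, NonZeroUsize::new(128).unwrap());
    /// // A two-byte write crossing from page 1 into page 2 marks both pages.
    /// bitmap.set_addr_range(255, 2);
    /// assert!(bitmap.is_addr_set(128));
    /// assert!(bitmap.is_addr_set(256));
    /// assert!(!bitmap.is_addr_set(0));
    /// ```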
    pub fn set_addr_range(&self, start_addr: usize, len: usize) {
        self.set_reset_addr_range(start_addr, len, true);
    }

    /// Set or reset (depending on `set`) the bits for all pages overlapping
    /// the byte range starting at `start_addr` and spanning `len` bytes.
    fn set_reset_addr_range(&self, start_addr: usize, len: usize, set: bool) {
        // Return early for zero-length ranges, which would otherwise
        // underflow in the `len - 1` computation below.
        if len == 0 {
            return;
        }

        let first_bit = start_addr / self.page_size;
        let last_bit = start_addr.saturating_add(len - 1) / self.page_size;
        for n in first_bit..=last_bit {
            if n >= self.size {
                // Attempts to set bits beyond the end of the bitmap are
                // simply ignored.
                break;
            }
            if set {
                self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
            } else {
                self.map[n >> 6].fetch_and(!(1 << (n & 63)), Ordering::SeqCst);
            }
        }
    }

    /// Reset a range of `len` bytes starting at `start_addr`, clearing the
    /// same bits that `set_addr_range` would set.
    pub fn reset_addr_range(&self, start_addr: usize, len: usize) {
        self.set_reset_addr_range(start_addr, len, false);
    }

    /// Set bit `index`; out-of-range indices are ignored.
    pub fn set_bit(&self, index: usize) {
        if index >= self.size {
            return;
        }
        self.map[index >> 6].fetch_or(1 << (index & 63), Ordering::SeqCst);
    }

    /// Reset bit `index`; out-of-range indices are ignored.
    pub fn reset_bit(&self, index: usize) {
        if index >= self.size {
            return;
        }
        self.map[index >> 6].fetch_and(!(1 << (index & 63)), Ordering::SeqCst);
    }

    /// Get the length of the bitmap in bits (i.e. in number of pages).
    pub fn len(&self) -> usize {
        self.size
    }

    /// Get the size in bytes of the memory range this bitmap covers.
    pub fn byte_size(&self) -> usize {
        self.byte_size
    }

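    /// Atomically fetch the bitmap contents and reset all bits to 0,
    /// returning the previous `u64` words. This is the typical harvest step
    /// of a dirty-page-tracking loop.
    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the same re-export as above:
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_addr_range(128, 129);
    /// // Harvest the dirty state: pages 1 and 2 were touched...
    /// assert_eq!(bitmap.get_and_reset(), vec![0b110]);
    /// // ...and the bitmap is clear afterwards.
    /// assert!(!bitmap.is_addr_set(128));
    /// ```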
    pub fn get_and_reset(&self) -> Vec<u64> {
        self.map
            .iter()
            .map(|u| u.fetch_and(0, Ordering::SeqCst))
            .collect()
    }

    /// Reset all bitmap bits to 0.
    pub fn reset(&self) {
        for it in self.map.iter() {
            it.store(0, Ordering::Release);
        }
    }
}

// `AtomicU64` does not implement `Clone`, so clone manually by snapshotting
// each word with an atomic load.
impl Clone for AtomicBitmap {
    fn clone(&self) -> Self {
        let map = self
            .map
            .iter()
            .map(|i| i.load(Ordering::Acquire))
            .map(AtomicU64::new)
            .collect();
        AtomicBitmap {
            map,
            size: self.size,
            byte_size: self.byte_size,
            page_size: self.page_size,
        }
    }
}

impl<'a> WithBitmapSlice<'a> for AtomicBitmap {
    type S = RefSlice<'a, Self>;
}

impl Bitmap for AtomicBitmap {
    fn mark_dirty(&self, offset: usize, len: usize) {
        self.set_addr_range(offset, len)
    }

    fn dirty_at(&self, offset: usize) -> bool {
        self.is_addr_set(offset)
    }

    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S {
        RefSlice::new(self, offset)
    }
}

impl Default for AtomicBitmap {
    fn default() -> Self {
        // SAFETY: 0x1000 is a non-zero constant.
        AtomicBitmap::new(0, unsafe { NonZeroUsize::new_unchecked(0x1000) })
    }
}

#[cfg(feature = "backend-mmap")]
impl NewBitmap for AtomicBitmap {
    fn with_len(len: usize) -> Self {
        #[cfg(unix)]
        // SAFETY: There's no unsafe potential in calling this function.
        let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };

        #[cfg(windows)]
        let page_size = {
            use std::mem::MaybeUninit;
            use winapi::um::sysinfoapi::{GetSystemInfo, SYSTEM_INFO};

            // `SYSTEM_INFO` is plain old data, so it is safe to initialize
            // it from zeroed memory.
            let mut sysinfo: MaybeUninit<SYSTEM_INFO> = MaybeUninit::zeroed();
            // SAFETY: `sysinfo.as_mut_ptr()` is a valid pointer for
            // `GetSystemInfo` to write to.
            unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) };
            // SAFETY: the call to `GetSystemInfo` above initialized `sysinfo`.
            unsafe { sysinfo.assume_init().dwPageSize }
        };

        // The unwraps are safe because the page size reported by the system
        // is always a positive value that fits in a `usize`.
        AtomicBitmap::new(
            len,
            NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use crate::bitmap::tests::test_bitmap;

    #[allow(clippy::undocumented_unsafe_blocks)]
    const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(128) };

    #[test]
    fn test_bitmap_basic() {
        // Test that the bitmap size is automatically rounded up.
        let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE);
        assert_eq!(a.len(), 9);

        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 8);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        #[allow(clippy::redundant_clone)]
        let copy_b = b.clone();
        assert!(copy_b.is_addr_set(256));
        assert!(!copy_b.is_addr_set(384));

        b.reset();
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.set_addr_range(128, 129);
        let v = b.get_and_reset();

        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        assert_eq!(v.len(), 1);
        // Pages 1 and 2 (at addresses 128 and 256) were dirty.
        assert_eq!(v[0], 0b110);
    }

    #[test]
    fn test_bitmap_reset() {
        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 8);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.reset_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));
    }

    #[test]
    fn test_bitmap_out_of_range() {
        let b = AtomicBitmap::new(1024, NonZeroUsize::MIN);
        // Set a partial range that goes beyond the end of the bitmap.
        b.set_addr_range(768, 512);
        assert!(b.is_addr_set(768));
        // The bitmap is never set beyond its end.
        assert!(!b.is_addr_set(1024));
        assert!(!b.is_addr_set(1152));
    }

    #[test]
    fn test_bitmap_impl() {
        let b = AtomicBitmap::new(0x800, DEFAULT_PAGE_SIZE);
        test_bitmap(&b);
    }
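
    // A usage sketch of `AtomicBitmap` through the generic `Bitmap` trait,
    // mirroring how a memory backend would record guest writes. The scenario
    // (offsets, lengths) is illustrative and not part of the original suite.
    #[test]
    fn test_bitmap_trait_usage_sketch() {
        let b = AtomicBitmap::new(0x400, DEFAULT_PAGE_SIZE);
        // A 4-byte write at offset 130 dirties page 1 only.
        b.mark_dirty(130, 4);
        assert!(b.dirty_at(130));
        assert!(!b.dirty_at(0));
        // A slice observes dirtiness relative to its base offset.
        let slice = b.slice_at(128);
        assert!(slice.dirty_at(2));
    }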

    #[test]
    fn test_bitmap_enlarge() {
        let mut b = AtomicBitmap::new(8 * 1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 64);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.reset_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));
        b.set_addr_range(128, 129);
        b.enlarge(8 * 1024);
        for i in 65..128 {
            assert!(!b.is_bit_set(i));
        }
        assert_eq!(b.len(), 128);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.set_bit(55);
        assert!(b.is_bit_set(55));
        for i in 65..128 {
            b.set_bit(i);
        }
        for i in 65..128 {
            assert!(b.is_bit_set(i));
        }
        b.reset_addr_range(0, 16 * 1024);
        for i in 0..128 {
            assert!(!b.is_bit_set(i));
        }
    }
}