vm_memory/bitmap/backend/atomic_bitmap.rs

// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Bitmap backend implementation based on atomic integers.

use std::num::NonZeroUsize;
use std::sync::atomic::{AtomicU64, Ordering};

use crate::bitmap::{Bitmap, RefSlice, WithBitmapSlice};

#[cfg(feature = "backend-mmap")]
use crate::mmap::NewBitmap;

/// `AtomicBitmap` implements a simple bit map on the page level with test and set operations.
/// It is page-size aware, so it converts addresses to page numbers before setting or clearing
/// the bits.
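///
/// # Example
///
/// A minimal usage sketch. It assumes this type is re-exported as
/// `vm_memory::bitmap::AtomicBitmap`; the sizes are illustrative.
///
/// ```
/// use std::num::NonZeroUsize;
/// use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
///
/// // Track 4096 bytes of memory at a 1024-byte page granularity (4 pages).
/// let bitmap = AtomicBitmap::new(4096, NonZeroUsize::new(1024).unwrap());
///
/// // Mark one byte dirty; the whole containing page (page 1) is flagged.
/// bitmap.set_addr_range(1024, 1);
/// assert!(bitmap.is_addr_set(1024));
/// assert!(!bitmap.is_addr_set(2048));
/// ```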
#[derive(Debug)]
pub struct AtomicBitmap {
    map: Vec<AtomicU64>,
    size: usize,
    byte_size: usize,
    page_size: NonZeroUsize,
}

#[allow(clippy::len_without_is_empty)]
impl AtomicBitmap {
    /// Create a new bitmap of `byte_size`, with one bit per page. The page count is
    /// effectively rounded up: the backing vector holds the next multiple of 64 bits
    /// needed to cover all pages.
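    ///
    /// A short rounding sketch (same path assumption as the type-level example; sizes are
    /// illustrative):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// // 1025 bytes at a 128-byte page size covers ceil(1025 / 128) = 9 pages.
    /// let bitmap = AtomicBitmap::new(1025, NonZeroUsize::new(128).unwrap());
    /// assert_eq!(bitmap.len(), 9);
    /// ```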
    pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self {
        let num_pages = byte_size.div_ceil(page_size.get());
        let map_size = num_pages.div_ceil(u64::BITS as usize);
        let map: Vec<AtomicU64> = (0..map_size).map(|_| AtomicU64::new(0)).collect();

        AtomicBitmap {
            map,
            size: num_pages,
            byte_size,
            page_size,
        }
    }

    /// Enlarge this bitmap with enough bits to track `additional_size` additional bytes at page granularity.
    /// New bits are initialized to zero.
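    ///
    /// A short sketch (illustrative sizes; assumed re-export path as above):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// let mut bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// assert_eq!(bitmap.len(), 8);
    /// // Track another 1024 bytes; the added bits start out unset.
    /// bitmap.enlarge(1024);
    /// assert_eq!(bitmap.len(), 16);
    /// assert!(!bitmap.is_bit_set(15));
    /// ```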
    pub fn enlarge(&mut self, additional_size: usize) {
        self.byte_size += additional_size;
        self.size = self.byte_size.div_ceil(self.page_size.get());
        let map_size = self.size.div_ceil(u64::BITS as usize);
        self.map.resize_with(map_size, Default::default);
    }

    /// Is the bit at `index` set? Bits outside the range of the bitmap are always unset.
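    ///
    /// For example (illustrative sizes; assumed re-export path as above):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_bit(3);
    /// assert!(bitmap.is_bit_set(3));
    /// // Index 100 is past the 8 bits this bitmap holds, so it reads as unset.
    /// assert!(!bitmap.is_bit_set(100));
    /// ```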
    pub fn is_bit_set(&self, index: usize) -> bool {
        if index < self.size {
            (self.map[index >> 6].load(Ordering::Acquire) & (1 << (index & 63))) != 0
        } else {
            // Out-of-range bits are always unset.
            false
        }
    }

    /// Is the bit corresponding to address `addr` set?
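    ///
    /// For example (same assumptions as the earlier sketches):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_bit(2);
    /// // Address 300 falls in page 300 / 128 = 2.
    /// assert!(bitmap.is_addr_set(300));
    /// ```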
    pub fn is_addr_set(&self, addr: usize) -> bool {
        self.is_bit_set(addr / self.page_size)
    }

    /// Set a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap
    /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds
    /// to address `start_addr + len - 1`.
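    ///
    /// For example, a 129-byte range starting at page-aligned address 128 touches two
    /// 128-byte pages (illustrative sizes; assumed re-export path as above):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_addr_range(128, 129);
    /// assert!(bitmap.is_addr_set(128));
    /// assert!(bitmap.is_addr_set(256));
    /// assert!(!bitmap.is_addr_set(384));
    /// ```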
    pub fn set_addr_range(&self, start_addr: usize, len: usize) {
        self.set_reset_addr_range(start_addr, len, true);
    }

    // Set or reset a range of `len` bytes starting at `start_addr`. The `set` parameter
    // determines whether the bits are set or reset: if `set` is true, the range of bits
    // is set to one, otherwise to zero.
    fn set_reset_addr_range(&self, start_addr: usize, len: usize, set: bool) {
        // Return early in the unlikely event that `len == 0` so the `len - 1` computation
        // below does not underflow.
        if len == 0 {
            return;
        }

        let first_bit = start_addr / self.page_size;
        // Handle input ranges where `start_addr + len - 1` would otherwise overflow a `usize`
        // by ignoring pages at invalid addresses.
        let last_bit = start_addr.saturating_add(len - 1) / self.page_size;
        for n in first_bit..=last_bit {
            if n >= self.size {
                // Attempts to set bits beyond the end of the bitmap are simply ignored.
                break;
            }
            if set {
                self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
            } else {
                self.map[n >> 6].fetch_and(!(1 << (n & 63)), Ordering::SeqCst);
            }
        }
    }

    /// Reset a range of `len` bytes starting at `start_addr`. The first bit cleared in the
    /// bitmap is for the page corresponding to `start_addr`, and the last bit that we clear
    /// corresponds to address `start_addr + len - 1`.
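    ///
    /// For example (same assumptions as the earlier sketches):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_addr_range(128, 129);
    /// bitmap.reset_addr_range(128, 129);
    /// assert!(!bitmap.is_addr_set(128));
    /// assert!(!bitmap.is_addr_set(256));
    /// ```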
    pub fn reset_addr_range(&self, start_addr: usize, len: usize) {
        self.set_reset_addr_range(start_addr, len, false);
    }

    /// Set the bit at the given `index`.
    pub fn set_bit(&self, index: usize) {
        if index >= self.size {
            // Attempts to set bits beyond the end of the bitmap are simply ignored.
            return;
        }
        self.map[index >> 6].fetch_or(1 << (index & 63), Ordering::SeqCst);
    }

    /// Reset the bit at the given `index`.
    pub fn reset_bit(&self, index: usize) {
        if index >= self.size {
            // Attempts to reset bits beyond the end of the bitmap are simply ignored.
            return;
        }
        self.map[index >> 6].fetch_and(!(1 << (index & 63)), Ordering::SeqCst);
    }

    /// Get the length of the bitmap in bits (i.e. how many pages it can represent).
    pub fn len(&self) -> usize {
        self.size
    }

    /// Get the size in bytes, i.e. how many bytes the bitmap can represent, one bit per page.
    pub fn byte_size(&self) -> usize {
        self.byte_size
    }

    /// Atomically get and reset the dirty page bitmap.
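    ///
    /// A snapshot-and-clear sketch (same assumptions as the earlier examples):
    ///
    /// ```
    /// # use std::num::NonZeroUsize;
    /// # use vm_memory::bitmap::AtomicBitmap; // assumed re-export path
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_addr_range(128, 129); // dirties pages 1 and 2
    /// let snapshot = bitmap.get_and_reset();
    /// assert_eq!(snapshot[0], 0b110);
    /// // The call also cleared the bitmap itself.
    /// assert!(!bitmap.is_addr_set(128));
    /// ```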
    pub fn get_and_reset(&self) -> Vec<u64> {
        self.map
            .iter()
            .map(|u| u.fetch_and(0, Ordering::SeqCst))
            .collect()
    }

    /// Reset all bitmap bits to 0.
    pub fn reset(&self) {
        for it in self.map.iter() {
            it.store(0, Ordering::Release);
        }
    }
}

impl Clone for AtomicBitmap {
    fn clone(&self) -> Self {
        let map = self
            .map
            .iter()
            .map(|i| i.load(Ordering::Acquire))
            .map(AtomicU64::new)
            .collect();
        AtomicBitmap {
            map,
            size: self.size,
            byte_size: self.byte_size,
            page_size: self.page_size,
        }
    }
}

impl<'a> WithBitmapSlice<'a> for AtomicBitmap {
    type S = RefSlice<'a, Self>;
}

impl Bitmap for AtomicBitmap {
    fn mark_dirty(&self, offset: usize, len: usize) {
        self.set_addr_range(offset, len)
    }

    fn dirty_at(&self, offset: usize) -> bool {
        self.is_addr_set(offset)
    }

    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S {
        RefSlice::new(self, offset)
    }
}

impl Default for AtomicBitmap {
    fn default() -> Self {
        // SAFETY: Safe as `0x1000` is non-zero.
        AtomicBitmap::new(0, unsafe { NonZeroUsize::new_unchecked(0x1000) })
    }
}

#[cfg(feature = "backend-mmap")]
impl NewBitmap for AtomicBitmap {
    fn with_len(len: usize) -> Self {
        #[cfg(unix)]
        // SAFETY: There's no unsafe potential in calling this function.
        let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };

        #[cfg(windows)]
        let page_size = {
            use std::mem::MaybeUninit;
            use winapi::um::sysinfoapi::{GetSystemInfo, SYSTEM_INFO};
            let mut sysinfo = MaybeUninit::<SYSTEM_INFO>::zeroed();
            // SAFETY: It's safe to call `GetSystemInfo` as `sysinfo` is rightly sized
            // allocated memory.
            unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) };
            // SAFETY: It's safe to call `assume_init` as `GetSystemInfo` initializes `sysinfo`.
            unsafe { sysinfo.assume_init().dwPageSize }
        };

        // The `unwrap` is safe to use because the above call should always succeed on the
        // supported platforms, and the size of a page will always fit within a `usize`.
        AtomicBitmap::new(
            len,
            NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use crate::bitmap::tests::test_bitmap;

    #[allow(clippy::undocumented_unsafe_blocks)]
    const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(128) };

    #[test]
    fn test_bitmap_basic() {
        // Test that bitmap size is properly rounded up.
        let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE);
        assert_eq!(a.len(), 9);

        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 8);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        #[allow(clippy::redundant_clone)]
        let copy_b = b.clone();
        assert!(copy_b.is_addr_set(256));
        assert!(!copy_b.is_addr_set(384));

        b.reset();
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.set_addr_range(128, 129);
        let v = b.get_and_reset();

        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        assert_eq!(v.len(), 1);
        assert_eq!(v[0], 0b110);
    }

    #[test]
    fn test_bitmap_reset() {
        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 8);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.reset_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));
    }

    #[test]
    fn test_bitmap_out_of_range() {
        let b = AtomicBitmap::new(1024, NonZeroUsize::MIN);
        // Set a partial range that goes beyond the end of the bitmap
        b.set_addr_range(768, 512);
        assert!(b.is_addr_set(768));
        // The bitmap is never set beyond its end.
        assert!(!b.is_addr_set(1024));
        assert!(!b.is_addr_set(1152));
    }

    #[test]
    fn test_bitmap_impl() {
        let b = AtomicBitmap::new(0x800, DEFAULT_PAGE_SIZE);
        test_bitmap(&b);
    }

    #[test]
    fn test_bitmap_enlarge() {
        let mut b = AtomicBitmap::new(8 * 1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 64);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.reset_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));
        b.set_addr_range(128, 129);
        b.enlarge(8 * 1024);
        for i in 65..128 {
            assert!(!b.is_bit_set(i));
        }
        assert_eq!(b.len(), 128);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.set_bit(55);
        assert!(b.is_bit_set(55));
        for i in 65..128 {
            b.set_bit(i);
        }
        for i in 65..128 {
            assert!(b.is_bit_set(i));
        }
        b.reset_addr_range(0, 16 * 1024);
        for i in 0..128 {
            assert!(!b.is_bit_set(i));
        }
    }
}