vm_memory/bitmap/backend/atomic_bitmap.rs

// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Bitmap backend implementation based on atomic integers.

use std::num::NonZeroUsize;
use std::sync::atomic::{AtomicU64, Ordering};

use crate::bitmap::{Bitmap, NewBitmap, RefSlice, WithBitmapSlice};

/// `AtomicBitmap` implements a simple bit map on the page level with test and set operations.
/// It is page-size aware, so it converts addresses to page numbers before setting or clearing
/// the bits.
#[derive(Debug)]
pub struct AtomicBitmap {
    map: Vec<AtomicU64>,
    size: usize,
    byte_size: usize,
    page_size: NonZeroUsize,
}

#[allow(clippy::len_without_is_empty)]
impl AtomicBitmap {
    /// Create a new bitmap covering `byte_size` bytes, with one bit per page. The page count
    /// is effectively rounded up, and the backing vector is sized to the next multiple of 64 bits.
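    ///
    /// A minimal usage sketch (assuming `AtomicBitmap` is re-exported from
    /// `vm_memory::bitmap`, which may require the crate's `backend-bitmap` feature):
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// // 1025 bytes at a 128-byte page size cover 9 pages, so 9 bits are allocated.
    /// let bitmap = AtomicBitmap::new(1025, NonZeroUsize::new(128).unwrap());
    /// assert_eq!(bitmap.len(), 9);
    /// ```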
    pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self {
        let num_pages = byte_size.div_ceil(page_size.get());
        let map_size = num_pages.div_ceil(u64::BITS as usize);
        let map: Vec<AtomicU64> = (0..map_size).map(|_| AtomicU64::new(0)).collect();

        AtomicBitmap {
            map,
            size: num_pages,
            byte_size,
            page_size,
        }
    }

    /// Enlarge this bitmap with enough bits to track `additional_size` additional bytes at page granularity.
    /// New bits are initialized to zero.
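    ///
    /// A small sketch (same re-export assumption as the example on [`AtomicBitmap::new`]):
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let mut bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// assert_eq!(bitmap.len(), 8);
    /// // Track another 1024 bytes; the 8 new bits start out clear.
    /// bitmap.enlarge(1024);
    /// assert_eq!(bitmap.len(), 16);
    /// assert!(!bitmap.is_bit_set(8));
    /// ```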
    pub fn enlarge(&mut self, additional_size: usize) {
        self.byte_size += additional_size;
        self.size = self.byte_size.div_ceil(self.page_size.get());
        let map_size = self.size.div_ceil(u64::BITS as usize);
        self.map.resize_with(map_size, Default::default);
    }

    /// Is bit `index` set? Bits outside the range of the bitmap are always unset.
    pub fn is_bit_set(&self, index: usize) -> bool {
        if index < self.size {
            (self.map[index >> 6].load(Ordering::Acquire) & (1 << (index & 63))) != 0
        } else {
            // Out-of-range bits are always unset.
            false
        }
    }

    /// Is the bit corresponding to address `addr` set?
    pub fn is_addr_set(&self, addr: usize) -> bool {
        self.is_bit_set(addr / self.page_size)
    }

    /// Set a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap
    /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds
    /// to address `start_addr + len - 1`.
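    ///
    /// For example (same re-export assumption as the example on [`AtomicBitmap::new`]):
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// // Bytes 128..=256 touch pages 1 and 2, so bits 1 and 2 get set.
    /// bitmap.set_addr_range(128, 129);
    /// assert!(!bitmap.is_addr_set(0));
    /// assert!(bitmap.is_addr_set(128));
    /// assert!(bitmap.is_addr_set(256));
    /// ```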
    pub fn set_addr_range(&self, start_addr: usize, len: usize) {
        self.set_reset_addr_range(start_addr, len, true);
    }

    // Set or reset a range of `len` bytes starting at `start_addr`. The `set` parameter
    // selects the operation: if `true`, the bits in the range are set to one; otherwise
    // they are cleared to zero.
    fn set_reset_addr_range(&self, start_addr: usize, len: usize, set: bool) {
        // Return early in the unlikely event that `len == 0` so the `len - 1` computation
        // below does not underflow.
        if len == 0 {
            return;
        }

        let first_bit = start_addr / self.page_size;
        // Handle input ranges where `start_addr + len - 1` would otherwise overflow a `usize`
        // by ignoring pages at invalid addresses.
        let last_bit = start_addr.saturating_add(len - 1) / self.page_size;
        for n in first_bit..=last_bit {
            if n >= self.size {
                // Attempts to set bits beyond the end of the bitmap are simply ignored.
                break;
            }
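            // Bit `n` lives in 64-bit word `n >> 6`, at bit position `n & 63` within it.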
            if set {
                self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
            } else {
                self.map[n >> 6].fetch_and(!(1 << (n & 63)), Ordering::SeqCst);
            }
        }
    }

    /// Reset a range of `len` bytes starting at `start_addr`. The first bit cleared in the
    /// bitmap is for the page corresponding to `start_addr`, and the last bit that we clear
    /// corresponds to address `start_addr + len - 1`.
    pub fn reset_addr_range(&self, start_addr: usize, len: usize) {
        self.set_reset_addr_range(start_addr, len, false);
    }

    /// Set the bit at the given `index`.
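    ///
    /// A short sketch (same re-export assumption as the example on [`AtomicBitmap::new`]):
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_bit(5);
    /// assert!(bitmap.is_bit_set(5));
    /// // Out-of-range indices are silently ignored.
    /// bitmap.set_bit(1000);
    /// assert!(!bitmap.is_bit_set(1000));
    /// ```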
    pub fn set_bit(&self, index: usize) {
        if index >= self.size {
            // Attempts to set bits beyond the end of the bitmap are simply ignored.
            return;
        }
        self.map[index >> 6].fetch_or(1 << (index & 63), Ordering::SeqCst);
    }

    /// Reset the bit at the given `index`.
    pub fn reset_bit(&self, index: usize) {
        if index >= self.size {
            // Attempts to reset bits beyond the end of the bitmap are simply ignored.
            return;
        }
        self.map[index >> 6].fetch_and(!(1 << (index & 63)), Ordering::SeqCst);
    }

    /// Get the length of the bitmap in bits (i.e. how many pages it can represent).
    pub fn len(&self) -> usize {
        self.size
    }

    /// Get the size in bytes, i.e. how many bytes the bitmap can represent, one bit per page.
    pub fn byte_size(&self) -> usize {
        self.byte_size
    }

    /// Atomically get and reset the dirty page bitmap.
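    ///
    /// A minimal sketch (same re-export assumption as the example on [`AtomicBitmap::new`]):
    ///
    /// ```
    /// use std::num::NonZeroUsize;
    /// use vm_memory::bitmap::AtomicBitmap;
    ///
    /// let bitmap = AtomicBitmap::new(1024, NonZeroUsize::new(128).unwrap());
    /// bitmap.set_addr_range(128, 129);
    /// // Harvest the dirty words and clear the bitmap in one pass.
    /// let words = bitmap.get_and_reset();
    /// assert_eq!(words[0], 0b110);
    /// assert!(!bitmap.is_addr_set(128));
    /// ```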
    pub fn get_and_reset(&self) -> Vec<u64> {
        self.map
            .iter()
            .map(|u| u.fetch_and(0, Ordering::SeqCst))
            .collect()
    }

    /// Reset all bitmap bits to 0.
    pub fn reset(&self) {
        for it in self.map.iter() {
            it.store(0, Ordering::Release);
        }
    }
}

impl Clone for AtomicBitmap {
    fn clone(&self) -> Self {
        let map = self
            .map
            .iter()
            .map(|i| i.load(Ordering::Acquire))
            .map(AtomicU64::new)
            .collect();
        AtomicBitmap {
            map,
            size: self.size,
            byte_size: self.byte_size,
            page_size: self.page_size,
        }
    }
}

impl<'a> WithBitmapSlice<'a> for AtomicBitmap {
    type S = RefSlice<'a, Self>;
}

impl Bitmap for AtomicBitmap {
    fn mark_dirty(&self, offset: usize, len: usize) {
        self.set_addr_range(offset, len)
    }

    fn dirty_at(&self, offset: usize) -> bool {
        self.is_addr_set(offset)
    }

    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice<'_>>::S {
        RefSlice::new(self, offset)
    }
}

impl Default for AtomicBitmap {
    fn default() -> Self {
        // `0x1000` is non-zero, so the `unwrap` in this `const` block cannot fail.
        AtomicBitmap::new(0, const { NonZeroUsize::new(0x1000).unwrap() })
    }
}

impl NewBitmap for AtomicBitmap {
    fn with_len(len: usize) -> Self {
        #[cfg(target_family = "unix")]
        // SAFETY: There's no unsafe potential in calling this function.
        let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };

        #[cfg(target_family = "windows")]
        let page_size = {
            use winapi::um::sysinfoapi::GetSystemInfo;
            let mut sysinfo = std::mem::MaybeUninit::zeroed();
            // SAFETY: It's safe to call `GetSystemInfo` as `sysinfo` is rightly sized
            // allocated memory.
            unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) };
            // SAFETY: It's safe to call `assume_init` as `GetSystemInfo` initializes `sysinfo`.
            unsafe { sysinfo.assume_init().dwPageSize }
        };

        // The `unwrap` is safe to use because the above call should always succeed on the
        // supported platforms, and the size of a page will always fit within a `usize`.
        AtomicBitmap::new(
            len,
            NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use crate::bitmap::tests::test_bitmap;

    const DEFAULT_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(128).unwrap();

    #[test]
    fn test_bitmap_basic() {
        // Test that bitmap size is properly rounded up.
        let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE);
        assert_eq!(a.len(), 9);

        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 8);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        #[allow(clippy::redundant_clone)]
        let copy_b = b.clone();
        assert!(copy_b.is_addr_set(256));
        assert!(!copy_b.is_addr_set(384));

        b.reset();
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.set_addr_range(128, 129);
        let v = b.get_and_reset();

        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        assert_eq!(v.len(), 1);
        assert_eq!(v[0], 0b110);
    }

    #[test]
    fn test_bitmap_reset() {
        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 8);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.reset_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));
    }

    #[test]
    fn test_bitmap_out_of_range() {
        let b = AtomicBitmap::new(1024, NonZeroUsize::MIN);
        // Set a partial range that goes beyond the end of the bitmap
        b.set_addr_range(768, 512);
        assert!(b.is_addr_set(768));
        // The bitmap is never set beyond its end.
        assert!(!b.is_addr_set(1024));
        assert!(!b.is_addr_set(1152));
    }

    #[test]
    fn test_bitmap_impl() {
        let b = AtomicBitmap::new(0x800, DEFAULT_PAGE_SIZE);
        test_bitmap(&b);
    }

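    // A minimal sketch of driving the bitmap through the generic `Bitmap` trait, the way
    // dirty-tracking callers would; `mark_dirty` and `dirty_at` forward to `set_addr_range`
    // and `is_addr_set` respectively.
    #[test]
    fn test_bitmap_trait_usage() {
        let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
        b.mark_dirty(128, 129);
        assert!(b.dirty_at(128));
        assert!(b.dirty_at(256));
        assert!(!b.dirty_at(384));
    }
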
    #[test]
    fn test_bitmap_enlarge() {
        let mut b = AtomicBitmap::new(8 * 1024, DEFAULT_PAGE_SIZE);
        assert_eq!(b.len(), 64);
        b.set_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.reset_addr_range(128, 129);
        assert!(!b.is_addr_set(0));
        assert!(!b.is_addr_set(128));
        assert!(!b.is_addr_set(256));
        assert!(!b.is_addr_set(384));
        b.set_addr_range(128, 129);
        b.enlarge(8 * 1024);
        for i in 65..128 {
            assert!(!b.is_bit_set(i));
        }
        assert_eq!(b.len(), 128);
        assert!(!b.is_addr_set(0));
        assert!(b.is_addr_set(128));
        assert!(b.is_addr_set(256));
        assert!(!b.is_addr_set(384));

        b.set_bit(55);
        assert!(b.is_bit_set(55));
        for i in 65..128 {
            b.set_bit(i);
        }
        for i in 65..128 {
            assert!(b.is_bit_set(i));
        }
        b.reset_addr_range(0, 16 * 1024);
        for i in 0..128 {
            assert!(!b.is_bit_set(i));
        }
    }
}