// vm_memory/bitmap/mod.rs

1// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
3
4//! This module holds abstractions that enable tracking the areas dirtied by writes of a specified
5//! length to a given offset. In particular, this is used to track write accesses within a
6//! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the
7//! global view for an entire `GuestMemoryBackend` object.
8
9#[cfg(feature = "backend-bitmap")]
10mod backend;
11
12use std::fmt::Debug;
13
14use crate::{GuestMemoryBackend, GuestMemoryRegion};
15
16#[cfg(feature = "backend-bitmap")]
17pub use backend::{ArcSlice, AtomicBitmap, RefSlice};
18
/// Trait implemented by types that support creating `BitmapSlice` objects.
///
/// This helper trait exists so that `Bitmap` can expose an effectively
/// lifetime-parameterized associated slice type: `Bitmap` requires
/// `for<'a> WithBitmapSlice<'a>` instead of carrying a lifetime parameter itself.
pub trait WithBitmapSlice<'a> {
    /// Type of the bitmap slice.
    type S: BitmapSlice;
}
24
/// Trait used to represent that a `BitmapSlice` is a `Bitmap` itself, but also satisfies the
/// restriction that slices created from it have the same type as `Self`.
// Marker trait with no methods of its own: the `for<'a> WithBitmapSlice<'a, S = Self>` bound
// guarantees that re-slicing a slice always yields the same slice type.
pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {}
28
/// Common bitmap operations. Using Higher-Rank Trait Bounds (HRTBs) to effectively define
/// an associated type that has a lifetime parameter, without tagging the `Bitmap` trait with
/// a lifetime as well.
///
/// Using an associated type allows implementing the `Bitmap` and `BitmapSlice` functionality
/// as a zero-cost abstraction when providing trivial implementations such as the one
/// defined for `()`.
// These methods represent the core functionality that's required by `vm-memory` abstractions
// to implement generic tracking logic, as well as tests that can be reused by different backends.
pub trait Bitmap: for<'a> WithBitmapSlice<'a> {
    /// Mark the memory range specified by the given `offset` and `len` as dirtied.
    ///
    /// Note that this takes `&self`, so implementations that actually record state are
    /// expected to use interior mutability (e.g. atomics, as in `AtomicBitmap`).
    fn mark_dirty(&self, offset: usize, len: usize);

    /// Check whether the specified `offset` is marked as dirty.
    fn dirty_at(&self, offset: usize) -> bool;

    /// Return a `<Self as WithBitmapSlice>::S` slice of the current bitmap, starting at
    /// the specified `offset`.
    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice<'_>>::S;
}
49
/// A `Bitmap` that can be created starting from an initial size.
// Cannot be a part of the `Bitmap` trait itself because it cannot be implemented for
// `BitmapSlice` types (a slice refers to existing tracking state and cannot be built
// from just a length).
pub trait NewBitmap: Bitmap + Default {
    /// Create a new object based on the specified length in bytes.
    fn with_len(len: usize) -> Self;
}
56
/// A no-op `Bitmap` implementation that can be provided for backends that do not actually
/// require the tracking functionality.
impl WithBitmapSlice<'_> for () {
    // Slicing the unit bitmap simply yields `()` again.
    type S = Self;
}
62
// `()` trivially satisfies the `BitmapSlice` marker requirements.
impl BitmapSlice for () {}
64
// The unit type disables tracking entirely: marking is a no-op, no offset is ever
// reported as dirty, and slicing yields `()` again.
impl Bitmap for () {
    fn mark_dirty(&self, _offset: usize, _len: usize) {}

    fn dirty_at(&self, _offset: usize) -> bool {
        false
    }

    fn slice_at(&self, _offset: usize) -> Self {}
}
74
// Any requested length produces the same stateless unit bitmap.
impl NewBitmap for () {
    fn with_len(_len: usize) -> Self {}
}
78
/// A `Bitmap` and `BitmapSlice` implementation for `Option<B>`.
impl<'a, B> WithBitmapSlice<'a> for Option<B>
where
    B: WithBitmapSlice<'a>,
{
    // Slicing an optional bitmap yields an optional slice.
    type S = Option<B::S>;
}
86
// `Option<B>` is itself a valid slice type whenever `B` is.
impl<B: BitmapSlice> BitmapSlice for Option<B> {}
88
89impl<B: Bitmap> Bitmap for Option<B> {
90    fn mark_dirty(&self, offset: usize, len: usize) {
91        if let Some(inner) = self {
92            inner.mark_dirty(offset, len)
93        }
94    }
95
96    fn dirty_at(&self, offset: usize) -> bool {
97        if let Some(inner) = self {
98            return inner.dirty_at(offset);
99        }
100        false
101    }
102
103    fn slice_at(&self, offset: usize) -> Option<<B as WithBitmapSlice<'_>>::S> {
104        if let Some(inner) = self {
105            return Some(inner.slice_at(offset));
106        }
107        None
108    }
109}
110
/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
/// an object `B: WithBitmapSlice<'a>`.
// Shorthand for the verbose `<B as WithBitmapSlice<'a>>::S` projection.
pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;

/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
/// the memory regions of an object `M: GuestMemoryBackend`.
// Goes through the backend's region type `M::R` to reach that region's bitmap type.
pub type MS<'a, M> = BS<'a, <<M as GuestMemoryBackend>::R as GuestMemoryRegion>::B>;
118
#[cfg(test)]
#[cfg(feature = "backend-bitmap")]
pub(crate) mod tests {
    use super::*;

    use std::mem::size_of_val;
    use std::sync::atomic::Ordering;

    use crate::{Bytes, VolatileMemory};
    #[cfg(feature = "backend-mmap")]
    use crate::{GuestAddress, MemoryRegionAddress};

    // Helper method to check whether a specified range is clean.
    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| !b.dirty_at(offset))
    }

    // Helper method to check whether a specified range is dirty.
    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| b.dirty_at(offset))
    }

    // Check that `[start, start + len)` is entirely clean (when `clean == true`) or
    // entirely dirty (when `clean == false`).
    pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
        if clean {
            range_is_clean(b, start, len)
        } else {
            range_is_dirty(b, start, len)
        }
    }

    // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
    // an area of length at least 0x800.
    pub fn test_bitmap<B: Bitmap>(b: &B) {
        let len = 0x800;
        let dirty_offset = 0x400;
        let dirty_len = 0x100;

        // Some basic checks.
        let s = b.slice_at(dirty_offset);

        assert!(range_is_clean(b, 0, len));
        assert!(range_is_clean(&s, 0, dirty_len));

        b.mark_dirty(dirty_offset, dirty_len);
        assert!(range_is_dirty(b, dirty_offset, dirty_len));
        // The slice shares state with `b`, so the write must be visible through it too.
        assert!(range_is_dirty(&s, 0, dirty_len));
    }

    // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
    // The `step` parameter represents the offset that's added to the current address after
    // performing each access. It provides finer grained control when testing tracking
    // implementations that aggregate entire ranges for accounting purposes (for example, doing
    // tracking at the page level).
    pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        M: Bytes<A>,
        <M as Bytes<A>>::E: Debug,
    {
        const BUF_SIZE: usize = 1024;
        // Size the buffer with `BUF_SIZE` itself (the original hard-coded `1024` here), so the
        // buffer length and the checked range cannot silently drift apart.
        let buf = vec![1u8; BUF_SIZE];
        let mut dirty_offset = 0x1000;

        let val = 1u64;

        // Test `write`.
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
        assert_eq!(
            bytes
                .write(buf.as_slice(), address_fn(dirty_offset))
                .unwrap(),
            BUF_SIZE
        );
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));

        // Test `write_slice`.
        dirty_offset += step;
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
        bytes
            .write_slice(buf.as_slice(), address_fn(dirty_offset))
            .unwrap();
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));

        // Test `write_obj`. Only `size_of::<u64>()` bytes are written, but for page-granular
        // tracking implementations the whole `BUF_SIZE` range within the page becomes dirty.
        dirty_offset += step;
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
        bytes.write_obj(val, address_fn(dirty_offset)).unwrap();
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));

        // Test `store`.
        dirty_offset += step;
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
        bytes
            .store(val, address_fn(dirty_offset), Ordering::Relaxed)
            .unwrap();
        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));
    }

    // This function and the next are currently conditionally compiled because we only use
    // them to test the mmap-based backend implementations for now. Going forward, the generic
    // test functions defined here can be placed in a separate module (i.e. `test_utilities`)
    // which is gated by a feature and can be used for testing purposes by other crates as well.
    #[cfg(feature = "backend-mmap")]
    fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
        let dirty_addr = MemoryRegionAddress(0x0);
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let slice = region.get_slice(dirty_addr, dirty_len).unwrap();

        assert!(range_is_clean(&region.bitmap(), 0, region.len() as usize));
        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));

        region.write_obj(val, dirty_addr).unwrap();

        assert!(range_is_dirty(
            &region.bitmap(),
            dirty_addr.0 as usize,
            dirty_len
        ));

        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));

        // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same
        // `region` handle because `test_bytes` starts performing writes after the range that's
        // been already dirtied in the first part of this test.
        test_bytes(
            region,
            |r: &R, start: usize, len: usize, clean: bool| {
                check_range(&r.bitmap(), start, len, clean)
            },
            |offset| MemoryRegionAddress(offset as u64),
            0x1000,
        );
    }

    #[cfg(feature = "backend-mmap")]
    // Assumptions about the `M` generated by `f`: it starts out completely clean, contains a
    // region that covers guest address 0, and is large enough for every access performed below
    // (the `test_bytes` invocation writes up to roughly offset 0x4000 + 1 KiB) — confirm
    // against the callers providing `f`.
    pub fn test_guest_memory_and_region<M, F>(f: F)
    where
        M: GuestMemoryBackend,
        F: Fn() -> M,
    {
        let m = f();
        let dirty_addr = GuestAddress(0x1000);
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
        let mut slices = m.get_slices(dirty_addr, dirty_len);
        let slice = slices.next().unwrap().unwrap();
        // The accessed range is expected to fall within a single region.
        assert!(slices.next().is_none());

        assert!(range_is_clean(&region.bitmap(), 0, region.len() as usize));
        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));

        m.write_obj(val, dirty_addr).unwrap();

        assert!(range_is_dirty(
            &region.bitmap(),
            region_addr.0 as usize,
            dirty_len
        ));

        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));

        // Now let's invoke the tests for the inner `GuestMemoryRegion` type. Use a fresh
        // `f()` so the region starts out clean again.
        test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());

        // Finally, let's invoke the generic tests for `Bytes`.
        let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
            m.get_slices(GuestAddress(start as u64), len).all(|r| {
                let slice = r.unwrap();
                check_range(slice.bitmap(), 0, slice.len(), clean)
            })
        };

        test_bytes(
            &f(),
            check_range_closure,
            |offset| GuestAddress(offset as u64),
            0x1000,
        );
    }

    // Generic checks for a `VolatileMemory` implementation: writes through `write_obj`,
    // `get_ref`, and `get_array_ref` must all be reflected in the bitmaps of the slices
    // covering the accessed ranges. Requires `m` to span at least 0x8000 bytes and start clean.
    pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
        assert!(m.len() >= 0x8000);

        let dirty_offset = 0x1000;
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let get_ref_offset = 0x2000;
        let array_ref_offset = 0x3000;

        let s1 = m.as_volatile_slice();
        let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();

        assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
        assert!(range_is_clean(s2.bitmap(), 0, s2.len()));

        s1.write_obj(val, dirty_offset).unwrap();

        // `s2` starts at `dirty_offset`, so the same write shows up at offset 0 there.
        assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
        assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));

        let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
        assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
        v_ref.store(val);
        assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));

        let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
        assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
        arr_ref.store(0, val);
        assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
    }
}