vm_memory/bitmap/mod.rs

// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! This module holds abstractions that enable tracking the areas dirtied by writes of a specified
//! length to a given offset. In particular, this is used to track write accesses within a
//! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the
//! global view for an entire `GuestMemory` object.
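//!
//! # Example
//!
//! A minimal sketch of how the `Bitmap` interface is driven, generic over any backend
//! (with the no-op `()` implementation, the dirty queries below simply return `false`):
//!
//! ```ignore
//! use vm_memory::bitmap::Bitmap;
//!
//! fn track_write<B: Bitmap>(b: &B) {
//!     // Record a 0x100-byte write at offset 0x400 ...
//!     b.mark_dirty(0x400, 0x100);
//!     // ... which a tracking backend then reports as dirty.
//!     let dirty = b.dirty_at(0x400);
//!
//!     // Slices are re-based: offset 0 of `s` corresponds to offset 0x400 of `b`.
//!     let s = b.slice_at(0x400);
//!     assert_eq!(s.dirty_at(0), dirty);
//! }
//! ```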

#[cfg(any(test, feature = "backend-bitmap"))]
mod backend;

use std::fmt::Debug;

use crate::{GuestMemory, GuestMemoryRegion};

#[cfg(any(test, feature = "backend-bitmap"))]
pub use backend::{ArcSlice, AtomicBitmap, RefSlice};

/// Trait implemented by types that support creating `BitmapSlice` objects.
pub trait WithBitmapSlice<'a> {
    /// Type of the bitmap slice.
    type S: BitmapSlice;
}

/// Trait used to represent that a `BitmapSlice` is a `Bitmap` itself, but also satisfies the
/// restriction that slices created from it have the same type as `Self`.
pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {}

/// Common bitmap operations. Using Higher-Rank Trait Bounds (HRTBs) to effectively define
/// an associated type that has a lifetime parameter, without tagging the `Bitmap` trait with
/// a lifetime as well.
///
/// Using an associated type allows implementing the `Bitmap` and `BitmapSlice` functionality
/// as a zero-cost abstraction when providing trivial implementations such as the one
/// defined for `()`.
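///
/// # Example
///
/// A minimal sketch of the pattern for a custom implementation (`MySlice` is a
/// hypothetical type, shown only to illustrate how the bounds fit together):
///
/// ```ignore
/// use vm_memory::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice};
///
/// // A hypothetical slice type; cloning is expected to share the underlying storage.
/// #[derive(Clone, Debug)]
/// struct MySlice;
///
/// // Slices created from `MySlice` have the same type, satisfying the `BitmapSlice`
/// // requirement that `S = Self` for every lifetime.
/// impl<'a> WithBitmapSlice<'a> for MySlice {
///     type S = Self;
/// }
///
/// impl BitmapSlice for MySlice {}
///
/// impl Bitmap for MySlice {
///     fn mark_dirty(&self, _offset: usize, _len: usize) { /* record the write */ }
///     fn dirty_at(&self, _offset: usize) -> bool { false }
///     fn slice_at(&self, _offset: usize) -> Self { self.clone() }
/// }
/// ```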
// These methods represent the core functionality that's required by `vm-memory` abstractions
// to implement generic tracking logic, as well as tests that can be reused by different backends.
pub trait Bitmap: for<'a> WithBitmapSlice<'a> {
    /// Mark the memory range specified by the given `offset` and `len` as dirtied.
    fn mark_dirty(&self, offset: usize, len: usize);

    /// Check whether the specified `offset` is marked as dirty.
    fn dirty_at(&self, offset: usize) -> bool;

    /// Return a `<Self as WithBitmapSlice>::S` slice of the current bitmap, starting at
    /// the specified `offset`.
    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S;
}

/// A no-op `Bitmap` implementation that can be provided for backends that do not actually
/// require the tracking functionality.
impl WithBitmapSlice<'_> for () {
    type S = Self;
}

impl BitmapSlice for () {}

impl Bitmap for () {
    fn mark_dirty(&self, _offset: usize, _len: usize) {}

    fn dirty_at(&self, _offset: usize) -> bool {
        false
    }

    fn slice_at(&self, _offset: usize) -> Self {}
}

/// A `Bitmap` and `BitmapSlice` implementation for `Option<B>`.
impl<'a, B> WithBitmapSlice<'a> for Option<B>
where
    B: WithBitmapSlice<'a>,
{
    type S = Option<B::S>;
}

impl<B: BitmapSlice> BitmapSlice for Option<B> {}

impl<B: Bitmap> Bitmap for Option<B> {
    fn mark_dirty(&self, offset: usize, len: usize) {
        if let Some(inner) = self {
            inner.mark_dirty(offset, len)
        }
    }

    fn dirty_at(&self, offset: usize) -> bool {
        if let Some(inner) = self {
            return inner.dirty_at(offset);
        }
        false
    }

    fn slice_at(&self, offset: usize) -> Option<<B as WithBitmapSlice>::S> {
        if let Some(inner) = self {
            return Some(inner.slice_at(offset));
        }
        None
    }
}

/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
/// an object `B: WithBitmapSlice<'a>`.
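///
/// For instance, a generic function can use `BS` to name the slice type produced by
/// `slice_at` (a minimal sketch; `dirty_slice` is a hypothetical helper):
///
/// ```ignore
/// use vm_memory::bitmap::{Bitmap, BS};
///
/// fn dirty_slice<'a, B: Bitmap>(b: &'a B, offset: usize) -> BS<'a, B> {
///     b.slice_at(offset)
/// }
/// ```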
pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;

/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
/// the memory regions of an object `M: GuestMemory`.
pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    use std::io::Cursor;
    use std::marker::PhantomData;
    use std::mem::size_of_val;
    use std::result::Result;
    use std::sync::atomic::Ordering;

    use crate::{Bytes, VolatileMemory};
    #[cfg(feature = "backend-mmap")]
    use crate::{GuestAddress, MemoryRegionAddress};

    // Helper method to check whether a specified range is clean.
    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| !b.dirty_at(offset))
    }

    // Helper method to check whether a specified range is dirty.
    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| b.dirty_at(offset))
    }

    pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
        if clean {
            range_is_clean(b, start, len)
        } else {
            range_is_dirty(b, start, len)
        }
    }

    // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
    // an area of length at least 0x800.
    pub fn test_bitmap<B: Bitmap>(b: &B) {
        let len = 0x800;
        let dirty_offset = 0x400;
        let dirty_len = 0x100;

        // Some basic checks.
        let s = b.slice_at(dirty_offset);

        assert!(range_is_clean(b, 0, len));
        assert!(range_is_clean(&s, 0, dirty_len));

        b.mark_dirty(dirty_offset, dirty_len);
        assert!(range_is_dirty(b, dirty_offset, dirty_len));
        assert!(range_is_dirty(&s, 0, dirty_len));
    }

    #[derive(Debug)]
    pub enum TestAccessError {
        RangeCleanCheck,
        RangeDirtyCheck,
    }

    // A helper object that implements auxiliary operations for testing `Bytes` implementations
    // in the context of dirty bitmap tracking.
    struct BytesHelper<F, G, M> {
        check_range_fn: F,
        address_fn: G,
        phantom: PhantomData<*const M>,
    }

    // `F` represents a closure that checks whether a specified range associated with the `Bytes`
    // object that's being tested is marked as dirty or not (depending on the value of the last
    // parameter). It has the following parameters:
    // - A reference to the `Bytes` implementation that's subject to testing.
    // - The offset of the range.
    // - The length of the range.
    // - Whether we are checking if the range is clean (when `true`) or dirty (when `false`).
    //
    // `G` represents a closure that translates an offset into an address value that's
    // relevant for the `Bytes` implementation being tested.
    impl<F, G, M, A> BytesHelper<F, G, M>
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        M: Bytes<A>,
    {
        fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
            (self.check_range_fn)(m, start, len, clean)
        }

        fn address(&self, offset: usize) -> A {
            (self.address_fn)(offset)
        }

        fn test_access<Op>(
            &self,
            bytes: &M,
            dirty_offset: usize,
            dirty_len: usize,
            op: Op,
        ) -> Result<(), TestAccessError>
        where
            Op: Fn(&M, A),
        {
            if !self.check_range(bytes, dirty_offset, dirty_len, true) {
                return Err(TestAccessError::RangeCleanCheck);
            }

            op(bytes, self.address(dirty_offset));

            if !self.check_range(bytes, dirty_offset, dirty_len, false) {
                return Err(TestAccessError::RangeDirtyCheck);
            }

            Ok(())
        }
    }

    // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
    // The `step` parameter represents the offset that's added to the current address after
    // performing each access. It provides finer-grained control when testing tracking
    // implementations that aggregate entire ranges for accounting purposes (for example, doing
    // tracking at the page level).
    pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        A: Copy,
        M: Bytes<A>,
        <M as Bytes<A>>::E: Debug,
    {
        const BUF_SIZE: usize = 1024;
        let buf = vec![1u8; BUF_SIZE];

        let val = 1u64;

        let h = BytesHelper {
            check_range_fn,
            address_fn,
            phantom: PhantomData,
        };

        let mut dirty_offset = 0x1000;

        // Test `write`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
        })
        .unwrap();
        dirty_offset += step;

        // Test `write_slice`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            m.write_slice(buf.as_slice(), addr).unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `write_obj`.
        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
            m.write_obj(val, addr).unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `read_from`.
        #[allow(deprecated)] // test of deprecated functions
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            assert_eq!(
                m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
                BUF_SIZE
            )
        })
        .unwrap();
        dirty_offset += step;

        // Test `read_exact_from`.
        #[allow(deprecated)] // test of deprecated functions
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
                .unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `store`.
        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
            m.store(val, addr, Ordering::Relaxed).unwrap()
        })
        .unwrap();
    }

    // This function and the next are conditionally compiled because, for now, we only use
    // them to test the mmap-based backend implementations. Going forward, the generic test
    // functions defined here can be placed in a separate module (e.g. `test_utilities`) which
    // is gated by a feature and can be used for testing purposes by other crates as well.
    #[cfg(feature = "backend-mmap")]
    fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
        let dirty_addr = MemoryRegionAddress(0x0);
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let slice = region.get_slice(dirty_addr, dirty_len).unwrap();

        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));

        region.write_obj(val, dirty_addr).unwrap();

        assert!(range_is_dirty(
            region.bitmap(),
            dirty_addr.0 as usize,
            dirty_len
        ));

        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));

        // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same
        // `region` handle because `test_bytes` starts performing writes after the range that's
        // already been dirtied in the first part of this test.
        test_bytes(
            region,
            |r: &R, start: usize, len: usize, clean: bool| {
                check_range(r.bitmap(), start, len, clean)
            },
            |offset| MemoryRegionAddress(offset as u64),
            0x1000,
        );
    }

    #[cfg(feature = "backend-mmap")]
    // Assumptions about M generated by f ...
    pub fn test_guest_memory_and_region<M, F>(f: F)
    where
        M: GuestMemory,
        F: Fn() -> M,
    {
        let m = f();
        let dirty_addr = GuestAddress(0x1000);
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
        let slice = m.get_slice(dirty_addr, dirty_len).unwrap();

        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));

        m.write_obj(val, dirty_addr).unwrap();

        assert!(range_is_dirty(
            region.bitmap(),
            region_addr.0 as usize,
            dirty_len
        ));

        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));

        // Now let's invoke the tests for the inner `GuestMemoryRegion` type.
        test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());

        // Finally, let's invoke the generic tests for `Bytes`.
        let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
            let mut check_result = true;
            m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| {
                if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) {
                    check_result = false;
                }
                Ok(size)
            })
            .unwrap();

            check_result
        };

        test_bytes(
            &f(),
            check_range_closure,
            |offset| GuestAddress(offset as u64),
            0x1000,
        );
    }

    pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
        assert!(m.len() >= 0x8000);

        let dirty_offset = 0x1000;
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let get_ref_offset = 0x2000;
        let array_ref_offset = 0x3000;

        let s1 = m.as_volatile_slice();
        let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();

        assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
        assert!(range_is_clean(s2.bitmap(), 0, s2.len()));

        s1.write_obj(val, dirty_offset).unwrap();

        assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
        assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));

        let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
        assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
        v_ref.store(val);
        assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));

        let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
        assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
        arr_ref.store(0, val);
        assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
    }
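
    // Sanity check for the `Option<B>` implementation above: a `None` bitmap behaves
    // like the no-op `()` implementation, so writes are silently discarded and no
    // offset is ever reported as dirty.
    #[test]
    fn test_option_bitmap_none_is_nop() {
        let b: Option<()> = None;
        b.mark_dirty(0, 0x100);
        assert!(range_is_clean(&b, 0, 0x100));
        assert!(b.slice_at(0).is_none());
    }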
}