vm_memory/bitmap/backend/slice.rs

1// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
3
4//! Contains a generic implementation of `BitmapSlice`.
5
6use std::fmt::{self, Debug};
7use std::ops::Deref;
8use std::sync::Arc;
9
10use crate::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice};
11
/// Represents a slice into a `Bitmap` object, starting at `base_offset`.
///
/// All `Bitmap` operations on the slice are forwarded to the wrapped object
/// after translating offsets by `base_offset`.
#[derive(Clone, Copy)]
pub struct BaseSlice<B> {
    // Handle to the underlying bitmap (e.g. `&'a B` or `Arc<B>`; see the
    // `RefSlice`/`ArcSlice` aliases below).
    inner: B,
    // Offset, in the coordinate system of the underlying bitmap, where this
    // slice begins; added to every offset passed to the `Bitmap` methods.
    base_offset: usize,
}
18
19impl<B> BaseSlice<B> {
20    /// Create a new `BitmapSlice`, starting at the specified `offset`.
21    pub fn new(inner: B, offset: usize) -> Self {
22        BaseSlice {
23            inner,
24            base_offset: offset,
25        }
26    }
27}
28
// A `BaseSlice` is its own slice type: slicing a slice yields another
// `BaseSlice` over the same underlying bitmap (see `slice_at` below).
impl<B> WithBitmapSlice<'_> for BaseSlice<B>
where
    B: Clone + Deref,
    B::Target: Bitmap,
{
    type S = Self;
}
36
// Marker trait impl: `BaseSlice` qualifies as a `BitmapSlice` whenever the
// wrapped handle is cloneable and dereferences to a `Bitmap`.
impl<B> BitmapSlice for BaseSlice<B>
where
    B: Clone + Deref,
    B::Target: Bitmap,
{
}
43
44impl<B> Bitmap for BaseSlice<B>
45where
46    B: Clone + Deref,
47    B::Target: Bitmap,
48{
49    /// Mark the memory range specified by the given `offset` (relative to the base offset of
50    /// the slice) and `len` as dirtied.
51    fn mark_dirty(&self, offset: usize, len: usize) {
52        // The `Bitmap` operations are supposed to accompany guest memory accesses defined by the
53        // same parameters (i.e. offset & length), so we use simple wrapping arithmetic instead of
54        // performing additional checks. If an overflow would occur, we simply end up marking some
55        // other region as dirty (which is just a false positive) instead of a region that could
56        // not have been accessed to begin with.
57        self.inner
58            .mark_dirty(self.base_offset.wrapping_add(offset), len)
59    }
60
61    fn dirty_at(&self, offset: usize) -> bool {
62        self.inner.dirty_at(self.base_offset.wrapping_add(offset))
63    }
64
65    /// Create a new `BitmapSlice` starting from the specified `offset` into the current slice.
66    fn slice_at(&self, offset: usize) -> Self {
67        BaseSlice {
68            inner: self.inner.clone(),
69            base_offset: self.base_offset.wrapping_add(offset),
70        }
71    }
72}
73
74impl<B> Debug for BaseSlice<B> {
75    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
76        // Dummy impl for now.
77        write!(f, "(bitmap slice)")
78    }
79}
80
81impl<B: Default> Default for BaseSlice<B> {
82    fn default() -> Self {
83        BaseSlice {
84            inner: B::default(),
85            base_offset: 0,
86        }
87    }
88}
89
/// A `BitmapSlice` implementation that wraps a reference to a `Bitmap` object.
///
/// Suitable when the slice does not need to outlive the bitmap it borrows from.
pub type RefSlice<'a, B> = BaseSlice<&'a B>;

/// A `BitmapSlice` implementation that uses an `Arc` handle to a `Bitmap` object.
///
/// Suitable when the slice must share ownership of the underlying bitmap.
pub type ArcSlice<B> = BaseSlice<Arc<B>>;
95
#[cfg(test)]
mod tests {
    use super::*;

    use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap};
    use crate::bitmap::AtomicBitmap;
    use std::num::NonZeroUsize;

    #[test]
    fn test_slice() {
        const BITMAP_SIZE: usize = 0x800;
        const DIRTY_OFFSET: usize = 0x400;
        const DIRTY_LEN: usize = 0x100;

        {
            let bitmap = AtomicBitmap::new(BITMAP_SIZE, NonZeroUsize::MIN);
            // One slice covering the whole bitmap, and one starting exactly at
            // the region about to be dirtied.
            let full_slice = bitmap.slice_at(0);
            let offset_slice = bitmap.slice_at(DIRTY_OFFSET);

            assert!(range_is_clean(&full_slice, 0, BITMAP_SIZE));
            assert!(range_is_clean(&offset_slice, 0, DIRTY_LEN));

            bitmap.mark_dirty(DIRTY_OFFSET, DIRTY_LEN);

            // Both slices observe the dirtied range, each through its own
            // coordinate system.
            assert!(range_is_dirty(&full_slice, DIRTY_OFFSET, DIRTY_LEN));
            assert!(range_is_dirty(&offset_slice, 0, DIRTY_LEN));
        }

        {
            let bitmap = AtomicBitmap::new(BITMAP_SIZE, NonZeroUsize::MIN);
            test_bitmap(&bitmap.slice_at(0));
        }
    }
}