//! Abstractions that enable tracking the areas dirtied by writes of a specified
//! length to a given offset. In particular, this is used to track write accesses
//! performed through `GuestMemory` and `GuestMemoryRegion` objects, e.g. for
//! dirty page tracking.

#[cfg(any(test, feature = "backend-bitmap"))]
mod backend;

use std::fmt::Debug;

use crate::{GuestMemory, GuestMemoryRegion};

#[cfg(any(test, feature = "backend-bitmap"))]
pub use backend::{ArcSlice, AtomicBitmap, RefSlice};

/// Trait implemented by types that support creating `BitmapSlice` objects.
pub trait WithBitmapSlice<'a> {
    /// Type of the bitmap slice.
    type S: BitmapSlice;
}

/// Trait for a `Bitmap` that is itself a slice: cloning or re-slicing a
/// `BitmapSlice` must yield values of the same type as `Self`.
pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {}
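
/// Common bitmap operations. The `for<'a> WithBitmapSlice<'a>` bound (a
/// Higher-Rank Trait Bound) effectively gives `Bitmap` an associated slice type
/// with a lifetime parameter, without adding a lifetime parameter to the trait
/// itself. Using an associated type also keeps trivial implementations, such
/// as the one defined for `()`, zero-cost.
///
/// Below is a minimal sketch of an implementor. `ByteBitmap` is a hypothetical
/// type (not part of this crate) that keeps one dirty flag per tracked byte and
/// hands out the no-op `()` slice; the import path assumes the enclosing crate
/// is published as `vm_memory`.
///
/// ```
/// use std::cell::RefCell;
///
/// use vm_memory::bitmap::{Bitmap, WithBitmapSlice};
///
/// #[derive(Debug, Default)]
/// struct ByteBitmap {
///     // Interior mutability, because `mark_dirty` takes `&self`.
///     dirty: RefCell<Vec<bool>>,
/// }
///
/// impl<'a> WithBitmapSlice<'a> for ByteBitmap {
///     // For simplicity, this sketch does not define a real slice type.
///     type S = ();
/// }
///
/// impl Bitmap for ByteBitmap {
///     fn mark_dirty(&self, offset: usize, len: usize) {
///         let mut dirty = self.dirty.borrow_mut();
///         // Grow the backing storage on demand, then flag the range.
///         if dirty.len() < offset + len {
///             dirty.resize(offset + len, false);
///         }
///         dirty[offset..offset + len].fill(true);
///     }
///
///     fn dirty_at(&self, offset: usize) -> bool {
///         // Offsets past the end of the storage were never dirtied.
///         self.dirty.borrow().get(offset).copied().unwrap_or(false)
///     }
///
///     fn slice_at(&self, _offset: usize) {}
/// }
///
/// let b = ByteBitmap::default();
/// b.mark_dirty(0x10, 4);
/// assert!(b.dirty_at(0x12));
/// assert!(!b.dirty_at(0x20));
/// ```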
pub trait Bitmap: for<'a> WithBitmapSlice<'a> {
    /// Mark the memory range specified by the given `offset` and `len` as dirtied.
    fn mark_dirty(&self, offset: usize, len: usize);

    /// Check whether the specified `offset` is marked as dirty.
    fn dirty_at(&self, offset: usize) -> bool;

    /// Return a `<Self as WithBitmapSlice>::S` slice of the current bitmap,
    /// starting at the specified `offset`.
    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S;
}

/// A no-op `Bitmap` implementation for backends that do not require dirty
/// tracking: every operation compiles down to nothing.
impl<'a> WithBitmapSlice<'a> for () {
    type S = Self;
}

impl BitmapSlice for () {}

impl Bitmap for () {
    fn mark_dirty(&self, _offset: usize, _len: usize) {}

    fn dirty_at(&self, _offset: usize) -> bool {
        false
    }

    fn slice_at(&self, _offset: usize) -> Self {}
}

/// A `Bitmap` and `BitmapSlice` implementation for `Option<B>`: `None` behaves
/// like the no-op bitmap, while `Some(inner)` forwards every call to `inner`.
impl<'a, B> WithBitmapSlice<'a> for Option<B>
where
    B: WithBitmapSlice<'a>,
{
    type S = Option<B::S>;
}

impl<B: BitmapSlice> BitmapSlice for Option<B> {}

impl<B: Bitmap> Bitmap for Option<B> {
    fn mark_dirty(&self, offset: usize, len: usize) {
        if let Some(inner) = self {
            inner.mark_dirty(offset, len)
        }
    }

    fn dirty_at(&self, offset: usize) -> bool {
        if let Some(inner) = self {
            return inner.dirty_at(offset);
        }
        false
    }

    fn slice_at(&self, offset: usize) -> Option<<B as WithBitmapSlice>::S> {
        if let Some(inner) = self {
            return Some(inner.slice_at(offset));
        }
        None
    }
}
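
/// Helper type alias for referring to the `BitmapSlice` concrete type
/// associated with an object `B: WithBitmapSlice<'a>`.
///
/// A sketch of how the alias shortens generic signatures: the `dirty_slice`
/// helper below is hypothetical, and the import path assumes the enclosing
/// crate is published as `vm_memory`.
///
/// ```
/// use vm_memory::bitmap::{Bitmap, BS};
///
/// // Return the slice of `b` starting at `offset`, spelled with the alias
/// // instead of the longer `<B as WithBitmapSlice<'a>>::S`.
/// fn dirty_slice<'a, B: Bitmap>(b: &'a B, offset: usize) -> BS<'a, B> {
///     b.slice_at(offset)
/// }
///
/// // The no-op `()` bitmap slices to `()` as well.
/// let _ = dirty_slice(&(), 0);
/// ```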
pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;
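
/// Helper type alias for referring to the `BitmapSlice` concrete type
/// associated with the memory regions of an object `M: GuestMemory`.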
pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    use std::io::Cursor;
    use std::marker::PhantomData;
    use std::mem::size_of_val;
    use std::result::Result;
    use std::sync::atomic::Ordering;

    use crate::{Bytes, VolatileMemory};
    #[cfg(feature = "backend-mmap")]
    use crate::{GuestAddress, MemoryRegionAddress};

    /// Return `true` iff every offset in `[start, start + len)` is clean in `b`.
    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| !b.dirty_at(offset))
    }

    /// Return `true` iff every offset in `[start, start + len)` is dirty in `b`.
    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| b.dirty_at(offset))
    }

    /// Check that the range is fully clean (when `clean` is `true`) or fully
    /// dirty (when `clean` is `false`).
    pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
        if clean {
            range_is_clean(b, start, len)
        } else {
            range_is_dirty(b, start, len)
        }
    }

    /// Exercise the basic `Bitmap` interface of `b`: ranges start out clean,
    /// and `mark_dirty` is reflected both by the bitmap and by a slice of it.
    pub fn test_bitmap<B: Bitmap>(b: &B) {
        let len = 0x2000;
        let dirty_offset = 0x1000;
        let dirty_len = 0x100;

        // A slice starting at the offset we are about to dirty.
        let s = b.slice_at(dirty_offset);

        assert!(range_is_clean(b, 0, len));
        assert!(range_is_clean(&s, 0, dirty_len));

        b.mark_dirty(dirty_offset, dirty_len);
        assert!(range_is_dirty(b, dirty_offset, dirty_len));
        assert!(range_is_dirty(&s, 0, dirty_len));
    }
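
    // An illustrative sketch of the blanket `Option<B>` implementation: `None`
    // behaves like the no-op bitmap, and `Some(inner)` forwards to `inner`.
    // This test case (with its hypothetical name) is an addition exercising
    // only the `()` backend defined in this module.
    #[test]
    fn test_option_unit_bitmap() {
        // `None` never reports anything dirty.
        let none: Option<()> = None;
        none.mark_dirty(0, 0x100);
        assert!(!none.dirty_at(0));

        // `Some(())` forwards to the no-op bitmap, which also stays clean.
        let some: Option<()> = Some(());
        some.mark_dirty(0, 0x100);
        assert!(!some.dirty_at(0));
        assert_eq!(some.slice_at(0), Some(()));
    }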

    #[derive(Debug)]
    pub enum TestAccessError {
        RangeCleanCheck,
        RangeDirtyCheck,
    }

    // A helper object that exercises the `Bytes` interface of `M` while
    // checking the expected dirty bitmap state before and after each access.
    struct BytesHelper<F, G, M> {
        check_range_fn: F,
        address_fn: G,
        phantom: PhantomData<*const M>,
    }

    // `F` checks whether a range is clean or dirty, `G` converts a raw offset
    // into the address type expected by the `Bytes` implementation, and `M` is
    // the object under test.
    impl<F, G, M, A> BytesHelper<F, G, M>
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        M: Bytes<A>,
    {
        fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
            (self.check_range_fn)(m, start, len, clean)
        }

        fn address(&self, offset: usize) -> A {
            (self.address_fn)(offset)
        }

        // Run `op` against `bytes` and verify that the target range flips from
        // clean to dirty as a result of the access.
        fn test_access<Op>(
            &self,
            bytes: &M,
            dirty_offset: usize,
            dirty_len: usize,
            op: Op,
        ) -> Result<(), TestAccessError>
        where
            Op: Fn(&M, A),
        {
            if !self.check_range(bytes, dirty_offset, dirty_len, true) {
                return Err(TestAccessError::RangeCleanCheck);
            }

            op(bytes, self.address(dirty_offset));

            if !self.check_range(bytes, dirty_offset, dirty_len, false) {
                return Err(TestAccessError::RangeDirtyCheck);
            }
            Ok(())
        }
    }

    // Exercise the dirty-tracking behaviour of the `Bytes` methods of `bytes`.
    // Each access lands `step` bytes after the previous one, so every check
    // starts from a clean range.
    pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        A: Copy,
        M: Bytes<A>,
        <M as Bytes<A>>::E: Debug,
    {
        const BUF_SIZE: usize = 1024;
        let buf = vec![1u8; BUF_SIZE];
        let val = 1u64;

        let h = BytesHelper {
            check_range_fn,
            address_fn,
            phantom: PhantomData,
        };
        let mut dirty_offset = 0x1000;

        // Test `write`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
        })
        .unwrap();
        dirty_offset += step;

        // Test `write_slice`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            m.write_slice(buf.as_slice(), addr).unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `write_obj`.
        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
            m.write_obj(val, addr).unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `read_from`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            assert_eq!(
                m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
                BUF_SIZE
            )
        })
        .unwrap();
        dirty_offset += step;

        // Test `read_exact_from`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
                .unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `store`.
        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
            m.store(val, addr, Ordering::Relaxed).unwrap()
        })
        .unwrap();
    }
#[cfg(feature = "backend-mmap")]
fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
let dirty_addr = MemoryRegionAddress(0x0);
let val = 123u64;
let dirty_len = size_of_val(&val);
let slice = region.get_slice(dirty_addr, dirty_len).unwrap();
assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
region.write_obj(val, dirty_addr).unwrap();
assert!(range_is_dirty(
region.bitmap(),
dirty_addr.0 as usize,
dirty_len
));
assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
test_bytes(
region,
|r: &R, start: usize, len: usize, clean: bool| {
check_range(r.bitmap(), start, len, clean)
},
|offset| MemoryRegionAddress(offset as u64),
0x1000,
);
}
#[cfg(feature = "backend-mmap")]
pub fn test_guest_memory_and_region<M, F>(f: F)
where
M: GuestMemory,
F: Fn() -> M,
{
let m = f();
let dirty_addr = GuestAddress(0x1000);
let val = 123u64;
let dirty_len = size_of_val(&val);
let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
let slice = m.get_slice(dirty_addr, dirty_len).unwrap();
assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
m.write_obj(val, dirty_addr).unwrap();
assert!(range_is_dirty(
region.bitmap(),
region_addr.0 as usize,
dirty_len
));
assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());
let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
let mut check_result = true;
m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| {
if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) {
check_result = false;
}
Ok(size)
})
.unwrap();
check_result
};
test_bytes(
&f(),
check_range_closure,
|offset| GuestAddress(offset as u64),
0x1000,
);
}

    // Exercise dirty tracking through the `VolatileMemory` interface: writes
    // through slices, `get_ref`, and `get_array_ref` must all show up in the
    // bitmap.
    pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
        assert!(m.len() >= 0x8000);

        let dirty_offset = 0x1000;
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let get_ref_offset = 0x2000;
        let array_ref_offset = 0x3000;

        let s1 = m.as_volatile_slice();
        let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();

        assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
        assert!(range_is_clean(s2.bitmap(), 0, s2.len()));

        s1.write_obj(val, dirty_offset).unwrap();

        assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
        assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));

        let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
        assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
        v_ref.store(val);
        assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));

        let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
        assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
        arr_ref.store(0, val);
        assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
    }
}