use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::Result;
use crate::{
Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemoryBackend, GuestMemoryError,
GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile,
};
use std::sync::atomic::Ordering;
use std::sync::Arc;
/// An abstraction over a contiguous range of guest physical memory.
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = GuestMemoryError> {
    /// Bitmap type used to track accesses (e.g. dirty pages) within this region.
    type B: Bitmap;

    /// Returns the size of the region, in bytes.
    fn len(&self) -> GuestUsize;

    /// Returns the first guest physical address covered by the region.
    fn start_addr(&self) -> GuestAddress;

    /// Returns the last guest physical address covered by the region.
    fn last_addr(&self) -> GuestAddress {
        // NOTE(review): `len() - 1` assumes regions are never empty — confirm
        // this invariant is upheld by all implementors.
        self.start_addr().unchecked_add(self.len() - 1)
    }

    /// Returns the bitmap associated with this region.
    fn bitmap(&self) -> BS<'_, Self::B>;

    /// Returns `Some(addr)` when `addr` lies within the region, `None` otherwise.
    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
        self.address_in_range(addr).then_some(addr)
    }

    /// Returns `true` when `addr` is a valid offset into this region.
    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
        addr.raw_value() < self.len()
    }

    /// Computes `base + offset`, returning `None` on arithmetic overflow or
    /// when the resulting address falls outside the region.
    fn checked_offset(
        &self,
        base: MemoryRegionAddress,
        offset: usize,
    ) -> Option<MemoryRegionAddress> {
        self.check_address(base.checked_add(offset as u64)?)
    }

    /// Translates an absolute guest address into an address relative to this
    /// region's start, returning `None` when `addr` is not contained in it.
    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
        let offset = addr.checked_offset_from(self.start_addr())?;
        self.check_address(MemoryRegionAddress(offset))
    }

    /// Returns the host virtual address backing `addr`, when available.
    ///
    /// The default implementation reports no host mapping.
    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
        Err(GuestMemoryError::HostAddressNotAvailable)
    }

    /// Returns the backing file and offset for this region, when file-backed.
    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    /// Returns a [`VolatileSlice`] of `count` bytes starting at `offset`.
    ///
    /// The default implementation reports no host mapping.
    #[allow(unused_variables)]
    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> Result<VolatileSlice<'_, BS<'_, Self::B>>> {
        Err(GuestMemoryError::HostAddressNotAvailable)
    }

    /// Returns a [`VolatileSlice`] spanning the entire region.
    fn as_volatile_slice(&self) -> Result<VolatileSlice<'_, BS<'_, Self::B>>> {
        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
    }

    /// Returns whether the region is backed by hugetlbfs, when known.
    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}
/// Errors that can occur when constructing a [`GuestRegionCollection`].
#[derive(Debug, thiserror::Error)]
pub enum GuestRegionCollectionError {
    /// The supplied region list was empty.
    #[error("No memory region found")]
    NoMemoryRegion,
    /// At least two of the supplied regions overlap each other.
    #[error("Some of the memory regions intersect with each other")]
    MemoryRegionOverlap,
    /// The supplied regions were not sorted by ascending start address.
    #[error("The provided memory regions haven't been sorted")]
    UnsortedMemoryRegions,
}
/// An ordered collection of non-overlapping guest memory regions, each shared
/// behind an [`Arc`].
#[derive(Debug)]
pub struct GuestRegionCollection<R> {
    // Invariant: sorted by start address with no overlaps; upheld by
    // `from_arc_regions`, through which all constructors funnel.
    regions: Vec<Arc<R>>,
}
impl<R> Default for GuestRegionCollection<R> {
fn default() -> Self {
Self {
regions: Vec::new(),
}
}
}
impl<R> Clone for GuestRegionCollection<R> {
fn clone(&self) -> Self {
GuestRegionCollection {
regions: self.regions.iter().map(Arc::clone).collect(),
}
}
}
impl<R: GuestMemoryRegion> GuestRegionCollection<R> {
    /// Creates an empty `GuestRegionCollection`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Builds a collection from `regions`, wrapping each one in an [`Arc`].
    ///
    /// # Errors
    ///
    /// Returns an error if `regions` is empty, unsorted, or contains
    /// overlapping regions (see [`Self::from_arc_regions`]).
    pub fn from_regions(
        regions: Vec<R>,
    ) -> std::result::Result<Self, GuestRegionCollectionError> {
        // `into_iter()` consumes the vector directly; the previous
        // `mut` + `drain(..)` over the whole vec was needless (clippy: iter_with_drain).
        Self::from_arc_regions(regions.into_iter().map(Arc::new).collect())
    }

    /// Builds a collection from already-shared regions.
    ///
    /// # Errors
    ///
    /// * [`GuestRegionCollectionError::NoMemoryRegion`] when `regions` is empty.
    /// * [`GuestRegionCollectionError::UnsortedMemoryRegions`] when the regions
    ///   are not sorted by ascending start address.
    /// * [`GuestRegionCollectionError::MemoryRegionOverlap`] when two adjacent
    ///   regions intersect.
    pub fn from_arc_regions(
        regions: Vec<Arc<R>>,
    ) -> std::result::Result<Self, GuestRegionCollectionError> {
        if regions.is_empty() {
            return Err(GuestRegionCollectionError::NoMemoryRegion);
        }
        // Because the list must be sorted, checking each adjacent pair is
        // sufficient to rule out any overlap.
        for window in regions.windows(2) {
            let prev = &window[0];
            let next = &window[1];
            if prev.start_addr() > next.start_addr() {
                return Err(GuestRegionCollectionError::UnsortedMemoryRegions);
            }
            if prev.last_addr() >= next.start_addr() {
                return Err(GuestRegionCollectionError::MemoryRegionOverlap);
            }
        }
        Ok(Self { regions })
    }

    /// Returns a new collection with `region` inserted, leaving `self`
    /// untouched.
    ///
    /// # Errors
    ///
    /// Returns [`GuestRegionCollectionError::MemoryRegionOverlap`] when the new
    /// region intersects an existing one.
    pub fn insert_region(
        &self,
        region: Arc<R>,
    ) -> std::result::Result<GuestRegionCollection<R>, GuestRegionCollectionError> {
        let mut regions = self.regions.clone();
        regions.push(region);
        regions.sort_by_key(|x| x.start_addr());
        // Re-validation catches any overlap introduced by the new region.
        Self::from_arc_regions(regions)
    }

    /// Returns a new collection with the region starting at `base` removed,
    /// together with the removed region. The region must start exactly at
    /// `base` and have length exactly `size`.
    ///
    /// # Errors
    ///
    /// Returns [`GuestRegionCollectionError::NoMemoryRegion`] when no region
    /// matches both `base` and `size`.
    pub fn remove_region(
        &self,
        base: GuestAddress,
        size: GuestUsize,
    ) -> std::result::Result<(GuestRegionCollection<R>, Arc<R>), GuestRegionCollectionError> {
        if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
            // A successful binary search guarantees `region_index` is in
            // bounds, so direct indexing replaces the old `.get().unwrap()`.
            if self.regions[region_index].len() == size {
                let mut regions = self.regions.clone();
                let region = regions.remove(region_index);
                return Ok((Self { regions }, region));
            }
        }
        Err(GuestRegionCollectionError::NoMemoryRegion)
    }
}
impl<R: GuestMemoryRegion> GuestMemoryBackend for GuestRegionCollection<R> {
    type R = R;

    /// Returns the number of regions in the collection.
    fn num_regions(&self) -> usize {
        self.regions.len()
    }

    /// Finds the region containing `addr`, exploiting the sorted order of
    /// `self.regions` with a binary search on start addresses.
    fn find_region(&self, addr: GuestAddress) -> Option<&R> {
        match self.regions.binary_search_by_key(&addr, |r| r.start_addr()) {
            // Exact hit: `addr` is the first byte of a region.
            Ok(i) => Some(self.regions[i].as_ref()),
            // `addr` precedes every region.
            Err(0) => None,
            // Otherwise the only candidate is the region just before the
            // insertion point; check `addr` is within its bounds.
            Err(i) => {
                let candidate = &self.regions[i - 1];
                if addr <= candidate.last_addr() {
                    Some(candidate.as_ref())
                } else {
                    None
                }
            }
        }
    }

    /// Iterates over the regions in ascending address order.
    fn iter(&self) -> impl Iterator<Item = &Self::R> {
        self.regions.iter().map(|r| r.as_ref())
    }
}
/// Marker trait that opts a [`GuestMemoryRegion`] into the blanket [`Bytes`]
/// implementation below, which routes all accesses through
/// [`GuestMemoryRegion::as_volatile_slice`].
pub trait GuestMemoryRegionBytes: GuestMemoryRegion {}
/// Blanket [`Bytes`] implementation for regions opting in via
/// [`GuestMemoryRegionBytes`]: every access is delegated to the region's
/// [`VolatileSlice`] obtained from `as_volatile_slice`.
///
/// All methods convert `addr` with `addr.raw_value() as usize`; the original
/// mix of `raw_value()` and tuple-field access (`addr.0`) is unified here.
impl<R: GuestMemoryRegionBytes> Bytes<MemoryRegionAddress> for R {
    type E = GuestMemoryError;

    /// Writes up to `buf.len()` bytes at `addr`; returns the number written.
    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
        let offset = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write(buf, offset)
            .map_err(Into::into)
    }

    /// Reads up to `buf.len()` bytes from `addr`; returns the number read.
    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
        let offset = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read(buf, offset)
            .map_err(Into::into)
    }

    /// Writes the entire `buf` at `addr`, failing on a partial write.
    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
        let offset = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write_slice(buf, offset)
            .map_err(Into::into)
    }

    /// Fills the entire `buf` from `addr`, failing on a partial read.
    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
        let offset = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read_slice(buf, offset)
            .map_err(Into::into)
    }

    /// Reads up to `count` bytes from `src` into guest memory at `addr`.
    fn read_volatile_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> Result<usize>
    where
        F: ReadVolatile,
    {
        self.as_volatile_slice()?
            .read_volatile_from(addr.raw_value() as usize, src, count)
            .map_err(Into::into)
    }

    /// Reads exactly `count` bytes from `src` into guest memory at `addr`.
    fn read_exact_volatile_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: ReadVolatile,
    {
        self.as_volatile_slice()?
            .read_exact_volatile_from(addr.raw_value() as usize, src, count)
            .map_err(Into::into)
    }

    /// Writes up to `count` bytes from guest memory at `addr` into `dst`.
    fn write_volatile_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<usize>
    where
        F: WriteVolatile,
    {
        self.as_volatile_slice()?
            .write_volatile_to(addr.raw_value() as usize, dst, count)
            .map_err(Into::into)
    }

    /// Writes exactly `count` bytes from guest memory at `addr` into `dst`.
    fn write_all_volatile_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: WriteVolatile,
    {
        self.as_volatile_slice()?
            .write_all_volatile_to(addr.raw_value() as usize, dst, count)
            .map_err(Into::into)
    }

    /// Atomically stores `val` at `addr` with the given memory `order`.
    fn store<T: AtomicAccess>(
        &self,
        val: T,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> Result<()> {
        // `?` + delegate, matching the shape of every other method above
        // (the original used an `and_then` closure here only).
        self.as_volatile_slice()?
            .store(val, addr.raw_value() as usize, order)
            .map_err(Into::into)
    }

    /// Atomically loads a `T` from `addr` with the given memory `order`.
    fn load<T: AtomicAccess>(&self, addr: MemoryRegionAddress, order: Ordering) -> Result<T> {
        self.as_volatile_slice()?
            .load(addr.raw_value() as usize, order)
            .map_err(Into::into)
    }
}
// Unit tests for `GuestRegionCollection`, driven by a minimal mock region.
#[cfg(test)]
pub(crate) mod tests {
    use crate::region::{GuestMemoryRegionBytes, GuestRegionCollectionError};
    use crate::{
        Address, GuestAddress, GuestMemoryBackend, GuestMemoryRegion, GuestRegionCollection,
        GuestUsize,
    };
    use matches::assert_matches;
    use std::sync::Arc;

    /// A trivial `GuestMemoryRegion` carrying only a start address and length.
    /// It has no backing memory, so only addressing/collection logic can be
    /// exercised with it.
    #[derive(Debug, PartialEq, Eq)]
    pub(crate) struct MockRegion {
        pub(crate) start: GuestAddress,
        pub(crate) len: GuestUsize,
    }

    impl GuestMemoryRegion for MockRegion {
        // The unit bitmap: no dirty tracking.
        type B = ();

        fn len(&self) -> GuestUsize {
            self.len
        }

        fn start_addr(&self) -> GuestAddress {
            self.start
        }

        fn bitmap(&self) {}
    }

    impl GuestMemoryRegionBytes for MockRegion {}

    pub(crate) type Collection = GuestRegionCollection<MockRegion>;

    /// Asserts that `maybe_guest_mem` is `Ok` and that its regions match
    /// `expected_regions_summary` (count, last address, per-region start/len,
    /// and find_region hits).
    fn check_guest_memory_mmap(
        maybe_guest_mem: Result<Collection, GuestRegionCollectionError>,
        expected_regions_summary: &[(GuestAddress, u64)],
    ) {
        assert!(maybe_guest_mem.is_ok());
        let guest_mem = maybe_guest_mem.unwrap();
        assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
        let maybe_last_mem_reg = expected_regions_summary.last();
        if let Some((region_addr, region_size)) = maybe_last_mem_reg {
            let mut last_addr = region_addr.unchecked_add(*region_size);
            // `start + size` wrapping to 0 means the region ends exactly at the
            // top of the address space; otherwise step back one byte to get the
            // inclusive last address.
            if last_addr.raw_value() != 0 {
                last_addr = last_addr.unchecked_sub(1);
            }
            assert_eq!(guest_mem.last_addr(), last_addr);
        }
        for ((region_addr, region_size), mmap) in
            expected_regions_summary.iter().zip(guest_mem.iter())
        {
            assert_eq!(region_addr, &mmap.start);
            assert_eq!(region_size, &mmap.len);
            assert!(guest_mem.find_region(*region_addr).is_some());
        }
    }

    /// Builds a `Collection` from `(start, len)` pairs via `from_regions`.
    pub(crate) fn new_guest_memory_collection_from_regions(
        regions_summary: &[(GuestAddress, u64)],
    ) -> Result<Collection, GuestRegionCollectionError> {
        Collection::from_regions(
            regions_summary
                .iter()
                .map(|&(start, len)| MockRegion { start, len })
                .collect(),
        )
    }

    /// Builds a `Collection` from `(start, len)` pairs via `from_arc_regions`.
    fn new_guest_memory_collection_from_arc_regions(
        regions_summary: &[(GuestAddress, u64)],
    ) -> Result<Collection, GuestRegionCollectionError> {
        Collection::from_arc_regions(
            regions_summary
                .iter()
                .map(|&(start, len)| Arc::new(MockRegion { start, len }))
                .collect(),
        )
    }

    // An empty region list must be rejected by both constructors.
    #[test]
    fn test_no_memory_region() {
        let regions_summary = [];
        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::NoMemoryRegion
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::NoMemoryRegion
        );
    }

    // Regions [0, 100) and [99, 199) share byte 99, so construction must fail.
    #[test]
    fn test_overlapping_memory_regions() {
        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)];
        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::MemoryRegionOverlap
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::MemoryRegionOverlap
        );
    }

    // Regions given in descending start order must be rejected as unsorted.
    #[test]
    fn test_unsorted_memory_regions() {
        let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)];
        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::UnsortedMemoryRegions
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::UnsortedMemoryRegions
        );
    }

    // Two adjacent (touching but non-overlapping) regions are valid.
    #[test]
    fn test_valid_memory_regions() {
        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)];
        let guest_mem = Collection::new();
        assert_eq!(guest_mem.num_regions(), 0);
        check_guest_memory_mmap(
            new_guest_memory_collection_from_regions(&regions_summary),
            &regions_summary,
        );
        check_guest_memory_mmap(
            new_guest_memory_collection_from_arc_regions(&regions_summary),
            &regions_summary,
        );
    }

    // insert_region: returns a new collection (original untouched), keeps the
    // regions sorted, and rejects a duplicate/overlapping insertion.
    #[test]
    fn test_mmap_insert_region() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap();
        let mut gm = mem_orig.clone();
        assert_eq!(mem_orig.num_regions(), 2);
        // Deliberately out of order: insert_region must sort them.
        let new_regions = [
            (GuestAddress(0x8000), 0x1000),
            (GuestAddress(0x4000), 0x1000),
            (GuestAddress(0xc000), 0x1000),
        ];
        for (start, len) in new_regions {
            gm = gm
                .insert_region(Arc::new(MockRegion { start, len }))
                .unwrap();
        }
        // Inserting at an already-occupied start address must fail.
        gm.insert_region(Arc::new(MockRegion {
            start: GuestAddress(0xc000),
            len: 0x1000,
        }))
        .unwrap_err();
        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 5);
        let regions = gm.iter().collect::<Vec<_>>();
        assert_eq!(regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(regions[1].start_addr(), GuestAddress(0x4000));
        assert_eq!(regions[2].start_addr(), GuestAddress(0x8000));
        assert_eq!(regions[3].start_addr(), GuestAddress(0xc000));
        assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000));
    }

    // remove_region: requires an exact (base, size) match and leaves the
    // original collection untouched.
    #[test]
    fn test_mmap_remove_region() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap();
        let gm = mem_orig.clone();
        assert_eq!(mem_orig.num_regions(), 2);
        // Matching base but wrong size must fail.
        gm.remove_region(GuestAddress(0), 128).unwrap_err();
        // Non-existent base must fail.
        gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
        let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();
        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 1);
        assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000));
        assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
    }

    // iter() yields the regions in insertion (address) order with the
    // expected start/len values.
    #[test]
    fn test_iter() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gm = new_guest_memory_collection_from_regions(&regions).unwrap();
        for region in gm.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }
        for region in gm.iter() {
            iterated_regions.push((region.start_addr(), region.len()));
        }
        assert_eq!(regions, iterated_regions);
        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));
        let mmap_regions = gm.iter().collect::<Vec<_>>();
        assert_eq!(mmap_regions[0].start, regions[0].0);
        assert_eq!(mmap_regions[1].start, regions[1].0);
    }

    // address_in_range: true inside either region, false in the gap between
    // them and past the end.
    #[test]
    fn test_address_in_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)])
                .unwrap();
        assert!(guest_mem.address_in_range(GuestAddress(0x200)));
        assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
        assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
        assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
    }

    // check_address: Some(addr) inside a region, None in gaps and past the end.
    #[test]
    fn test_check_address() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)])
                .unwrap();
        assert_eq!(
            guest_mem.check_address(GuestAddress(0x200)),
            Some(GuestAddress(0x200))
        );
        assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
        assert_eq!(
            guest_mem.check_address(GuestAddress(0xa00)),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
    }

    // checked_offset over a collection: offsets may land in a *different*
    // region than the base, but must land in some region; overflow and
    // landing in a gap both yield None.
    #[test]
    fn test_checked_offset() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);
        let guest_mem = new_guest_memory_collection_from_regions(&[
            (start_addr1, 0x400),
            (start_addr2, 0x400),
            (start_addr3, 0x400),
        ])
        .unwrap();
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x200),
            Some(GuestAddress(0x200))
        );
        // Crosses from region 1 into region 2.
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0xa00),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(
            guest_mem.checked_offset(start_addr2, 0x7ff),
            Some(GuestAddress(0xfff))
        );
        assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
        assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);
        // 0x400 is one past the end of region 1 and falls in the gap.
        assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x400 - 1),
            Some(GuestAddress(0x400 - 1))
        );
    }
}