use std::convert::From;
use std::fs::File;
use std::io;
use std::iter::FusedIterator;
use std::mem::size_of;
use std::ops::{BitAnd, BitOr, Deref};
use std::rc::Rc;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use crate::address::{Address, AddressValue};
use crate::bitmap::{Bitmap, BitmapSlice, BS, MS};
use crate::bytes::{AtomicAccess, Bytes};
use crate::io::{ReadVolatile, WriteVolatile};
#[cfg(feature = "iommu")]
use crate::iommu::Error as IommuError;
use crate::volatile_memory::{self, VolatileSlice};
use crate::GuestMemoryRegion;
/// Errors that can occur when accessing guest memory.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// The guest address is not covered by any memory region.
    #[error("Guest memory error: invalid guest address {}",.0.raw_value())]
    InvalidGuestAddress(GuestAddress),
    /// An underlying I/O error occurred during the access.
    #[error("Guest memory error: {0}")]
    IOError(io::Error),
    /// Fewer bytes were transferred than the caller required.
    #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")]
    PartialBuffer { expected: usize, completed: usize },
    /// The address could not be mapped to a valid backend (host) location.
    #[error("Guest memory error: invalid backend address")]
    InvalidBackendAddress,
    /// The region has no host virtual address mapping.
    #[error("Guest memory error: host virtual address not available")]
    HostAddressNotAvailable,
    /// A `try_access` callback reported consuming more bytes than requested.
    #[error(
        "The length returned by the callback passed to `try_access` is outside the address range."
    )]
    CallbackOutOfRange,
    /// Address arithmetic in `try_access` wrapped past the end of the address space.
    #[error("The address to be read by `try_access` is outside the address range")]
    GuestAddressOverflow,
    /// The IOMMU could not translate the guest address (only with the `iommu` feature).
    #[cfg(feature = "iommu")]
    #[error("IOMMU failed to translate guest address: {0}")]
    IommuError(IommuError),
}
impl From<volatile_memory::Error> for Error {
fn from(e: volatile_memory::Error) -> Self {
match e {
volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::IOError(e) => Error::IOError(e),
volatile_memory::Error::PartialBuffer {
expected,
completed,
} => Error::PartialBuffer {
expected,
completed,
},
}
}
}
/// Convenience alias for guest-memory results using [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
/// A guest physical address, stored as a raw `u64`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct GuestAddress(pub u64);
impl_address_ops!(GuestAddress, u64);
/// An offset within a single memory region, stored as a raw `u64`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct MemoryRegionAddress(pub u64);
impl_address_ops!(MemoryRegionAddress, u64);
/// The raw numeric type underlying [`GuestAddress`] (as wired up by `impl_address_ops!`).
pub type GuestUsize = <GuestAddress as AddressValue>::V;
/// A shared file handle paired with a byte offset into that file, identifying
/// where a memory mapping is backed within the file.
#[derive(Clone, Debug)]
pub struct FileOffset {
    file: Arc<File>,
    start: u64,
}

impl FileOffset {
    /// Creates a `FileOffset`, taking ownership of `file`.
    pub fn new(file: File, start: u64) -> Self {
        Self {
            file: Arc::new(file),
            start,
        }
    }

    /// Creates a `FileOffset` from an already shared file handle.
    pub fn from_arc(file: Arc<File>, start: u64) -> Self {
        Self { file, start }
    }

    /// Borrows the underlying [`File`].
    pub fn file(&self) -> &File {
        &self.file
    }

    /// Borrows the reference-counted wrapper around the file.
    pub fn arc(&self) -> &Arc<File> {
        &self.file
    }

    /// Returns the offset into the file at which the mapping starts.
    pub fn start(&self) -> u64 {
        self.start
    }
}
/// Provides a way to obtain a handle that dereferences to a [`GuestMemory`]
/// object, letting implementations decide how the memory view is shared
/// (plain reference, `Rc`, `Arc`, ...).
pub trait GuestAddressSpace: Clone {
    /// The concrete guest memory type this address space gives access to.
    type M: GuestMemory;
    /// A clonable handle that dereferences to [`Self::M`].
    type T: Clone + Deref<Target = Self::M>;
    /// Returns a handle to the guest memory object.
    fn memory(&self) -> Self::T;
}
/// A plain reference to a `GuestMemory` object is itself an address space:
/// `memory()` hands back the reference (via deref coercion `&&M -> &M`).
impl<M: GuestMemory> GuestAddressSpace for &M {
    type M = M;
    type T = Self;
    fn memory(&self) -> Self {
        self
    }
}
/// `Rc<M>` acts as an address space by handing out a fresh reference-counted
/// handle on every `memory()` call.
impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
    type M = M;
    type T = Self;
    fn memory(&self) -> Self {
        // Rc::clone makes the cheap refcount bump explicit.
        Rc::clone(self)
    }
}
/// `Arc<M>` acts as an address space by handing out a fresh atomically
/// reference-counted handle on every `memory()` call.
impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
    type M = M;
    type T = Self;
    fn memory(&self) -> Self {
        // Arc::clone makes the cheap refcount bump explicit.
        Arc::clone(self)
    }
}
/// Lower-level guest memory abstraction: a collection of
/// [`GuestMemoryRegion`] objects addressable by guest physical address.
///
/// Only [`Self::iter`] is required; every other method has a default
/// implementation built on top of it.
pub trait GuestMemoryBackend {
    /// Type of the memory regions making up this address space.
    type R: GuestMemoryRegion;

    /// Returns the number of regions in the collection.
    fn num_regions(&self) -> usize {
        self.iter().count()
    }

    /// Returns the region containing `addr`, or `None` if no region covers it.
    fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
        self.iter()
            .find(|region| addr >= region.start_addr() && addr <= region.last_addr())
    }

    /// Iterates over all memory regions in the collection.
    fn iter(&self) -> impl Iterator<Item = &Self::R>;

    /// Returns the greatest address covered by any region, or
    /// `GuestAddress(0)` when the collection is empty.
    fn last_addr(&self) -> GuestAddress {
        self.iter()
            .map(GuestMemoryRegion::last_addr)
            .fold(GuestAddress(0), std::cmp::max)
    }

    /// Resolves `addr` into its containing region plus the offset within it.
    fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
        self.find_region(addr)
            // unwrap() is fine: find_region() guarantees `addr` lies inside `r`.
            .map(|r| (r, r.to_region_addr(addr).unwrap()))
    }

    /// Returns true if `addr` falls inside some region.
    fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.find_region(addr).is_some()
    }

    /// Returns `Some(addr)` if `addr` is covered by a region, `None` otherwise.
    fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
        self.find_region(addr).map(|_| addr)
    }

    /// Returns true if the entire range `[base, base + len)` is backed by
    /// regions (it may span several of them).
    fn check_range(&self, base: GuestAddress, len: usize) -> bool {
        self.get_slices(base, len).all(|r| r.is_ok())
    }

    /// Returns `base + offset` if the sum does not overflow and still falls
    /// inside a region.
    fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
        base.checked_add(offset as u64)
            .and_then(|addr| self.check_address(addr))
    }

    /// Invokes `f` once per region-sized chunk of `[addr, addr + count)`,
    /// threading the running byte total through, until `f` returns `Ok(0)`,
    /// an error occurs, or `count` bytes have been processed.
    ///
    /// `f` receives `(bytes done so far, chunk length, offset within region,
    /// region)` and returns how many bytes it actually handled.
    #[deprecated(
        since = "0.17.0",
        note = "supplemented by external iterator `get_slices()`"
    )]
    fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
    where
        F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
    {
        let mut cur = addr;
        let mut total = 0;
        while let Some(region) = self.find_region(cur) {
            // unwrap() is fine: `cur` is inside `region` by construction.
            let start = region.to_region_addr(cur).unwrap();
            // Clamp the chunk to what remains in both the region and the request.
            let cap = region.len() - start.raw_value();
            let len = std::cmp::min(cap, (count - total) as GuestUsize);
            match f(total, len as usize, start, region) {
                // Ok(0) means the callback chose to stop early.
                Ok(0) => return Ok(total),
                Ok(len) => {
                    total = match total.checked_add(len) {
                        Some(x) if x < count => x,
                        Some(x) if x == count => return Ok(x),
                        // Callback claimed to use more bytes than were requested.
                        _ => return Err(Error::CallbackOutOfRange),
                    };
                    cur = match cur.overflowing_add(len as GuestUsize) {
                        // Wrapping to exactly 0 means the range ended at the
                        // very top of the address space, which is tolerated.
                        (x @ GuestAddress(0), _) | (x, false) => x,
                        (_, true) => return Err(Error::GuestAddressOverflow),
                    };
                }
                e => return e,
            }
        }
        if total == 0 {
            // The starting address was not backed by any region at all.
            Err(Error::InvalidGuestAddress(addr))
        } else {
            Ok(total)
        }
    }

    /// Translates `addr` into a host virtual pointer.
    ///
    /// The pointer is only meaningful within the region containing `addr`;
    /// callers must not assume contiguity past the region boundary.
    fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(r, addr)| r.get_host_address(addr))
    }

    /// Returns a [`VolatileSlice`] of `count` bytes starting at `addr`;
    /// fails unless the range fits in a single region.
    fn get_slice(
        &self,
        addr: GuestAddress,
        count: usize,
    ) -> Result<VolatileSlice<'_, MS<'_, Self>>> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(r, addr)| r.get_slice(addr, count))
    }

    /// Returns an iterator over the [`VolatileSlice`]s covering
    /// `[addr, addr + count)`, one slice per crossed region.
    fn get_slices<'a>(
        &'a self,
        addr: GuestAddress,
        count: usize,
    ) -> GuestMemoryBackendSliceIterator<'a, Self> {
        GuestMemoryBackendSliceIterator {
            mem: self,
            addr,
            count,
        }
    }
}
/// Iterator over the [`VolatileSlice`]s that cover the guest range
/// `[addr, addr + count)` within backend `mem`.
///
/// Created by [`GuestMemoryBackend::get_slices`].
#[derive(Debug)]
pub struct GuestMemoryBackendSliceIterator<'a, M: GuestMemoryBackend + ?Sized> {
    /// Backend whose regions are being traversed.
    mem: &'a M,
    /// Guest address where the next slice starts.
    addr: GuestAddress,
    /// Bytes still to be covered; zero means exhausted (or fused after error).
    count: usize,
}
impl<'a, M: GuestMemoryBackend + ?Sized> GuestMemoryBackendSliceIterator<'a, M> {
    /// Produces the next slice of the remaining range, advancing `addr` and
    /// decrementing `count`.
    ///
    /// Returns `None` once the whole range has been covered, and `Some(Err)`
    /// if the current address is unmapped or the address computation wraps.
    ///
    /// # Safety
    ///
    /// NOTE(review): the original code states no explicit contract for why
    /// this is `unsafe`. On error the internal state (`count`) is not reset,
    /// so the caller must not invoke this again after an error; the safe
    /// `Iterator::next` wrapper enforces that by zeroing `count` — confirm
    /// this is the intended invariant.
    unsafe fn do_next(&mut self) -> Option<Result<VolatileSlice<'a, MS<'a, M>>>> {
        if self.count == 0 {
            return None;
        }
        // Locate the region containing the current address.
        let Some((region, start)) = self.mem.to_region_addr(self.addr) else {
            return Some(Err(Error::InvalidGuestAddress(self.addr)));
        };
        // Clamp to whatever is left both in this region and in the request.
        let cap = region.len() - start.raw_value();
        let len = std::cmp::min(cap as usize, self.count);
        self.count -= len;
        self.addr = match self.addr.overflowing_add(len as GuestUsize) {
            // Wrapping to exactly 0 means we consumed up to the very top of
            // the address space, which is tolerated.
            (x @ GuestAddress(0), _) | (x, false) => x,
            (_, true) => return Some(Err(Error::GuestAddressOverflow)),
        };
        Some(region.get_slice(start, len).inspect(|s| {
            // A region must hand back exactly the length we asked for.
            assert_eq!(
                s.len(),
                len,
                "get_slice() returned a slice with wrong length"
            )
        }))
    }

    /// Inherent forwarding of [`GuestMemorySliceIterator::stop_on_error`] so
    /// callers do not need the trait in scope.
    pub fn stop_on_error(self) -> Result<impl Iterator<Item = VolatileSlice<'a, MS<'a, M>>>> {
        <Self as GuestMemorySliceIterator<'a, MS<'a, M>>>::stop_on_error(self)
    }
}
impl<'a, M: GuestMemoryBackend + ?Sized> Iterator for GuestMemoryBackendSliceIterator<'a, M> {
    type Item = Result<VolatileSlice<'a, MS<'a, M>>>;

    /// Yields the next volatile slice, fusing the iterator as soon as an
    /// error (or the end of the range) is reached.
    fn next(&mut self) -> Option<Self::Item> {
        // SAFETY: whenever `do_next` does not yield a successful slice we
        // zero `count` below, so it is never called again on the
        // inconsistent state left behind by an error.
        let item = unsafe { self.do_next() };
        if !matches!(item, Some(Ok(_))) {
            // Error or exhaustion: all subsequent calls return None.
            self.count = 0;
        }
        item
    }
}
/// Uses the default [`GuestMemorySliceIterator::stop_on_error`] implementation.
impl<'a, M: GuestMemoryBackend + ?Sized> GuestMemorySliceIterator<'a, MS<'a, M>>
for GuestMemoryBackendSliceIterator<'a, M>
{
}
/// Fused: `next()` zeroes the remaining count on error or exhaustion, so the
/// iterator keeps returning `None` afterwards.
impl<M: GuestMemoryBackend + ?Sized> FusedIterator for GuestMemoryBackendSliceIterator<'_, M> {}
/// Blanket [`Bytes<GuestAddress>`] implementation for all [`GuestMemory`]
/// types: every access is decomposed into per-region volatile slices via
/// `get_slices()` with the appropriate permission.
impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
    type E = Error;

    /// Writes as much of `buf` as possible at `addr`, returning the number of
    /// bytes written (may be short if the range ends before `buf` does).
    fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
        self.get_slices(addr, buf.len(), Permissions::Write)?
            .stop_on_error()?
            // `acc` is the running byte count; each slice takes the next
            // chunk of `buf`.
            .try_fold(0, |acc, slice| Ok(acc + slice.write(&buf[acc..], 0)?))
    }

    /// Reads as much as possible into `buf` from `addr`, returning the number
    /// of bytes read.
    fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
        self.get_slices(addr, buf.len(), Permissions::Read)?
            .stop_on_error()?
            .try_fold(0, |acc, slice| Ok(acc + slice.read(&mut buf[acc..], 0)?))
    }

    /// Writes all of `buf` at `addr`; fails with `PartialBuffer` on a short write.
    fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
        let res = self.write(buf, addr)?;
        if res != buf.len() {
            return Err(Error::PartialBuffer {
                expected: buf.len(),
                completed: res,
            });
        }
        Ok(())
    }

    /// Fills all of `buf` from `addr`; fails with `PartialBuffer` on a short read.
    fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
        let res = self.read(buf, addr)?;
        if res != buf.len() {
            return Err(Error::PartialBuffer {
                expected: buf.len(),
                completed: res,
            });
        }
        Ok(())
    }

    /// Streams up to `count` bytes from `src` into guest memory at `addr`,
    /// returning the number of bytes transferred.
    fn read_volatile_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
    where
        F: ReadVolatile,
    {
        self.get_slices(addr, count, Permissions::Write)?
            .stop_on_error()?
            .try_fold(0, |acc, slice| {
                Ok(acc + slice.read_volatile_from(0, src, slice.len())?)
            })
    }

    /// Like [`Bytes::read_volatile_from`], but errors with `PartialBuffer`
    /// unless exactly `count` bytes were transferred.
    fn read_exact_volatile_from<F>(
        &self,
        addr: GuestAddress,
        src: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: ReadVolatile,
    {
        let res = self.read_volatile_from(addr, src, count)?;
        if res != count {
            return Err(Error::PartialBuffer {
                expected: count,
                completed: res,
            });
        }
        Ok(())
    }

    /// Streams up to `count` bytes from guest memory at `addr` into `dst`.
    ///
    /// NOTE(review): each slice is drained with `write_all_volatile_to`, so a
    /// partial write *within* a slice surfaces as an error rather than a
    /// short count — asymmetric with `read_volatile_from`; confirm intended.
    fn write_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
    where
        F: WriteVolatile,
    {
        self.get_slices(addr, count, Permissions::Read)?
            .stop_on_error()?
            .try_fold(0, |acc, slice| {
                slice.write_all_volatile_to(0, dst, slice.len())?;
                Ok(acc + slice.len())
            })
    }

    /// Like [`Bytes::write_volatile_to`], but errors with `PartialBuffer`
    /// unless exactly `count` bytes were transferred.
    fn write_all_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
    where
        F: WriteVolatile,
    {
        let res = self.write_volatile_to(addr, dst, count)?;
        if res != count {
            return Err(Error::PartialBuffer {
                expected: count,
                completed: res,
            });
        }
        Ok(())
    }

    /// Atomically stores `val` at `addr` with ordering `order`.
    ///
    /// The `unwrap()` relies on `get_slices` yielding at least one item for a
    /// non-zero count (atomic types are never zero-sized — TODO confirm this
    /// holds for all `GuestMemory` implementations); the store itself fails
    /// if the first slice is too small or misaligned.
    fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
        self.get_slices(addr, size_of::<O>(), Permissions::Write)?
            .next()
            .unwrap()?
            .store(val, 0, order)
            .map_err(Into::into)
    }

    /// Atomically loads a value from `addr` with ordering `order`; same
    /// single-slice requirements as `store`.
    fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
        self.get_slices(addr, size_of::<O>(), Permissions::Read)?
            .next()
            .unwrap()?
            .load(0, order)
            .map_err(Into::into)
    }
}
/// Access permissions for a guest memory range.
///
/// The discriminants form a two-bit flag set: bit 0 grants read access and
/// bit 1 grants write access, so `&` and `|` behave like set intersection
/// and union.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum Permissions {
    /// No access.
    No = 0b00,
    /// Read-only access.
    Read = 0b01,
    /// Write-only access.
    Write = 0b10,
    /// Both read and write access.
    ReadWrite = 0b11,
}

impl Permissions {
    /// Reconstructs a `Permissions` value from its raw two-bit repr.
    ///
    /// Panics for values outside `0b00..=0b11`; only called internally on
    /// results of masking/combining valid discriminants.
    fn from_repr(raw: u8) -> Self {
        match raw {
            0b00 => Permissions::No,
            0b01 => Permissions::Read,
            0b10 => Permissions::Write,
            0b11 => Permissions::ReadWrite,
            _ => panic!("{raw:x} is not a valid raw Permissions value"),
        }
    }

    /// Returns true if `self` grants every right contained in `access`.
    pub fn allow(&self, access: Self) -> bool {
        *self & access == access
    }

    /// Returns true if `self` grants write access.
    pub fn has_write(&self) -> bool {
        self.allow(Permissions::Write)
    }
}

impl BitOr for Permissions {
    type Output = Permissions;

    /// Union of the two permission sets.
    fn bitor(self, rhs: Permissions) -> Self::Output {
        Self::from_repr(self as u8 | rhs as u8)
    }
}

impl BitAnd for Permissions {
    type Output = Permissions;

    /// Intersection of the two permission sets.
    fn bitand(self, rhs: Permissions) -> Self::Output {
        Self::from_repr(self as u8 & rhs as u8)
    }
}
/// High-level guest memory interface that checks access permissions on top of
/// the raw backend (e.g. to support translation layers such as an IOMMU).
pub trait GuestMemory {
    /// Backend type giving direct physical memory access, when available.
    type PhysicalMemory: GuestMemoryBackend + ?Sized;
    /// Bitmap type used for dirty tracking on the returned slices.
    type Bitmap: Bitmap;
    /// Returns true if every byte of `[addr, addr + count)` is accessible
    /// with permissions `access`.
    fn check_range(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool;
    /// Returns an iterator over volatile slices covering `[addr, addr + count)`,
    /// validated against `access`.
    fn get_slices<'a>(
        &'a self,
        addr: GuestAddress,
        count: usize,
        access: Permissions,
    ) -> Result<impl GuestMemorySliceIterator<'a, BS<'a, Self::Bitmap>>>;
    /// Returns the underlying physical memory, or `None` (the default) when
    /// this memory is not backed by directly addressable physical memory.
    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
        None
    }
}
/// Abstract, fused iterator over the volatile slices covering a guest range.
pub trait GuestMemorySliceIterator<'a, B: BitmapSlice>:
Iterator<Item = Result<VolatileSlice<'a, B>>> + FusedIterator + Sized
{
    /// Returns `Err` if the *first* item is an error; otherwise returns an
    /// iterator over the successful slices.
    ///
    /// Errors after the first item are dropped by the `filter_map`, which
    /// simply ends the stream early (implementations fuse after yielding an
    /// error). This gives `Bytes` users partial-access semantics: an access
    /// that starts out valid reports how many bytes were transferred before
    /// the first failure.
    fn stop_on_error(self) -> Result<impl Iterator<Item = VolatileSlice<'a, B>>> {
        let mut peek = self.peekable();
        // Only the first item is inspected for an error here.
        if let Some(err) = peek.next_if(Result::is_err) {
            return Err(err.unwrap_err());
        }
        Ok(peek.filter_map(Result::ok))
    }
}
/// Every [`GuestMemoryBackend`] is trivially a [`GuestMemory`]: the backend
/// is its own physical memory, and the permission argument is ignored since
/// backends track no per-range permissions.
impl<M: GuestMemoryBackend + ?Sized> GuestMemory for M {
    type PhysicalMemory = M;
    type Bitmap = <M::R as GuestMemoryRegion>::B;
    /// Delegates to the backend's range check; `_access` is ignored (no
    /// permission tracking at the backend level).
    fn check_range(&self, addr: GuestAddress, count: usize, _access: Permissions) -> bool {
        <M as GuestMemoryBackend>::check_range(self, addr, count)
    }
    /// Delegates to the backend's slice iterator; always `Ok` because the
    /// backend iterator itself reports per-slice errors lazily.
    fn get_slices<'a>(
        &'a self,
        addr: GuestAddress,
        count: usize,
        _access: Permissions,
    ) -> Result<impl GuestMemorySliceIterator<'a, BS<'a, Self::Bitmap>>> {
        Ok(<M as GuestMemoryBackend>::get_slices(self, addr, count))
    }
    /// A backend is its own physical memory.
    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
        Some(self)
    }
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use super::*;
#[cfg(feature = "backend-mmap")]
use crate::bytes::ByteValued;
#[cfg(feature = "backend-mmap")]
use crate::GuestAddress;
#[cfg(feature = "backend-mmap")]
use std::time::{Duration, Instant};
use vmm_sys_util::tempfile::TempFile;
#[cfg(feature = "backend-mmap")]
type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
#[cfg(feature = "backend-mmap")]
/// Builds a test buffer where each byte equals its own index:
/// `[0, 1, 2, ..., size - 1]`.
fn make_image(size: u8) -> Vec<u8> {
    (0..size).collect()
}
/// `FileOffset` must report the `start` it was built with, and `file()` and
/// `arc()` must expose the very same underlying `File` object.
#[test]
fn test_file_offset() {
    let file = TempFile::new().unwrap().into_file();
    let start = 1234;
    let file_offset = FileOffset::new(file, start);
    assert_eq!(file_offset.start(), start);
    // Both accessors must point at the same File instance.
    assert_eq!(
        file_offset.file() as *const File,
        file_offset.arc().as_ref() as *const File
    );
}
/// A read that straddles the boundary between two contiguous regions must
/// transfer the full requested byte count.
#[cfg(feature = "backend-mmap")]
#[test]
fn checked_read_from() {
    let start_addr1 = GuestAddress(0x0);
    let start_addr2 = GuestAddress(0x40);
    let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
    let image = make_image(0x80);
    // The 0x20-byte read at 0x30 crosses the region boundary at 0x40.
    let offset = GuestAddress(0x30);
    let count: usize = 0x20;
    assert_eq!(
        0x20_usize,
        mem.read_volatile_from(offset, &mut image.as_slice(), count)
            .unwrap()
    );
}
#[cfg(feature = "backend-mmap")]
/// Repeatedly invokes `f` for at least `duration`, checking the clock only
/// once per large batch so `Instant::now()` overhead stays off the hot path.
fn loop_timed<F>(duration: Duration, mut f: F)
where
    F: FnMut(),
{
    // Number of calls between clock checks.
    const CHECK_PERIOD: u64 = 1_000_000;
    let start_time = Instant::now();
    loop {
        (0..CHECK_PERIOD).for_each(|_| f());
        if start_time.elapsed() >= duration {
            return;
        }
    }
}
#[cfg(feature = "backend-mmap")]
/// Stress test for torn accesses: a writer flips a value that straddles the
/// boundary between two regions while a reader keeps verifying that all bytes
/// of the value agree, each for a few seconds.
#[cfg(not(miri))] fn non_atomic_access_helper<T>()
where
    T: ByteValued
        + std::fmt::Debug
        + From<u8>
        + Into<u128>
        + std::ops::Not<Output = T>
        + PartialEq,
{
    use std::mem;
    use std::thread;

    // `val` occupies the final bytes of the first region; `some_bytes`
    // spills into the second region.
    #[derive(Clone, Copy, Debug, Default, PartialEq)]
    struct Data<T> {
        val: T,
        some_bytes: [u8; 8],
    }

    // Layout sanity checks: T is self-aligned and Data<T> has no padding.
    assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
    assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
    assert_eq!(mem::size_of::<Data<T>>(), mem::size_of::<T>() + 8);

    unsafe impl<T: ByteValued> ByteValued for Data<T> {}

    let start = GuestAddress(0);
    let region_len = 1 << 12;
    // Place `val` so it ends exactly at the first region's boundary.
    let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
    let mem = GuestMemoryMmap::from_ranges(&[
        (start, region_len),
        (start.unchecked_add(region_len as u64), region_len),
    ])
    .unwrap();
    let mem2 = mem.clone();
    let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255];
    let mut data = Data {
        val: T::from(0u8),
        some_bytes,
    };
    // Sanity round-trip before starting the race.
    mem.write_obj(data, data_start).unwrap();
    let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
    assert_eq!(read_data, data);

    // Reader thread: since the writer only ever stores all-zeros or all-ones
    // patterns into `val`, every byte of `val` must always agree.
    let t = thread::spawn(move || {
        let mut count: u64 = 0;
        loop_timed(Duration::from_secs(3), || {
            let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
            let bytes = data.val.into().to_le_bytes();
            for i in 1..mem::size_of::<T>() {
                if bytes[0] != bytes[i] {
                    panic!(
                        "val bytes don't match {:?} after {} iterations",
                        &bytes[..mem::size_of::<T>()],
                        count
                    );
                }
            }
            count += 1;
        });
    });

    // Writer: alternate between the value and its bitwise complement.
    loop_timed(Duration::from_secs(3), || {
        mem.write_obj(data, data_start).unwrap();
        data.val = !data.val;
    });

    t.join().unwrap()
}
/// Entry point for the non-atomic access stress test, instantiated for `u16`.
#[cfg(feature = "backend-mmap")]
#[test]
#[cfg(not(miri))]
fn test_non_atomic_access() {
    non_atomic_access_helper::<u16>()
}
/// Zero-length reads and writes must all succeed and transfer zero bytes,
/// including obj accesses of a zero-sized type.
#[cfg(feature = "backend-mmap")]
#[test]
fn test_zero_length_accesses() {
    // A zero-sized type with a (trivially valid) ByteValued impl.
    #[derive(Default, Clone, Copy)]
    #[repr(C)]
    struct ZeroSizedStruct {
        dummy: [u32; 0],
    }

    unsafe impl ByteValued for ZeroSizedStruct {}

    let addr = GuestAddress(0x1000);
    let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
    let obj = ZeroSizedStruct::default();
    let mut image = make_image(0x80);

    assert_eq!(mem.write(&[], addr).unwrap(), 0);
    assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
    assert!(mem.write_slice(&[], addr).is_ok());
    assert!(mem.read_slice(&mut [], addr).is_ok());
    assert!(mem.write_obj(obj, addr).is_ok());
    assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
    assert_eq!(
        mem.read_volatile_from(addr, &mut image.as_slice(), 0)
            .unwrap(),
        0
    );
    assert!(mem
        .read_exact_volatile_from(addr, &mut image.as_slice(), 0)
        .is_ok());
    assert_eq!(
        mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0)
            .unwrap(),
        0
    );
    assert!(mem
        .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0)
        .is_ok());
}
/// Delegates to the shared atomic-access checks, using the first address past
/// the single region as the known-invalid address.
#[cfg(feature = "backend-mmap")]
#[test]
fn test_atomic_accesses() {
    let addr = GuestAddress(0x1000);
    let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
    let bad_addr = addr.unchecked_add(0x1000);
    crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
}
/// A plain anonymous mmap region reports `None` for the hugetlbfs query
/// (i.e. "unknown / not applicable").
#[cfg(feature = "backend-mmap")]
#[cfg(target_os = "linux")]
#[test]
fn test_guest_memory_mmap_is_hugetlbfs() {
    let addr = GuestAddress(0x1000);
    let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
    let r = mem.find_region(addr).unwrap();
    assert_eq!(r.is_hugetlbfs(), None);
}
/// `&` on `Permissions` behaves like set intersection: idempotent and
/// commutative, with `No` absorbing, `ReadWrite` neutral, and disjoint
/// rights intersecting to nothing.
#[test]
fn test_perm_and() {
    use Permissions::*;
    let all = [No, Read, Write, ReadWrite];
    // Idempotence.
    for p in all {
        assert_eq!(p & p, p);
    }
    // Commutativity.
    for p1 in all {
        for p2 in all {
            assert_eq!(p1 & p2, p2 & p1);
        }
    }
    // `No` absorbs; `ReadWrite` is the identity.
    for p in all {
        assert_eq!(No & p, No);
        assert_eq!(ReadWrite & p, p);
    }
    // Disjoint rights share nothing.
    assert_eq!(Read & Write, No);
}
/// `|` on `Permissions` behaves like set union: idempotent and commutative,
/// with `No` neutral, `ReadWrite` absorbing, and `Read | Write` combining to
/// full access.
#[test]
fn test_perm_or() {
    use Permissions::*;
    let all = [No, Read, Write, ReadWrite];
    // Idempotence.
    for p in all {
        assert_eq!(p | p, p);
    }
    // Commutativity.
    for p1 in all {
        for p2 in all {
            assert_eq!(p1 | p2, p2 | p1);
        }
    }
    // `No` is the identity; `ReadWrite` absorbs.
    for p in all {
        assert_eq!(No | p, p);
        assert_eq!(ReadWrite | p, ReadWrite);
    }
    // Combining the two single rights yields full access.
    assert_eq!(Read | Write, ReadWrite);
}
/// `has_write()` is true exactly for the variants containing the write bit.
#[test]
fn test_perm_has_write() {
    use Permissions::*;
    for (perm, expected) in [(No, false), (Read, false), (Write, true), (ReadWrite, true)] {
        assert_eq!(perm.has_write(), expected);
    }
}
}