use std::convert::From;
use std::fmt::{self, Display};
use std::fs::File;
use std::io::{self, Read, Write};
use std::ops::{BitAnd, BitOr, Deref};
use std::rc::Rc;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use crate::address::{Address, AddressValue};
use crate::bitmap::{Bitmap, BS, MS};
use crate::bytes::{AtomicAccess, Bytes};
use crate::volatile_memory::{self, VolatileSlice};
/// Upper bound, in bytes, on the intermediate bounce buffer allocated when
/// transferring data between an `io::Read`/`io::Write` object and a guest
/// memory region that cannot be accessed directly as a host slice.
// `const` (rather than `static`) is the idiomatic form for an immutable
// compile-time value; it needs no runtime storage location.
const MAX_ACCESS_CHUNK: usize = 4096;
/// Errors associated with handling guest memory accesses.
// The previous `#[allow(missing_docs)]` only suppressed the lint; the variants
// are now documented instead.
#[derive(Debug)]
pub enum Error {
    /// No memory region in the address space contains the given guest address.
    InvalidGuestAddress(GuestAddress),
    /// Failure reading from or writing to the backing I/O object.
    IOError(io::Error),
    /// Only part of the requested buffer could be transferred.
    PartialBuffer {
        /// Number of bytes expected to be transferred.
        expected: usize,
        /// Number of bytes actually transferred.
        completed: usize,
    },
    /// The requested address is out of range of the backend storage.
    InvalidBackendAddress,
    /// No host virtual address mapping is available for the guest address.
    HostAddressNotAvailable,
}
impl From<volatile_memory::Error> for Error {
fn from(e: volatile_memory::Error) -> Self {
match e {
volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::IOError(e) => Error::IOError(e),
volatile_memory::Error::PartialBuffer {
expected,
completed,
} => Error::PartialBuffer {
expected,
completed,
},
}
}
}
/// Result of guest memory operations.
pub type Result<T> = std::result::Result<T, Error>;
// `Error` derives `Debug` and implements `Display` below, so the std error
// trait needs no additional methods here.
impl std::error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Guest memory error: ")?;
match self {
Error::InvalidGuestAddress(addr) => {
write!(f, "invalid guest address {}", addr.raw_value())
}
Error::IOError(error) => write!(f, "{}", error),
Error::PartialBuffer {
expected,
completed,
} => write!(
f,
"only used {} bytes in {} long buffer",
completed, expected,
),
Error::InvalidBackendAddress => write!(f, "invalid backend address"),
Error::HostAddressNotAvailable => write!(f, "host virtual address not available"),
}
}
}
/// An address in the guest's memory address space, stored as a raw `u64`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct GuestAddress(pub u64);
// Generates the `Address`/`AddressValue` arithmetic and comparison impls.
impl_address_ops!(GuestAddress, u64);

/// An address relative to the start of a single memory region.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct MemoryRegionAddress(pub u64);
// Same generated ops for region-relative addresses.
impl_address_ops!(MemoryRegionAddress, u64);

/// Type of the raw value underlying a `GuestAddress` (i.e. `u64`).
pub type GuestUsize = <GuestAddress as AddressValue>::V;
/// A shared file handle together with a starting offset into that file,
/// identifying where some backing content begins.
#[derive(Clone, Debug)]
pub struct FileOffset {
    file: Arc<File>,
    start: u64,
}

impl FileOffset {
    /// Builds a `FileOffset`, taking ownership of `file`.
    pub fn new(file: File, start: u64) -> Self {
        Self::from_arc(Arc::new(file), start)
    }

    /// Builds a `FileOffset` from an already-shared `Arc<File>` handle.
    pub fn from_arc(file: Arc<File>, start: u64) -> Self {
        Self { file, start }
    }

    /// Borrows the underlying `File`.
    pub fn file(&self) -> &File {
        // Deref coercion turns `&Arc<File>` into `&File`.
        &self.file
    }

    /// Borrows the shared `Arc<File>` handle itself.
    pub fn arc(&self) -> &Arc<File> {
        &self.file
    }

    /// Returns the start offset within the file.
    pub fn start(&self) -> u64 {
        self.start
    }
}
/// Represents a continuous region of guest memory.
///
/// Byte-level access comes from the `Bytes<MemoryRegionAddress>` supertrait;
/// all addresses passed to this trait are offsets relative to the region start.
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
    /// Type used to track dirtied memory within this region.
    type B: Bitmap;

    /// Returns the size of the region, in bytes.
    fn len(&self) -> GuestUsize;

    /// Returns the minimum (inclusive) guest address managed by the region.
    fn start_addr(&self) -> GuestAddress;

    /// Returns the maximum (inclusive) guest address managed by the region.
    ///
    /// NOTE(review): `len() - 1` underflows on a zero-length region and
    /// `unchecked_add` can wrap at the top of the address space — presumably
    /// region constructors rule both cases out; confirm before relying on it.
    fn last_addr(&self) -> GuestAddress {
        self.start_addr().unchecked_add(self.len() - 1)
    }

    /// Borrows the dirty-tracking bitmap associated with this region.
    fn bitmap(&self) -> &Self::B;

    /// Returns `Some(addr)` when the region-relative address is in bounds,
    /// `None` otherwise.
    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
        if self.address_in_range(addr) {
            Some(addr)
        } else {
            None
        }
    }

    /// Returns `true` when the region-relative address is within the region.
    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
        addr.raw_value() < self.len()
    }

    /// Returns `base + offset` when the sum neither overflows nor leaves the
    /// region, `None` otherwise.
    fn checked_offset(
        &self,
        base: MemoryRegionAddress,
        offset: usize,
    ) -> Option<MemoryRegionAddress> {
        base.checked_add(offset as u64)
            .and_then(|addr| self.check_address(addr))
    }

    /// Translates an absolute guest address into a region-relative address,
    /// returning `None` when the address does not belong to this region.
    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
        addr.checked_offset_from(self.start_addr())
            .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
    }

    /// Returns the host virtual address corresponding to the region address.
    ///
    /// The default implementation reports no mapping as available; backends
    /// with a host mapping are expected to override it.
    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
        Err(Error::HostAddressNotAvailable)
    }

    /// Returns information about the file backing this region, if any.
    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    /// Returns the region contents as an immutable byte slice, when directly
    /// accessible (default: not available).
    ///
    /// # Safety
    ///
    /// Marked `unsafe`: the returned memory may be written concurrently
    /// (e.g. by the guest), so callers must tolerate or exclude such writes.
    unsafe fn as_slice(&self) -> Option<&[u8]> {
        None
    }

    /// Returns the region contents as a mutable byte slice, when directly
    /// accessible (default: not available).
    ///
    /// # Safety
    ///
    /// Marked `unsafe`: this hands out `&mut` data from `&self`, so callers
    /// must ensure no aliasing access races with the returned slice.
    unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
        None
    }

    /// Returns a `VolatileSlice` of `count` bytes starting at `offset`
    /// (default: not available).
    #[allow(unused_variables)]
    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> Result<VolatileSlice<BS<Self::B>>> {
        Err(Error::HostAddressNotAvailable)
    }

    /// Returns a `VolatileSlice` spanning the entire region.
    fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
    }

    /// Reports whether the region is backed by hugetlbfs; `None` means unknown
    /// (the default).
    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}
/// Trait for a view into some `GuestMemory` object.
///
/// `T` is a cloneable handle (reference, `Rc`, `Arc`, ...) that dereferences
/// to the concrete `GuestMemory` implementation `M`.
pub trait GuestAddressSpace {
    /// The concrete `GuestMemory` type this address space views.
    type M: GuestMemory;
    /// A cloneable handle that dereferences to `Self::M`.
    type T: Clone + Deref<Target = Self::M>;

    /// Returns a handle to the memory map, usable for guest memory accesses.
    fn memory(&self) -> Self::T;
}
/// A plain borrow of a `GuestMemory` object is itself an address space:
/// handing out a handle is just copying the reference.
impl<M: GuestMemory> GuestAddressSpace for &M {
    type M = M;
    type T = Self;

    fn memory(&self) -> Self {
        // `self` is `&&M`; copy out the inner reference.
        *self
    }
}
/// An `Rc`-shared `GuestMemory` object is an address space; handles are
/// additional `Rc` clones.
impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
    type M = M;
    type T = Self;

    fn memory(&self) -> Self {
        // Explicit `Rc::clone` makes the cheap refcount bump obvious.
        Rc::clone(self)
    }
}
/// An `Arc`-shared `GuestMemory` object is an address space; handles are
/// additional `Arc` clones.
impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
    type M = M;
    type T = Self;

    fn memory(&self) -> Self {
        // Explicit `Arc::clone` makes the cheap refcount bump obvious.
        Arc::clone(self)
    }
}
/// Helper trait naming, for every lifetime `'a`, the iterator type that a
/// `GuestMemory` implementation uses to yield `&'a R` region references.
pub trait GuestMemoryIterator<'a, R: 'a> {
    /// Iterator over borrowed regions.
    type Iter: Iterator<Item = &'a R>;
}
/// Represents a container for a collection of `GuestMemoryRegion` objects.
///
/// The provided methods build address lookup, range checking and the
/// `try_access` walk primitive on top of `find_region`/`iter`; the blanket
/// `Bytes<GuestAddress>` impl below is layered on `try_access`.
pub trait GuestMemory {
    /// Type of region hosted by this address space.
    type R: GuestMemoryRegion;
    /// Helper type supplying the region iterator for any borrow lifetime.
    type I: for<'a> GuestMemoryIterator<'a, Self::R>;

    /// Returns the number of regions in the collection.
    fn num_regions(&self) -> usize;

    /// Returns the region containing `addr`, or `None`.
    fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;

    /// Invokes `cb` on each `(index, region)` pair, stopping at the first error.
    #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
    fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
    where
        F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
    {
        for (index, region) in self.iter().enumerate() {
            cb(index, region)?;
        }
        Ok(())
    }

    /// Same as `with_regions`, but accepts an `FnMut` callback.
    #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
    fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
    where
        F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
    {
        for (index, region) in self.iter().enumerate() {
            cb(index, region)?;
        }
        Ok(())
    }

    /// Returns an iterator over the regions of the address space.
    fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter;

    /// Maps each `(index, region)` pair through `mapf` and folds the results
    /// with `foldf`, starting from `init`.
    #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
    fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
    where
        F: Fn((usize, &Self::R)) -> T,
        G: Fn(T, T) -> T,
    {
        self.iter().enumerate().map(mapf).fold(init, foldf)
    }

    /// Returns the maximum (inclusive) address over all regions;
    /// `GuestAddress(0)` when there are no regions.
    fn last_addr(&self) -> GuestAddress {
        self.iter()
            .map(GuestMemoryRegion::last_addr)
            .fold(GuestAddress(0), std::cmp::max)
    }

    /// Converts an absolute address into a `(region, region-relative address)`
    /// pair, or `None` when no region contains the address.
    fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
        self.find_region(addr)
            // unwrap: `addr` came from `find_region`, so the translation
            // cannot fail for a consistent implementation.
            .map(|r| (r, r.to_region_addr(addr).unwrap()))
    }

    /// Returns `true` when some region contains `addr`.
    fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.find_region(addr).is_some()
    }

    /// Returns `Some(addr)` when some region contains `addr`.
    fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
        self.find_region(addr).map(|_| addr)
    }

    /// Returns `true` when the `len` bytes starting at `base` are entirely
    /// covered by regions (possibly more than one).
    fn check_range(&self, base: GuestAddress, len: usize) -> bool {
        // The no-op callback accepts every chunk, so `try_access` simply
        // measures how many bytes are reachable.
        match self.try_access(len, base, |_, count, _, _| -> Result<usize> { Ok(count) }) {
            Ok(count) => count == len,
            _ => false,
        }
    }

    /// Returns `base + offset` when the sum does not overflow and lands inside
    /// some region, `None` otherwise.
    fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
        base.checked_add(offset as u64)
            .and_then(|addr| self.check_address(addr))
    }

    /// Walks up to `count` bytes starting at `addr`, invoking `f` once per
    /// per-region chunk and crossing region boundaries as needed.
    ///
    /// `f` receives `(bytes_done_so_far, chunk_len, region_addr, region)` and
    /// returns how many bytes it actually handled; `Ok(0)` ends the walk
    /// early. Returns the total bytes handled; `InvalidGuestAddress` is
    /// reported only when nothing at all could be accessed.
    fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
    where
        F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
    {
        let mut cur = addr;
        let mut total = 0;
        while let Some(region) = self.find_region(cur) {
            // unwrap: `cur` was just located inside `region`.
            let start = region.to_region_addr(cur).unwrap();
            // Clamp the chunk to what remains of both the request and the region.
            let cap = region.len() - start.raw_value();
            let len = std::cmp::min(cap, (count - total) as GuestUsize);
            match f(total, len as usize, start, region) {
                Ok(0) => return Ok(total),
                Ok(len) => {
                    total += len;
                    if total == count {
                        break;
                    }
                    // NOTE(review): a wrap that lands exactly on address 0 is
                    // treated as a valid continuation rather than an overflow —
                    // confirm this is intended for accesses ending at the top
                    // of the address space.
                    cur = match cur.overflowing_add(len as GuestUsize) {
                        (GuestAddress(0), _) => GuestAddress(0),
                        (result, false) => result,
                        (_, true) => panic!("guest address overflow"),
                    }
                }
                // Propagate the callback's error unchanged.
                e => return e,
            }
        }
        if total == 0 {
            Err(Error::InvalidGuestAddress(addr))
        } else {
            Ok(total)
        }
    }

    /// Returns the host virtual address corresponding to `addr`.
    ///
    /// The raw pointer carries no lifetime: it is only valid while the
    /// backing mapping stays alive.
    fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(r, addr)| r.get_host_address(addr))
    }

    /// Returns a `VolatileSlice` of `count` bytes starting at `addr`.
    ///
    /// The request is delegated to a single region, so it fails for ranges
    /// spanning multiple regions; use the `Bytes` methods for those.
    fn get_slice(&self, addr: GuestAddress, count: usize) -> Result<VolatileSlice<MS<Self>>> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(r, addr)| r.get_slice(addr, count))
    }
}
/// Blanket byte-access implementation for any `GuestMemory`: every operation
/// is built on `try_access`, so reads and writes may span region boundaries.
impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
    type E = Error;

    /// Writes as much of `buf` as possible at `addr`; returns bytes written,
    /// which may be less than `buf.len()`.
    fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
        self.try_access(
            buf.len(),
            addr,
            |offset, _count, caddr, region| -> Result<usize> {
                // The region's own `write` truncates to its capacity.
                region.write(&buf[offset as usize..], caddr)
            },
        )
    }

    /// Reads as many bytes as possible from `addr` into `buf`; returns bytes
    /// read, which may be less than `buf.len()`.
    fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
        self.try_access(
            buf.len(),
            addr,
            |offset, _count, caddr, region| -> Result<usize> {
                region.read(&mut buf[offset as usize..], caddr)
            },
        )
    }

    /// Writes the entire `buf` at `addr`, or fails with `PartialBuffer`.
    fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
        let res = self.write(buf, addr)?;
        if res != buf.len() {
            return Err(Error::PartialBuffer {
                expected: buf.len(),
                completed: res,
            });
        }
        Ok(())
    }

    /// Fills the entire `buf` from `addr`, or fails with `PartialBuffer`.
    fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
        let res = self.read(buf, addr)?;
        if res != buf.len() {
            return Err(Error::PartialBuffer {
                expected: buf.len(),
                completed: res,
            });
        }
        Ok(())
    }

    /// Streams up to `count` bytes from `src` into guest memory at `addr`.
    fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
    where
        F: Read,
    {
        self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
            assert!(offset <= count);
            // Fast path: read straight into the region's host mapping.
            // Relies on `as_mut_slice`'s contract; a concurrent writer to the
            // same range would race with this copy.
            if let Some(dst) = unsafe { region.as_mut_slice() } {
                let start = caddr.raw_value() as usize;
                let end = start + len;
                // Retry reads interrupted by signals (EINTR).
                let bytes_read = loop {
                    match src.read(&mut dst[start..end]) {
                        Ok(n) => break n,
                        Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
                        Err(e) => return Err(Error::IOError(e)),
                    }
                };
                // The slice write bypassed `region.write`, so record the
                // dirtied range on the bitmap explicitly.
                region.bitmap().mark_dirty(start, bytes_read);
                Ok(bytes_read)
            } else {
                // Slow path: bounce through a temporary buffer of bounded size.
                let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
                let mut buf = vec![0u8; len].into_boxed_slice();
                loop {
                    match src.read(&mut buf[..]) {
                        Ok(bytes_read) => {
                            let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
                            assert_eq!(bytes_written, bytes_read);
                            break Ok(bytes_read);
                        }
                        Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
                        Err(e) => break Err(Error::IOError(e)),
                    }
                }
            }
        })
    }

    /// Streams exactly `count` bytes from `src`, or fails with `PartialBuffer`.
    fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
    where
        F: Read,
    {
        let res = self.read_from(addr, src, count)?;
        if res != count {
            return Err(Error::PartialBuffer {
                expected: count,
                completed: res,
            });
        }
        Ok(())
    }

    /// Streams up to `count` bytes of guest memory at `addr` into `dst`.
    fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
    where
        F: Write,
    {
        self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
            assert!(offset <= count);
            // Fast path: write straight from the region's host mapping;
            // concurrent guest writes to the range would race with this copy.
            if let Some(src) = unsafe { region.as_slice() } {
                let start = caddr.raw_value() as usize;
                let end = start + len;
                // Retry writes interrupted by signals (EINTR).
                loop {
                    match dst.write(&src[start..end]) {
                        Ok(n) => break Ok(n),
                        Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
                        Err(e) => break Err(Error::IOError(e)),
                    }
                }
            } else {
                // Slow path: bounce through a temporary buffer of bounded size.
                let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
                let mut buf = vec![0u8; len].into_boxed_slice();
                let bytes_read = region.read(&mut buf, caddr)?;
                assert_eq!(bytes_read, len);
                // `write_all` retries short writes internally.
                dst.write_all(&buf).map_err(Error::IOError)?;
                Ok(len)
            }
        })
    }

    /// Streams exactly `count` bytes to `dst`, or fails with `PartialBuffer`.
    fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
    where
        F: Write,
    {
        let res = self.write_to(addr, dst, count)?;
        if res != count {
            return Err(Error::PartialBuffer {
                expected: count,
                completed: res,
            });
        }
        Ok(())
    }

    /// Atomically stores `val` at `addr` with the given memory ordering;
    /// fails when the address is not covered by any region.
    fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(region, region_addr)| region.store(val, region_addr, order))
    }

    /// Atomically loads a value from `addr` with the given memory ordering;
    /// fails when the address is not covered by any region.
    fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(region, region_addr)| region.load(region_addr, order))
    }
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use super::*;
    #[cfg(feature = "backend-mmap")]
    use crate::bytes::ByteValued;
    #[cfg(feature = "backend-mmap")]
    use crate::GuestAddress;
    #[cfg(feature = "backend-mmap")]
    use std::io::Cursor;
    #[cfg(feature = "backend-mmap")]
    use std::time::{Duration, Instant};
    use vmm_sys_util::tempfile::TempFile;

    // Bitmap-less mmap backend used throughout these tests.
    #[cfg(feature = "backend-mmap")]
    type GuestMemoryMmap = crate::GuestMemoryMmap<()>;

    // Builds a `size`-byte vector whose i-th element equals `i`.
    #[cfg(feature = "backend-mmap")]
    fn make_image(size: u8) -> Vec<u8> {
        let mut image: Vec<u8> = Vec::with_capacity(size as usize);
        for i in 0..size {
            image.push(i);
        }
        image
    }

    // `FileOffset` getters report the constructor values, and `file()`/`arc()`
    // must refer to the same underlying `File` object.
    #[test]
    fn test_file_offset() {
        let file = TempFile::new().unwrap().into_file();
        let start = 1234;
        let file_offset = FileOffset::new(file, start);
        assert_eq!(file_offset.start(), start);
        assert_eq!(
            file_offset.file() as *const File,
            file_offset.arc().as_ref() as *const File
        );
    }

    // A read starting in one region and ending in the next must report the
    // full requested length (the access crosses the 0x40 region boundary).
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn checked_read_from() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x40);
        let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
        let image = make_image(0x80);
        let offset = GuestAddress(0x30);
        let count: usize = 0x20;
        assert_eq!(
            0x20_usize,
            mem.read_from(offset, &mut Cursor::new(&image), count)
                .unwrap()
        );
    }

    // Runs `f` repeatedly for at least `duration`, checking the clock only
    // every CHECK_PERIOD iterations to keep the loop overhead low.
    #[cfg(feature = "backend-mmap")]
    fn loop_timed<F>(duration: Duration, mut f: F)
    where
        F: FnMut(),
    {
        const CHECK_PERIOD: u64 = 1_000_000;
        let start_time = Instant::now();
        loop {
            for _ in 0..CHECK_PERIOD {
                f();
            }
            if start_time.elapsed() >= duration {
                break;
            }
        }
    }

    // Stress test: one thread repeatedly writes a value whose bytes are all
    // equal (alternating with its complement) while another thread reads it
    // back; a read observing mixed bytes would indicate a torn access.
    #[cfg(feature = "backend-mmap")]
    fn non_atomic_access_helper<T>()
    where
        T: ByteValued
            + std::fmt::Debug
            + From<u8>
            + Into<u128>
            + std::ops::Not<Output = T>
            + PartialEq,
    {
        use std::mem;
        use std::thread;

        // `val` plus 7 trailing bytes; the layout assertions below confirm
        // `val` stays naturally aligned inside the struct.
        #[derive(Clone, Copy, Debug, Default, PartialEq)]
        struct Data<T> {
            val: T,
            some_bytes: [u8; 7],
        }
        assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
        assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
        unsafe impl<T: ByteValued> ByteValued for Data<T> {}

        let start = GuestAddress(0);
        let region_len = 1 << 12;
        // Place `val` so it straddles the boundary between the two regions.
        let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
        let mem = GuestMemoryMmap::from_ranges(&[
            (start, region_len),
            (start.unchecked_add(region_len as u64), region_len),
        ])
        .unwrap();
        let mem2 = mem.clone();
        let some_bytes = [1u8, 2, 4, 16, 32, 64, 128];
        let mut data = Data {
            val: T::from(0u8),
            some_bytes,
        };
        mem.write_obj(data, data_start).unwrap();
        let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
        assert_eq!(read_data, data);

        // Reader thread: all bytes of `val` must always agree, since the
        // writer only ever stores all-zeros or all-ones patterns.
        let t = thread::spawn(move || {
            let mut count: u64 = 0;
            loop_timed(Duration::from_secs(3), || {
                let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
                let bytes = data.val.into().to_le_bytes();
                for i in 1..mem::size_of::<T>() {
                    if bytes[0] != bytes[i] {
                        panic!(
                            "val bytes don't match {:?} after {} iterations",
                            &bytes[..mem::size_of::<T>()],
                            count
                        );
                    }
                }
                count += 1;
            });
        });

        // Writer: alternate between a value and its bitwise complement.
        loop_timed(Duration::from_secs(3), || {
            mem.write_obj(data, data_start).unwrap();
            data.val = !data.val;
        });
        t.join().unwrap()
    }

    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_non_atomic_access() {
        non_atomic_access_helper::<u16>()
    }

    // All access APIs must accept zero-length buffers/objects and report
    // zero bytes transferred without error.
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_zero_length_accesses() {
        #[derive(Default, Clone, Copy)]
        #[repr(C)]
        struct ZeroSizedStruct {
            dummy: [u32; 0],
        }
        unsafe impl ByteValued for ZeroSizedStruct {}

        let addr = GuestAddress(0x1000);
        let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
        let obj = ZeroSizedStruct::default();
        let mut image = make_image(0x80);
        assert_eq!(mem.write(&[], addr).unwrap(), 0);
        assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
        assert!(mem.write_slice(&[], addr).is_ok());
        assert!(mem.read_slice(&mut [], addr).is_ok());
        assert!(mem.write_obj(obj, addr).is_ok());
        assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
        assert_eq!(mem.read_from(addr, &mut Cursor::new(&image), 0).unwrap(), 0);
        assert!(mem
            .read_exact_from(addr, &mut Cursor::new(&image), 0)
            .is_ok());
        assert_eq!(
            mem.write_to(addr, &mut Cursor::new(&mut image), 0).unwrap(),
            0
        );
        assert!(mem
            .write_all_to(addr, &mut Cursor::new(&mut image), 0)
            .is_ok());
    }

    // Delegates to the shared atomic load/store checks with one in-range and
    // one out-of-range address.
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_atomic_accesses() {
        let addr = GuestAddress(0x1000);
        let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
        let bad_addr = addr.unchecked_add(0x1000);
        crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
    }

    // Anonymous mmap regions should report "unknown" for hugetlbfs backing.
    #[cfg(feature = "backend-mmap")]
    #[cfg(target_os = "linux")]
    #[test]
    fn test_guest_memory_mmap_is_hugetlbfs() {
        let addr = GuestAddress(0x1000);
        let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
        let r = mem.find_region(addr).unwrap();
        assert_eq!(r.is_hugetlbfs(), None);
    }
}