use std::cmp::min;
use std::error;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::PhantomData;
use std::mem::{align_of, size_of};
use std::ptr::copy;
use std::ptr::{read_volatile, write_volatile};
use std::result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;
use std::usize;
use crate::atomic_integer::AtomicInteger;
use crate::bitmap::{Bitmap, BitmapSlice, BS};
use crate::{AtomicAccess, ByteValued, Bytes};
use copy_slice_impl::copy_slice;
#[allow(missing_docs)]
#[derive(Debug)]
/// Error codes for volatile memory operations.
pub enum Error {
    /// `addr` is out of bounds of the volatile memory slice.
    OutOfBounds { addr: usize },
    /// Adding `offset` to `base` overflows `usize`.
    Overflow { base: usize, offset: usize },
    /// Taking a slice of `nelements` elements of `size` bytes each overflows `usize`.
    TooBig { nelements: usize, size: usize },
    /// The address `addr` is not aligned to `alignment`.
    Misaligned { addr: usize, alignment: usize },
    /// An I/O error occurred while reading from or writing to a stream.
    IOError(io::Error),
    /// Only `completed` bytes out of the `expected` buffer length could be transferred.
    PartialBuffer { expected: usize, completed: usize },
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::OutOfBounds { addr } => write!(f, "address 0x{:x} is out of bounds", addr),
Error::Overflow { base, offset } => write!(
f,
"address 0x{:x} offset by 0x{:x} would overflow",
base, offset
),
Error::TooBig { nelements, size } => write!(
f,
"{:?} elements of size {:?} would overflow a usize",
nelements, size
),
Error::Misaligned { addr, alignment } => {
write!(f, "address 0x{:x} is not aligned to {:?}", addr, alignment)
}
Error::IOError(error) => write!(f, "{}", error),
Error::PartialBuffer {
expected,
completed,
} => write!(
f,
"only used {} bytes in {} long buffer",
completed, expected
),
}
}
}
impl error::Error for Error {}
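/// Result of volatile memory operations.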
pub type Result<T> = result::Result<T, Error>;
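/// Computes `base + offset`, returning [`Error::Overflow`] if the addition overflows.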
pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
match base.checked_add(offset) {
None => Err(Error::Overflow { base, offset }),
Some(m) => Ok(m),
}
}
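/// Types that support raw, volatile access to their memory through byte offsets into the
/// region, with writes tracked by an associated dirty [`Bitmap`].
///
/// A minimal usage sketch through the `&mut [u8]` implementation below; it assumes the usual
/// crate-root re-exports (here written as `vm_memory::{Bytes, VolatileMemory}`):
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// let mut data = [0u8; 32];
/// let buf = &mut data[..];
/// // Carve out a 16-byte window at offset 4 and access it through the `Bytes` trait.
/// let slice = buf.get_slice(4, 16).unwrap();
/// slice.write_obj(0x1122_3344u32, 0).unwrap();
/// assert_eq!(slice.read_obj::<u32>(0).unwrap(), 0x1122_3344);
/// ```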
pub trait VolatileMemory {
type B: Bitmap;
fn len(&self) -> usize;
fn is_empty(&self) -> bool {
self.len() == 0
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<BS<Self::B>>>;
fn as_volatile_slice(&self) -> VolatileSlice<BS<Self::B>> {
self.get_slice(0, self.len()).unwrap()
}
fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<T, BS<Self::B>>> {
let slice = self.get_slice(offset, size_of::<T>())?;
unsafe { Ok(VolatileRef::with_bitmap(slice.addr, slice.bitmap)) }
}
fn get_array_ref<T: ByteValued>(
&self,
offset: usize,
n: usize,
) -> Result<VolatileArrayRef<T, BS<Self::B>>> {
let nbytes = isize::try_from(n)
.ok()
.and_then(|n| n.checked_mul(size_of::<T>() as isize))
.ok_or(Error::TooBig {
nelements: n,
size: size_of::<T>(),
})?;
let slice = self.get_slice(offset, nbytes as usize)?;
unsafe { Ok(VolatileArrayRef::with_bitmap(slice.addr, n, slice.bitmap)) }
}
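    /// Returns a reference to an instance of `T` at `offset`, after checking bounds and
    /// alignment.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that no other accesses (in particular, mutable or volatile
    /// ones) to the same memory are performed for the lifetime of the returned reference.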
unsafe fn aligned_as_ref<T: ByteValued>(&self, offset: usize) -> Result<&T> {
let slice = self.get_slice(offset, size_of::<T>())?;
slice.check_alignment(align_of::<T>())?;
Ok(&*(slice.addr as *const T))
}
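    /// Returns a mutable reference to an instance of `T` at `offset`, after checking bounds and
    /// alignment.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the returned reference is the only way this memory is
    /// accessed for its lifetime.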
unsafe fn aligned_as_mut<T: ByteValued>(&self, offset: usize) -> Result<&mut T> {
let slice = self.get_slice(offset, size_of::<T>())?;
slice.check_alignment(align_of::<T>())?;
Ok(&mut *(slice.addr as *mut T))
}
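    /// Returns a reference to an atomic instance of `T` at `offset`, after checking bounds and
    /// alignment.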
fn get_atomic_ref<T: AtomicInteger>(&self, offset: usize) -> Result<&T> {
let slice = self.get_slice(offset, size_of::<T>())?;
slice.check_alignment(align_of::<T>())?;
unsafe { Ok(&*(slice.addr as *const T)) }
}
fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
let mem_end = compute_offset(base, offset)?;
if mem_end > self.len() {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(mem_end)
}
}
impl<'a> VolatileMemory for &'a mut [u8] {
type B = ();
fn len(&self) -> usize {
<[u8]>::len(self)
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<()>> {
let _ = self.compute_end_offset(offset, count)?;
unsafe {
Ok(VolatileSlice::new(
(self.as_ptr() as usize + offset) as *mut _,
count,
))
}
}
}
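/// Wrapper that lowers the alignment requirement of `T` to 1, so that `read_volatile` and
/// `write_volatile` can be used on addresses that are not naturally aligned for `T`.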
#[repr(C, packed)]
struct Packed<T>(T);
#[derive(Clone, Copy, Debug)]
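/// A slice of `size` bytes starting at `addr` that is only accessed with volatile reads and
/// writes; writes performed through it are recorded in the associated dirty-bitmap slice.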
pub struct VolatileSlice<'a, B = ()> {
addr: *mut u8,
size: usize,
bitmap: B,
phantom: PhantomData<&'a u8>,
}
impl<'a> VolatileSlice<'a, ()> {
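    /// Creates a slice of raw memory that must be accessed volatilely.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `addr` points to `size` bytes that stay valid for the
    /// lifetime `'a` and that all other users of this memory range also use volatile accesses.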
pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
Self::with_bitmap(addr, size, ())
}
}
impl<'a, B: BitmapSlice> VolatileSlice<'a, B> {
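    /// Creates a slice of raw memory with an associated dirty-bitmap slice.
    ///
    /// # Safety
    ///
    /// Same requirements as `VolatileSlice::new`; `bitmap` is expected to track dirtiness for
    /// this same memory range.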
pub unsafe fn with_bitmap(addr: *mut u8, size: usize, bitmap: B) -> VolatileSlice<'a, B> {
VolatileSlice {
addr,
size,
bitmap,
phantom: PhantomData,
}
}
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
pub fn len(&self) -> usize {
self.size
}
pub fn is_empty(&self) -> bool {
self.size == 0
}
pub fn bitmap(&self) -> &B {
&self.bitmap
}
pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> {
let end = self.offset(mid)?;
let start = unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone()) };
Ok((start, end))
}
pub fn subslice(&self, offset: usize, count: usize) -> Result<Self> {
let mem_end = compute_offset(offset, count)?;
if mem_end > self.len() {
return Err(Error::OutOfBounds { addr: mem_end });
}
unsafe {
Ok(VolatileSlice::with_bitmap(
(self.as_ptr() as usize + offset) as *mut u8,
count,
self.bitmap.slice_at(offset),
))
}
}
pub fn offset(&self, count: usize) -> Result<VolatileSlice<'a, B>> {
let new_addr = (self.addr as usize)
.checked_add(count)
.ok_or(Error::Overflow {
base: self.addr as usize,
offset: count,
})?;
let new_size = self
.size
.checked_sub(count)
.ok_or(Error::OutOfBounds { addr: new_addr })?;
unsafe {
Ok(VolatileSlice::with_bitmap(
new_addr as *mut u8,
new_size,
self.bitmap.slice_at(count),
))
}
}
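    /// Copies as many elements of type `T` as possible from this slice to `buf`, returning the
    /// number of elements copied.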
pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
where
T: ByteValued,
{
if size_of::<T>() == 1 {
let source = unsafe { self.as_slice() };
let dst = unsafe { from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, buf.len()) };
copy_slice(dst, source)
} else {
let count = self.size / size_of::<T>();
let source = self.get_array_ref::<T>(0, count).unwrap();
source.copy_to(buf)
}
}
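    /// Copies `min(self.len(), slice.len())` bytes from this slice to `slice`, marking the
    /// copied range dirty in the destination bitmap.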
pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
unsafe {
let count = min(self.size, slice.size);
copy(self.addr, slice.addr, count);
slice.bitmap.mark_dirty(0, count);
}
}
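    /// Copies as many elements of type `T` as possible from `buf` into this slice, marking the
    /// written range dirty in the bitmap.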
pub fn copy_from<T>(&self, buf: &[T])
where
T: ByteValued,
{
if size_of::<T>() == 1 {
let dst = unsafe { self.as_mut_slice() };
let src = unsafe { from_raw_parts(buf.as_ptr() as *const u8, buf.len()) };
let count = copy_slice(dst, src);
self.bitmap.mark_dirty(0, count * size_of::<T>());
} else {
let count = self.size / size_of::<T>();
let dest = self.get_array_ref::<T>(0, count).unwrap();
dest.copy_from(buf);
};
}
unsafe fn as_slice(&self) -> &[u8] {
from_raw_parts(self.addr, self.size)
}
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
from_raw_parts_mut(self.addr, self.size)
}
fn check_alignment(&self, alignment: usize) -> Result<()> {
debug_assert!((alignment & (alignment - 1)) == 0);
if ((self.addr as usize) & (alignment - 1)) != 0 {
return Err(Error::Misaligned {
addr: self.addr as usize,
alignment,
});
}
Ok(())
}
}
impl<B: BitmapSlice> Bytes<usize> for VolatileSlice<'_, B> {
type E = Error;
fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
if buf.is_empty() {
return Ok(0);
}
if addr >= self.size {
return Err(Error::OutOfBounds { addr });
}
let slice = unsafe { self.as_mut_slice() }.split_at_mut(addr).1;
let count = copy_slice(slice, buf);
self.bitmap.mark_dirty(addr, count);
Ok(count)
}
fn read(&self, buf: &mut [u8], addr: usize) -> Result<usize> {
if buf.is_empty() {
return Ok(0);
}
if addr >= self.size {
return Err(Error::OutOfBounds { addr });
}
let slice = unsafe { self.as_slice() }.split_at(addr).1;
Ok(copy_slice(buf, slice))
}
fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
let len = self.write(buf, addr)?;
if len != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: len,
});
}
Ok(())
}
fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
let len = self.read(buf, addr)?;
if len != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: len,
});
}
Ok(())
}
fn read_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
let end = self.compute_end_offset(addr, count)?;
let bytes_read = unsafe {
let dst = &mut self.as_mut_slice()[addr..end];
loop {
match src.read(dst) {
Ok(n) => break n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(Error::IOError(e)),
}
}
};
self.bitmap.mark_dirty(addr, bytes_read);
Ok(bytes_read)
}
fn read_exact_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let end = self.compute_end_offset(addr, count)?;
let dst = unsafe { &mut self.as_mut_slice()[addr..end] };
let result = src.read_exact(dst).map_err(Error::IOError);
self.bitmap.mark_dirty(addr, count);
result
}
fn write_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
let src = &self.as_slice()[addr..end];
loop {
match dst.write(src) {
Ok(n) => break Ok(n),
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => break Err(Error::IOError(e)),
}
}
}
}
fn write_all_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
let src = &self.as_slice()[addr..end];
dst.write_all(src).map_err(Error::IOError)?;
}
Ok(())
}
fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
self.get_atomic_ref::<T::A>(addr).map(|r| {
r.store(val.into(), order);
self.bitmap.mark_dirty(addr, size_of::<T>())
})
}
fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
self.get_atomic_ref::<T::A>(addr)
.map(|r| r.load(order).into())
}
}
impl<B: BitmapSlice> VolatileMemory for VolatileSlice<'_, B> {
type B = B;
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<B>> {
let _ = self.compute_end_offset(offset, count)?;
        Ok(unsafe {
            VolatileSlice::with_bitmap(
                (self.addr as usize + offset) as *mut u8,
                count,
                self.bitmap.slice_at(offset),
            )
        })
}
}
#[derive(Clone, Copy, Debug)]
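/// A volatile reference to a single instance of `T` at `addr`; loads and stores go through
/// `read_volatile`/`write_volatile` on a packed (alignment-1) representation, so unaligned
/// addresses are allowed.
///
/// A minimal usage sketch, assuming the usual crate-root re-exports (here written as
/// `vm_memory::VolatileMemory`, which provides `get_ref`):
///
/// ```
/// # use vm_memory::VolatileMemory;
/// let mut data = [0u8; 4];
/// let buf = &mut data[..];
/// let vref = buf.get_ref::<u32>(0).unwrap();
/// vref.store(0xcafe_f00du32);
/// assert_eq!(vref.load(), 0xcafe_f00d);
/// ```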
pub struct VolatileRef<'a, T, B = ()> {
addr: *mut Packed<T>,
bitmap: B,
phantom: PhantomData<&'a T>,
}
impl<'a, T> VolatileRef<'a, T, ()>
where
T: ByteValued,
{
pub unsafe fn new(addr: *mut u8) -> Self {
Self::with_bitmap(addr, ())
}
}
#[allow(clippy::len_without_is_empty)]
impl<'a, T, B> VolatileRef<'a, T, B>
where
T: ByteValued,
B: BitmapSlice,
{
pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B) -> Self {
VolatileRef {
addr: addr as *mut Packed<T>,
bitmap,
phantom: PhantomData,
}
}
pub fn as_ptr(&self) -> *mut u8 {
self.addr as *mut u8
}
pub fn len(&self) -> usize {
size_of::<T>()
}
pub fn bitmap(&self) -> &B {
&self.bitmap
}
#[inline(always)]
pub fn store(&self, v: T) {
unsafe { write_volatile(self.addr, Packed::<T>(v)) };
self.bitmap.mark_dirty(0, size_of::<T>())
}
#[inline(always)]
pub fn load(&self) -> T {
unsafe { read_volatile(self.addr).0 }
}
pub fn to_slice(&self) -> VolatileSlice<'a, B> {
unsafe {
VolatileSlice::with_bitmap(self.addr as *mut u8, size_of::<T>(), self.bitmap.clone())
}
}
}
#[derive(Clone, Copy, Debug)]
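/// A volatile reference to an array of `nelem` elements of type `T` starting at `addr`.
///
/// A minimal usage sketch, assuming the usual crate-root re-exports (here written as
/// `vm_memory::VolatileMemory`, which provides `get_array_ref`):
///
/// ```
/// # use vm_memory::VolatileMemory;
/// let mut data = [0u8; 8];
/// let buf = &mut data[..];
/// let arr = buf.get_array_ref::<u16>(0, 4).unwrap();
/// arr.store(2, 0xabcd);
/// assert_eq!(arr.load(2), 0xabcd);
/// ```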
pub struct VolatileArrayRef<'a, T, B = ()> {
addr: *mut u8,
nelem: usize,
bitmap: B,
phantom: PhantomData<&'a T>,
}
impl<'a, T> VolatileArrayRef<'a, T>
where
T: ByteValued,
{
pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self {
Self::with_bitmap(addr, nelem, ())
}
}
impl<'a, T, B> VolatileArrayRef<'a, T, B>
where
T: ByteValued,
B: BitmapSlice,
{
pub unsafe fn with_bitmap(addr: *mut u8, nelem: usize, bitmap: B) -> Self {
VolatileArrayRef {
addr,
nelem,
bitmap,
phantom: PhantomData,
}
}
pub fn is_empty(&self) -> bool {
self.nelem == 0
}
pub fn len(&self) -> usize {
self.nelem
}
pub fn element_size(&self) -> usize {
size_of::<T>()
}
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
pub fn bitmap(&self) -> &B {
&self.bitmap
}
pub fn to_slice(&self) -> VolatileSlice<'a, B> {
unsafe {
VolatileSlice::with_bitmap(
self.addr,
self.nelem * self.element_size(),
self.bitmap.clone(),
)
}
}
pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> {
assert!(index < self.nelem);
unsafe {
let byteofs = (self.element_size() * index) as isize;
let ptr = self.as_ptr().offset(byteofs);
VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize))
}
}
pub fn load(&self, index: usize) -> T {
self.ref_at(index).load()
}
pub fn store(&self, index: usize, value: T) {
self.ref_at(index).store(value)
}
pub fn copy_to(&self, buf: &mut [T]) -> usize {
if size_of::<T>() == 1 {
let source = self.to_slice();
let src = unsafe { source.as_slice() };
let dst = unsafe { from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, buf.len()) };
return copy_slice(dst, src);
}
let mut addr = self.addr;
let mut i = 0;
for v in buf.iter_mut().take(self.len()) {
unsafe {
*v = read_volatile(addr as *const Packed<T>).0;
addr = addr.add(self.element_size());
};
i += 1;
}
i
}
pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
unsafe {
let count = min(self.len() * self.element_size(), slice.size);
copy(self.addr, slice.addr, count);
slice.bitmap.mark_dirty(0, count);
}
}
pub fn copy_from(&self, buf: &[T]) {
if size_of::<T>() == 1 {
let destination = self.to_slice();
let dst = unsafe { destination.as_mut_slice() };
let src = unsafe { from_raw_parts(buf.as_ptr() as *const u8, buf.len()) };
let count = copy_slice(dst, src);
self.bitmap.mark_dirty(0, count);
} else {
let mut addr = self.addr;
for &v in buf.iter().take(self.len()) {
unsafe {
write_volatile(addr as *mut Packed<T>, Packed::<T>(v));
addr = addr.add(self.element_size());
}
}
self.bitmap
.mark_dirty(0, addr as usize - self.addr as usize)
}
}
}
impl<'a, B: BitmapSlice> From<VolatileSlice<'a, B>> for VolatileArrayRef<'a, u8, B> {
fn from(slice: VolatileSlice<'a, B>) -> Self {
unsafe { VolatileArrayRef::with_bitmap(slice.as_ptr(), slice.len(), slice.bitmap) }
}
}
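/// Returns the largest power of two that a non-zero `addr` is aligned to, i.e. its lowest set
/// bit.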
fn alignment(addr: usize) -> usize {
addr & (!addr + 1)
}
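/// Helpers for copying between byte slices: copies of at most `size_of::<usize>()` bytes are
/// done with volatile, alignment-aware accesses; larger copies fall back to `copy_from_slice`.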
mod copy_slice_impl {
use super::*;
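    /// Copies a single naturally-aligned unit of `align` bytes (1, 2, 4 or 8) from `src_addr`
    /// to `dst_addr` using volatile accesses.
    ///
    /// # Safety
    ///
    /// Both addresses must be valid for a read/write of `align` bytes and aligned to `align`.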
unsafe fn copy_single(align: usize, src_addr: usize, dst_addr: usize) {
match align {
8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
1 => write_volatile(dst_addr as *mut u8, read_volatile(src_addr as *const u8)),
_ => unreachable!(),
}
}
fn copy_slice_volatile(dst: &mut [u8], src: &[u8]) -> usize {
let total = min(src.len(), dst.len());
let mut left = total;
let mut src_addr = src.as_ptr() as usize;
let mut dst_addr = dst.as_ptr() as usize;
let align = min(alignment(src_addr), alignment(dst_addr));
let mut copy_aligned_slice = |min_align| {
while align >= min_align && left >= min_align {
unsafe { copy_single(min_align, src_addr, dst_addr) };
src_addr += min_align;
dst_addr += min_align;
left -= min_align;
}
};
if size_of::<usize>() > 4 {
copy_aligned_slice(8);
}
copy_aligned_slice(4);
copy_aligned_slice(2);
copy_aligned_slice(1);
total
}
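    /// Copies `min(src.len(), dst.len())` bytes from `src` to `dst`, returning the number of
    /// bytes copied.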
pub(super) fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize {
let total = min(src.len(), dst.len());
if total <= size_of::<usize>() {
copy_slice_volatile(dst, src);
} else {
dst[..total].copy_from_slice(&src[..total]);
}
total
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use super::*;
use std::fs::File;
use std::mem::size_of_val;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Barrier};
use std::thread::spawn;
use matches::assert_matches;
use vmm_sys_util::tempfile::TempFile;
use crate::bitmap::tests::{
check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory,
};
use crate::bitmap::{AtomicBitmap, RefSlice};
#[derive(Clone)]
struct VecMem {
mem: Arc<[u8]>,
}
impl VecMem {
fn new(size: usize) -> VecMem {
VecMem {
mem: vec![0; size].into(),
}
}
}
impl VolatileMemory for VecMem {
type B = ();
fn len(&self) -> usize {
self.mem.len()
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<()>> {
let _ = self.compute_end_offset(offset, count)?;
Ok(unsafe {
VolatileSlice::new((self.mem.as_ptr() as usize + offset) as *mut _, count)
})
}
}
#[test]
fn test_display_error() {
assert_eq!(
format!("{}", Error::OutOfBounds { addr: 0x10 }),
"address 0x10 is out of bounds"
);
assert_eq!(
format!(
"{}",
Error::Overflow {
base: 0x0,
offset: 0x10
}
),
"address 0x0 offset by 0x10 would overflow"
);
assert_eq!(
format!(
"{}",
Error::TooBig {
nelements: 100_000,
size: 1_000_000_000
}
),
"100000 elements of size 1000000000 would overflow a usize"
);
assert_eq!(
format!(
"{}",
Error::Misaligned {
addr: 0x4,
alignment: 8
}
),
"address 0x4 is not aligned to 8"
);
assert_eq!(
format!(
"{}",
Error::PartialBuffer {
expected: 100,
completed: 90
}
),
"only used 90 bytes in 100 long buffer"
);
}
#[test]
fn misaligned_ref() {
let mut a = [0u8; 3];
let a_ref = &mut a[..];
unsafe {
assert!(
a_ref.aligned_as_ref::<u16>(0).is_err() ^ a_ref.aligned_as_ref::<u16>(1).is_err()
);
assert!(
a_ref.aligned_as_mut::<u16>(0).is_err() ^ a_ref.aligned_as_mut::<u16>(1).is_err()
);
}
}
#[test]
fn atomic_store() {
let mut a = [0usize; 1];
{
let a_ref = unsafe {
VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
};
let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
atomic.store(2usize, Ordering::Relaxed)
}
assert_eq!(a[0], 2);
}
#[test]
fn atomic_load() {
let mut a = [5usize; 1];
{
            let a_ref = unsafe {
                VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
            };
let atomic = {
let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
assert_eq!(atomic.load(Ordering::Relaxed), 5usize);
atomic
};
atomic.load(Ordering::Relaxed);
        }
}
#[test]
fn misaligned_atomic() {
let mut a = [5usize, 5usize];
let a_ref =
unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>()) };
assert!(a_ref.get_atomic_ref::<AtomicUsize>(0).is_ok());
assert!(a_ref.get_atomic_ref::<AtomicUsize>(1).is_err());
}
#[test]
fn ref_store() {
let mut a = [0u8; 1];
{
let a_ref = &mut a[..];
let v_ref = a_ref.get_ref(0).unwrap();
v_ref.store(2u8);
}
assert_eq!(a[0], 2);
}
#[test]
fn ref_load() {
let mut a = [5u8; 1];
{
let a_ref = &mut a[..];
let c = {
let v_ref = a_ref.get_ref::<u8>(0).unwrap();
assert_eq!(v_ref.load(), 5u8);
v_ref
};
c.load();
        }
}
#[test]
fn ref_to_slice() {
let mut a = [1u8; 5];
let a_ref = &mut a[..];
let v_ref = a_ref.get_ref(1).unwrap();
v_ref.store(0x1234_5678u32);
let ref_slice = v_ref.to_slice();
assert_eq!(v_ref.as_ptr() as usize, ref_slice.as_ptr() as usize);
assert_eq!(v_ref.len(), ref_slice.len());
assert!(!ref_slice.is_empty());
}
#[test]
fn observe_mutate() {
let a = VecMem::new(1);
let a_clone = a.clone();
let v_ref = a.get_ref::<u8>(0).unwrap();
let barrier = Arc::new(Barrier::new(2));
let barrier1 = barrier.clone();
v_ref.store(99);
spawn(move || {
barrier1.wait();
let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
clone_v_ref.store(0);
barrier1.wait();
});
assert_eq!(v_ref.load(), 99);
barrier.wait();
barrier.wait();
assert_eq!(v_ref.load(), 0);
}
#[test]
fn mem_is_empty() {
let a = VecMem::new(100);
assert!(!a.is_empty());
let a = VecMem::new(0);
assert!(a.is_empty());
}
#[test]
fn slice_len() {
let mem = VecMem::new(100);
let slice = mem.get_slice(0, 27).unwrap();
assert_eq!(slice.len(), 27);
assert!(!slice.is_empty());
let slice = mem.get_slice(34, 27).unwrap();
assert_eq!(slice.len(), 27);
assert!(!slice.is_empty());
let slice = slice.get_slice(20, 5).unwrap();
assert_eq!(slice.len(), 5);
assert!(!slice.is_empty());
let slice = mem.get_slice(34, 0).unwrap();
assert!(slice.is_empty());
}
#[test]
fn slice_subslice() {
let mem = VecMem::new(100);
let slice = mem.get_slice(0, 100).unwrap();
assert!(slice.write(&[1; 80], 10).is_ok());
assert!(slice.subslice(0, 0).is_ok());
assert!(slice.subslice(0, 101).is_err());
assert!(slice.subslice(99, 0).is_ok());
assert!(slice.subslice(99, 1).is_ok());
assert!(slice.subslice(99, 2).is_err());
assert!(slice.subslice(100, 0).is_ok());
assert!(slice.subslice(100, 1).is_err());
assert!(slice.subslice(101, 0).is_err());
assert!(slice.subslice(101, 1).is_err());
assert!(slice.subslice(std::usize::MAX, 2).is_err());
assert!(slice.subslice(2, std::usize::MAX).is_err());
let maybe_offset_slice = slice.subslice(10, 80);
assert!(maybe_offset_slice.is_ok());
let offset_slice = maybe_offset_slice.unwrap();
assert_eq!(offset_slice.len(), 80);
let mut buf = [0; 80];
assert!(offset_slice.read(&mut buf, 0).is_ok());
assert_eq!(&buf[0..80], &[1; 80][0..80]);
}
#[test]
fn slice_offset() {
let mem = VecMem::new(100);
let slice = mem.get_slice(0, 100).unwrap();
assert!(slice.write(&[1; 80], 10).is_ok());
assert!(slice.offset(101).is_err());
let maybe_offset_slice = slice.offset(10);
assert!(maybe_offset_slice.is_ok());
let offset_slice = maybe_offset_slice.unwrap();
assert_eq!(offset_slice.len(), 90);
let mut buf = [0; 90];
assert!(offset_slice.read(&mut buf, 0).is_ok());
assert_eq!(&buf[0..80], &[1; 80][0..80]);
assert_eq!(&buf[80..90], &[0; 10][0..10]);
}
#[test]
fn slice_copy_to_u8() {
let mut a = [2u8, 4, 6, 8, 10];
let mut b = [0u8; 4];
let mut c = [0u8; 6];
let a_ref = &mut a[..];
let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap();
v_ref.copy_to(&mut b[..]);
v_ref.copy_to(&mut c[..]);
assert_eq!(b[0..4], a_ref[0..4]);
assert_eq!(c[0..5], a_ref[0..5]);
}
#[test]
fn slice_copy_to_u16() {
let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5];
let mut b = [0u16; 4];
let mut c = [0u16; 6];
let a_ref = &mut a[..];
let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut u8, 9) };
v_ref.copy_to(&mut b[..]);
v_ref.copy_to(&mut c[..]);
assert_eq!(b[0..4], a_ref[0..4]);
assert_eq!(c[0..4], a_ref[0..4]);
assert_eq!(c[4], 0);
}
#[test]
fn slice_copy_from_u8() {
let a = [2u8, 4, 6, 8, 10];
let mut b = [0u8; 4];
let mut c = [0u8; 6];
let b_ref = &mut b[..];
let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap();
v_ref.copy_from(&a[..]);
assert_eq!(b_ref[0..4], a[0..4]);
let c_ref = &mut c[..];
let v_ref = c_ref.get_slice(0, c_ref.len()).unwrap();
v_ref.copy_from(&a[..]);
assert_eq!(c_ref[0..5], a[0..5]);
}
#[test]
fn slice_copy_from_u16() {
let a = [2u16, 4, 6, 8, 10];
let mut b = [0u16; 4];
let mut c = [0u16; 6];
let b_ref = &mut b[..];
let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) };
v_ref.copy_from(&a[..]);
assert_eq!(b_ref[0..4], a[0..4]);
let c_ref = &mut c[..];
let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) };
v_ref.copy_from(&a[..]);
assert_eq!(c_ref[0..4], a[0..4]);
assert_eq!(c_ref[4], 0);
}
#[test]
fn slice_copy_to_volatile_slice() {
let mut a = [2u8, 4, 6, 8, 10];
let a_ref = &mut a[..];
let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
let mut b = [0u8; 4];
let b_ref = &mut b[..];
let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap();
a_slice.copy_to_volatile_slice(b_slice);
assert_eq!(b, [2, 4, 6, 8]);
}
#[test]
fn slice_overflow_error() {
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_slice(MAX, 1).unwrap_err();
assert_matches!(
res,
Error::Overflow {
base: MAX,
offset: 1,
}
);
}
#[test]
fn slice_oob_error() {
let a = VecMem::new(100);
a.get_slice(50, 50).unwrap();
let res = a.get_slice(55, 50).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 105 });
}
#[test]
fn ref_overflow_error() {
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_ref::<u8>(MAX).unwrap_err();
assert_matches!(
res,
Error::Overflow {
base: MAX,
offset: 1,
}
);
}
#[test]
fn ref_oob_error() {
let a = VecMem::new(100);
a.get_ref::<u8>(99).unwrap();
let res = a.get_ref::<u16>(99).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 101 });
}
#[test]
fn ref_oob_too_large() {
let a = VecMem::new(3);
let res = a.get_ref::<u32>(0).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 4 });
}
#[test]
fn slice_store() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let r = a.get_ref(2).unwrap();
r.store(9u16);
assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
}
#[test]
fn test_write_past_end() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 5);
}
#[test]
fn slice_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let sample_buf = [1, 2, 3];
assert!(s.write(&sample_buf, 5).is_err());
assert!(s.write(&sample_buf, 2).is_ok());
let mut buf = [0u8; 3];
assert!(s.read(&mut buf, 5).is_err());
assert!(s.read_slice(&mut buf, 2).is_ok());
assert_eq!(buf, sample_buf);
assert_eq!(s.write(&[], 100).unwrap(), 0);
let buf: &mut [u8] = &mut [];
assert_eq!(s.read(buf, 4).unwrap(), 0);
let empty_mem = VecMem::new(0);
let empty = empty_mem.as_volatile_slice();
assert_eq!(empty.write(&[], 1).unwrap(), 0);
assert_eq!(empty.read(buf, 1).unwrap(), 0);
}
#[test]
fn obj_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
assert!(s.write_obj(55u16, 4).is_err());
assert!(s.write_obj(55u16, core::usize::MAX).is_err());
assert!(s.write_obj(55u16, 2).is_ok());
assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
assert!(s.read_obj::<u16>(4).is_err());
assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
}
#[test]
fn mem_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
assert!(s.write_obj(!0u32, 1).is_ok());
let mut file = if cfg!(unix) {
File::open(Path::new("/dev/zero")).unwrap()
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
assert!(s
.read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
.is_err());
assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
let mut f = TempFile::new().unwrap().into_file();
assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
let value = s.read_obj::<u32>(1).unwrap();
if cfg!(unix) {
assert_eq!(value, 0);
} else {
assert_eq!(value, 0x0090_5a4d);
}
let mut sink = Vec::new();
assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
assert!(s
.write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
.is_err());
format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
if cfg!(unix) {
assert_eq!(sink, vec![0; size_of::<u32>()]);
} else {
assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
};
}
#[test]
fn unaligned_read_and_write() {
let a = VecMem::new(7);
let s = a.as_volatile_slice();
let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
assert!(s.write_slice(&sample_buf, 0).is_ok());
let r = a.get_ref::<u32>(2).unwrap();
assert_eq!(r.load(), 0xAAAA_AAAA);
r.store(0x5555_5555);
let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
let mut buf: [u8; 7] = Default::default();
assert!(s.read_slice(&mut buf, 0).is_ok());
assert_eq!(buf, sample_buf);
}
#[test]
fn ref_array_from_slice() {
let mut a = [2, 4, 6, 8, 10];
let a_vec = a.to_vec();
let a_ref = &mut a[..];
let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
let a_array_ref: VolatileArrayRef<u8, ()> = a_slice.into();
for (i, entry) in a_vec.iter().enumerate() {
assert_eq!(&a_array_ref.load(i), entry);
}
}
#[test]
fn ref_array_store() {
let mut a = [0u8; 5];
{
let a_ref = &mut a[..];
let v_ref = a_ref.get_array_ref(1, 4).unwrap();
v_ref.store(1, 2u8);
v_ref.store(2, 4u8);
v_ref.store(3, 6u8);
}
let expected = [2u8, 4u8, 6u8];
assert_eq!(a[2..=4], expected);
}
#[test]
fn ref_array_load() {
let mut a = [0, 0, 2, 3, 10];
{
let a_ref = &mut a[..];
let c = {
let v_ref = a_ref.get_array_ref::<u8>(1, 4).unwrap();
assert_eq!(v_ref.load(1), 2u8);
assert_eq!(v_ref.load(2), 3u8);
assert_eq!(v_ref.load(3), 10u8);
v_ref
};
c.load(0);
        }
}
#[test]
fn ref_array_overflow() {
let mut a = [0, 0, 2, 3, 10];
let a_ref = &mut a[..];
let res = a_ref.get_array_ref::<u32>(4, usize::MAX).unwrap_err();
assert_matches!(
res,
Error::TooBig {
nelements: usize::MAX,
size: 4,
}
);
}
#[test]
fn alignment() {
let a = [0u8; 64];
let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize;
assert!(super::alignment(a) >= 32);
assert_eq!(super::alignment(a + 9), 1);
assert_eq!(super::alignment(a + 30), 2);
assert_eq!(super::alignment(a + 12), 4);
assert_eq!(super::alignment(a + 8), 8);
}
#[test]
fn test_atomic_accesses() {
let a = VecMem::new(0x1000);
let s = a.as_volatile_slice();
crate::bytes::tests::check_atomic_accesses(s, 0, 0x1000);
}
#[test]
fn split_at() {
let mut mem = [0u8; 32];
let mem_ref = &mut mem[..];
let vslice = mem_ref.get_slice(0, 32).unwrap();
let (start, end) = vslice.split_at(8).unwrap();
assert_eq!(start.len(), 8);
assert_eq!(end.len(), 24);
let (start, end) = vslice.split_at(0).unwrap();
assert_eq!(start.len(), 0);
assert_eq!(end.len(), 32);
let (start, end) = vslice.split_at(31).unwrap();
assert_eq!(start.len(), 31);
assert_eq!(end.len(), 1);
let (start, end) = vslice.split_at(32).unwrap();
assert_eq!(start.len(), 32);
assert_eq!(end.len(), 0);
let err = vslice.split_at(33).unwrap_err();
assert_matches!(err, Error::OutOfBounds { addr: _ })
}
#[test]
fn test_volatile_slice_dirty_tracking() {
let val = 123u64;
let dirty_offset = 0x1000;
let dirty_len = size_of_val(&val);
let page_size = 0x1000;
let mut buf = vec![0u8; 0x10000];
{
let bitmap = AtomicBitmap::new(buf.len(), page_size);
let slice = unsafe {
VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap.slice_at(0))
};
test_bytes(
&slice,
|s: &VolatileSlice<RefSlice<AtomicBitmap>>,
start: usize,
len: usize,
clean: bool| { check_range(s.bitmap(), start, len, clean) },
|offset| offset,
0x1000,
);
}
{
let bitmap = AtomicBitmap::new(buf.len(), page_size);
let slice = unsafe {
VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap.slice_at(0))
};
test_volatile_memory(&slice);
}
let bitmap = AtomicBitmap::new(buf.len(), page_size);
let slice =
unsafe { VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap.slice_at(0)) };
let bitmap2 = AtomicBitmap::new(buf.len(), page_size);
let slice2 =
unsafe { VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap2.slice_at(0)) };
let bitmap3 = AtomicBitmap::new(buf.len(), page_size);
let slice3 =
unsafe { VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap3.slice_at(0)) };
assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
assert!(range_is_clean(slice2.bitmap(), 0, slice2.len()));
slice.write_obj(val, dirty_offset).unwrap();
assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
slice.copy_to_volatile_slice(slice2);
assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len()));
{
let (s1, s2) = slice.split_at(dirty_offset).unwrap();
assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
}
{
let s = slice.subslice(dirty_offset, dirty_len).unwrap();
assert!(range_is_dirty(s.bitmap(), 0, s.len()));
}
{
let s = slice.offset(dirty_offset).unwrap();
assert!(range_is_dirty(s.bitmap(), 0, dirty_len));
}
{
let buf = vec![1u8; dirty_offset];
assert!(range_is_clean(slice.bitmap(), 0, dirty_offset));
slice.copy_from(&buf);
assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset));
}
{
let val = 1u32;
let buf = vec![val; dirty_offset / size_of_val(&val)];
assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset));
slice3.copy_from(&buf);
assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset));
}
}
#[test]
fn test_volatile_ref_dirty_tracking() {
let val = 123u64;
let mut buf = vec![val];
let page_size = 0x1000;
let bitmap = AtomicBitmap::new(size_of_val(&val), page_size);
let vref =
unsafe { VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0)) };
assert!(range_is_clean(vref.bitmap(), 0, vref.len()));
vref.store(val);
assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
}
fn test_volatile_array_ref_copy_from_tracking<T>(buf: &mut [T], index: usize, page_size: usize)
where
T: ByteValued + From<u8>,
{
let bitmap = AtomicBitmap::new(buf.len() * size_of::<T>(), page_size);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
index + 1,
bitmap.slice_at(0),
)
};
let val = T::from(123);
let copy_buf = vec![val; index + 1];
assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::<T>()));
arr.copy_from(copy_buf.as_slice());
assert!(range_is_dirty(arr.bitmap(), 0, buf.len() * size_of::<T>()));
}
#[test]
fn test_volatile_array_ref_dirty_tracking() {
let val = 123u64;
let dirty_len = size_of_val(&val);
let index = 0x1000;
let dirty_offset = dirty_len * index;
let page_size = 0x1000;
let mut buf = vec![0u64; index + 1];
let mut byte_buf = vec![0u8; index + 1];
{
let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
index + 1,
bitmap.slice_at(0),
)
};
assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len));
arr.ref_at(index).store(val);
assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len));
}
{
let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
index + 1,
bitmap.slice_at(0),
)
};
let slice = arr.to_slice();
assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
arr.store(index, val);
assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
}
test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, page_size);
test_volatile_array_ref_copy_from_tracking(&mut buf, index, page_size);
}
}