use crate::bitmap::{self, Bitmap};
use crate::guest_memory::{
Error as GuestMemoryError, GuestMemoryBackendSliceIterator, GuestMemorySliceIterator,
Result as GuestMemoryResult,
};
use crate::{
Address, GuestAddress, GuestMemory, GuestMemoryBackend, GuestMemoryRegion, GuestUsize,
Permissions, VolatileSlice,
};
use rangemap::RangeMap;
use std::cmp;
use std::fmt::{self, Debug};
use std::iter::FusedIterator;
use std::num::Wrapping;
use std::ops::{Deref, Range};
use std::sync::Arc;
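/// Errors that can occur during IOMMU address translation.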
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(
"Cannot translate I/O virtual address range {:#x}+{}: {reason}",
iova_range.base.0,
iova_range.length,
)]
CannotResolve {
iova_range: IovaRange,
reason: String,
},
    #[error(
        "Expected {:#x}+{} to be a contiguous I/O virtual address range, but only the first {continuous_length} bytes are",
        iova_range.base.0,
        iova_range.length,
    )]
Fragmented {
iova_range: IovaRange,
continuous_length: usize,
},
#[error("IOMMU not configured correctly, cannot operate: {reason}")]
IommuMisconfigured {
reason: String,
},
}
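/// Translates I/O virtual addresses (IOVAs) into guest physical addresses.
///
/// Implementations typically keep an [`Iotlb`] behind a lock; the
/// [`Iommu::IotlbGuard`] associated type allows [`Iommu::translate()`] to
/// return an iterator that keeps holding that lock (e.g. through an
/// `RwLockReadGuard`) while the translation result is in use.
///
/// # Example
///
/// A minimal sketch of an implementation that only serves mappings already
/// present in its IOTLB, i.e. that never resolves misses (`FixedIommu` is an
/// illustrative name, not part of this crate):
///
/// ```ignore
/// #[derive(Debug)]
/// struct FixedIommu {
///     iotlb: std::sync::RwLock<Iotlb>,
/// }
///
/// impl Iommu for FixedIommu {
///     type IotlbGuard<'a> = std::sync::RwLockReadGuard<'a, Iotlb>;
///
///     fn translate(
///         &self,
///         iova: GuestAddress,
///         length: usize,
///         access: Permissions,
///     ) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error> {
///         Iotlb::lookup(self.iotlb.read().unwrap(), iova, length, access).map_err(|_fails| {
///             Error::CannotResolve {
///                 iova_range: IovaRange { base: iova, length },
///                 reason: "not mapped in the IOTLB".into(),
///             }
///         })
///     }
/// }
/// ```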
pub trait Iommu: Debug + Send + Sync {
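    /// Guard that dereferences to an [`Iotlb`], e.g. a lock guard, keeping
    /// the IOTLB accessible (and protected) while translation results are in
    /// use.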
type IotlbGuard<'a>: Deref<Target = Iotlb> + 'a
where
Self: 'a;
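    /// Translates the IOVA range `iova..(iova + length)` for the given
    /// access mode, returning an iterator over the corresponding physical
    /// ranges.
    ///
    /// Implementations should attempt to resolve IOTLB misses themselves
    /// where possible, returning an error only for ranges that cannot be
    /// made accessible.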
fn translate(
&self,
iova: GuestAddress,
length: usize,
access: Permissions,
) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error>;
}
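/// Internal representation of a single IOVA-to-physical mapping.
///
/// Only the (wrapping) difference between target and source base addresses
/// is stored, so that adjacent entries that translate contiguously compare
/// equal and are merged by the [`Iotlb`]'s range map.  For example, mapping
/// IOVA `0x1000` to physical `0x8000` stores a difference of `0x7000`;
/// translating IOVA `0x1234` then yields `0x1234 + 0x7000 = 0x8234`.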
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct IommuMapping {
target_source_diff: Wrapping<u64>,
permissions: Permissions,
}
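/// I/O translation lookaside buffer: a range map from I/O virtual addresses
/// to guest physical addresses, with per-mapping access permissions.
///
/// # Example
///
/// A minimal sketch of setting up and querying a mapping (`0x1000` and
/// `0x8000` are arbitrary example addresses):
///
/// ```ignore
/// let mut iotlb = Iotlb::new();
/// iotlb
///     .set_mapping(
///         GuestAddress(0x1000),
///         GuestAddress(0x8000),
///         0x1000,
///         Permissions::ReadWrite,
///     )
///     .unwrap();
///
/// // `&Iotlb` implements `Deref<Target = Iotlb>`, so a plain reference
/// // works as the guard type here.
/// let mappings = Iotlb::lookup(&iotlb, GuestAddress(0x1800), 0x100, Permissions::Read)
///     .expect("range is mapped");
/// for mapping in mappings {
///     // Prints "0x8800+256"
///     println!("{:#x}+{}", mapping.base.0, mapping.length);
/// }
/// ```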
#[derive(Debug, Default)]
pub struct Iotlb {
tlb: RangeMap<u64, IommuMapping>,
}
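/// Iterator over the physical ranges to which a contiguous IOVA range maps.
///
/// Returned by a successful [`Iotlb::lookup()`]; the guard `D` keeps the
/// underlying [`Iotlb`] borrowed (and, for lock guards, locked) while the
/// translation is iterated.  Yields [`MappedRange`]s.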
#[derive(Clone, Debug)]
pub struct IotlbIterator<D: Deref<Target = Iotlb>> {
iotlb: D,
range: Range<u64>,
access: Permissions,
}
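/// A contiguous range in I/O virtual address space.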
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct IovaRange {
pub base: GuestAddress,
pub length: usize,
}
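/// A contiguous range in guest physical address space, as the result of an
/// IOVA translation.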
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MappedRange {
pub base: GuestAddress,
pub length: usize,
}
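/// Failure information returned by [`Iotlb::lookup()`]: the parts of the
/// queried range that are not mapped at all (`misses`), and the parts whose
/// mappings do not allow the requested access (`access_fails`).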
#[derive(Clone, Debug)]
pub struct IotlbFails {
pub misses: Vec<IovaRange>,
pub access_fails: Vec<IovaRange>,
}
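/// [`GuestMemory`] view of a backend (physical) memory, optionally routing
/// accesses through an [`Iommu`].
///
/// While the IOMMU is enabled, all addresses are treated as I/O virtual
/// addresses and translated before the `backend` is accessed, and
/// dirty-bitmap tracking happens in IOVA space via `bitmap`; while it is
/// disabled, addresses are physical addresses passed through unmodified.
///
/// # Example
///
/// A minimal sketch; `backend`, `iommu`, and `bitmap` stand in for any
/// suitable [`GuestMemoryBackend`], [`Iommu`], and bitmap values:
///
/// ```ignore
/// let mut mem = IommuMemory::new(backend, iommu, true, bitmap);
/// // Addresses are IOVAs now, translated by the IOMMU:
/// let slices = mem.get_slices(GuestAddress(0x1000), 64, Permissions::Read)?;
/// // Disable translation again; addresses are physical from here on:
/// mem.set_iommu_enabled(false);
/// ```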
pub struct IommuMemory<M: GuestMemoryBackend, I: Iommu> {
backend: M,
iommu: Arc<I>,
use_iommu: bool,
bitmap: Arc<<M::R as GuestMemoryRegion>::B>,
}
impl IommuMapping {
fn new(source_base: u64, target_base: u64, permissions: Permissions) -> Self {
IommuMapping {
target_source_diff: Wrapping(target_base) - Wrapping(source_base),
permissions,
}
}
fn map(&self, iova: u64) -> u64 {
(Wrapping(iova) + self.target_source_diff).0
}
fn permissions(&self) -> Permissions {
self.permissions
}
}
impl Iotlb {
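    /// Creates an empty IOTLB.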
pub fn new() -> Self {
Default::default()
}
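    /// Maps the IOVA range `iova..(iova + length)` to the physical range
    /// `map_to..(map_to + length)` with the given permissions, replacing any
    /// overlapping prior mappings.
    ///
    /// Returns an error if the IOVA range would overflow the address space.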
    pub fn set_mapping(
        &mut self,
        iova: GuestAddress,
        map_to: GuestAddress,
        length: usize,
        perm: Permissions,
    ) -> Result<(), Error> {
        // Reject ranges that would wrap around the end of the address space:
        // `RangeMap::insert()` requires a well-formed (non-inverted) range.
        let end = iova
            .0
            .checked_add(length as u64)
            .ok_or_else(|| Error::IommuMisconfigured {
                reason: format!("IOVA range {:#x}+{length} overflows", iova.0),
            })?;
        let mapping = IommuMapping::new(iova.0, map_to.0, perm);
        self.tlb.insert(iova.0..end, mapping);
        Ok(())
    }
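    /// Removes all mappings in the IOVA range `iova..(iova + length)`;
    /// mappings only partially covered by the range are split, and their
    /// uncovered parts are retained.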
    pub fn invalidate_mapping(&mut self, iova: GuestAddress, length: usize) {
        // Saturate so an excessive length cannot produce an inverted range.
        self.tlb.remove(iova.0..iova.0.saturating_add(length as u64));
    }
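    /// Removes all mappings.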
pub fn invalidate_all(&mut self) {
self.tlb.clear();
}
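    /// Looks up the IOVA range `iova..(iova + length)` for the given access
    /// mode.
    ///
    /// `this` may be anything that dereferences to an [`Iotlb`] (e.g. a lock
    /// guard), allowing the returned iterator to keep the IOTLB borrowed
    /// while it is used.  On success, returns an [`IotlbIterator`] over the
    /// mapped physical ranges; on failure, returns every unmapped and every
    /// permission-violating part of the range in an [`IotlbFails`].
    ///
    /// Note that a lookup with [`Permissions::No`] succeeds on any mapped
    /// range, regardless of the mapping's permissions.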
pub fn lookup<D: Deref<Target = Iotlb>>(
this: D,
iova: GuestAddress,
length: usize,
access: Permissions,
) -> Result<IotlbIterator<D>, IotlbFails> {
let full_range = iova.0..(iova.0 + length as u64);
        let has_misses = this.tlb.gaps(&full_range).next().is_some();
let has_access_fails = this
.tlb
.overlapping(full_range.clone())
.any(|(_, mapping)| !mapping.permissions().allow(access));
if has_misses || has_access_fails {
let misses = this
.tlb
.gaps(&full_range)
.map(|range| {
debug_assert!(range.start >= full_range.start && range.end <= full_range.end);
range.try_into().unwrap()
})
.collect::<Vec<_>>();
let access_fails = this
.tlb
.overlapping(full_range.clone())
.filter(|(_, mapping)| !mapping.permissions().allow(access))
.map(|(range, _)| {
let start = cmp::max(range.start, full_range.start);
let end = cmp::min(range.end, full_range.end);
(start..end).try_into().unwrap()
})
.collect::<Vec<_>>();
return Err(IotlbFails {
misses,
access_fails,
});
}
Ok(IotlbIterator {
iotlb: this,
range: full_range,
access,
})
}
}
impl<D: Deref<Target = Iotlb>> Iterator for IotlbIterator<D> {
type Item = MappedRange;
fn next(&mut self) -> Option<Self::Item> {
if self.range.is_empty() {
return None;
}
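        // The guard in `self.iotlb` keeps the IOTLB borrowed, so the entries
        // validated by `Iotlb::lookup()` must still be present here.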
let (range, mapping) = self.iotlb.tlb.get_key_value(&self.range.start).unwrap();
assert!(mapping.permissions().allow(self.access));
let mapping_iova_start = self.range.start;
let mapping_iova_end = cmp::min(self.range.end, range.end);
let mapping_len = mapping_iova_end - mapping_iova_start;
self.range.start = mapping_iova_end;
Some(MappedRange {
base: GuestAddress(mapping.map(mapping_iova_start)),
length: mapping_len.try_into().unwrap(),
})
}
}
impl TryFrom<Range<u64>> for IovaRange {
type Error = <u64 as TryFrom<usize>>::Error;
fn try_from(range: Range<u64>) -> Result<Self, Self::Error> {
Ok(IovaRange {
base: GuestAddress(range.start),
length: (range.end - range.start).try_into()?,
})
}
}
impl<M: GuestMemoryBackend, I: Iommu> IommuMemory<M, I> {
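    /// Creates a new `IommuMemory`.
    ///
    /// Note that `bitmap` tracks dirty state in I/O virtual address space,
    /// so it must cover the IOVA range, not the physical one.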
pub fn new(
backend: M,
iommu: I,
use_iommu: bool,
bitmap: <Self as GuestMemory>::Bitmap,
) -> Self {
IommuMemory {
backend,
iommu: Arc::new(iommu),
use_iommu,
bitmap: Arc::new(bitmap),
}
}
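    /// Returns a copy of `self` with the backend replaced by `new_backend`,
    /// sharing the IOMMU and dirty bitmap with `self` and copying the
    /// IOMMU-enabled state.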
pub fn with_replaced_backend(&self, new_backend: M) -> Self {
IommuMemory {
backend: new_backend,
iommu: Arc::clone(&self.iommu),
use_iommu: self.use_iommu,
bitmap: Arc::clone(&self.bitmap),
}
}
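    /// Returns the dirty bitmap, which tracks writes in I/O virtual address
    /// space.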
pub fn bitmap(&self) -> &Arc<<Self as GuestMemory>::Bitmap> {
&self.bitmap
}
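    /// Enables or disables the IOMMU.
    ///
    /// While disabled, addresses are interpreted as physical addresses and
    /// passed to the backend unmodified.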
pub fn set_iommu_enabled(&mut self, enabled: bool) {
self.use_iommu = enabled;
}
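    /// Returns whether the IOMMU is enabled.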
pub fn get_iommu_enabled(&self) -> bool {
self.use_iommu
}
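    /// Returns a reference to the wrapped IOMMU.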
pub fn iommu(&self) -> &Arc<I> {
&self.iommu
}
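    /// Returns the underlying backend, regardless of whether the IOMMU is
    /// enabled (in contrast to [`GuestMemory::physical_memory()`], which
    /// returns `None` while it is).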
pub fn get_backend(&self) -> &M {
&self.backend
}
}
impl<M: GuestMemoryBackend + Clone, I: Iommu> Clone for IommuMemory<M, I> {
fn clone(&self) -> Self {
IommuMemory {
backend: self.backend.clone(),
iommu: Arc::clone(&self.iommu),
use_iommu: self.use_iommu,
bitmap: Arc::clone(&self.bitmap),
}
}
}
impl<M: GuestMemoryBackend + Debug, I: Iommu> Debug for IommuMemory<M, I>
where
<M::R as GuestMemoryRegion>::B: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("IommuMemory")
.field("backend", &self.backend)
.field("iommu", &self.iommu)
.field("use_iommu", &self.use_iommu)
.field("bitmap", &self.bitmap)
.finish()
}
}
impl<M: GuestMemoryBackend + Default, I: Iommu + Default> Default for IommuMemory<M, I>
where
<M::R as GuestMemoryRegion>::B: Default,
{
fn default() -> Self {
IommuMemory {
backend: Default::default(),
iommu: Default::default(),
use_iommu: Default::default(),
bitmap: Default::default(),
}
}
}
impl<M: GuestMemoryBackend, I: Iommu> GuestMemory for IommuMemory<M, I> {
type PhysicalMemory = M;
type Bitmap = <M::R as GuestMemoryRegion>::B;
fn check_range(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool {
if !self.use_iommu {
return self.backend.check_range(addr, count);
}
let Ok(mut translated_iter) = self.iommu.translate(addr, count, access) else {
return false;
};
translated_iter
.all(|translated| self.backend.check_range(translated.base, translated.length))
}
fn get_slices<'a>(
&'a self,
addr: GuestAddress,
count: usize,
access: Permissions,
) -> GuestMemoryResult<impl GuestMemorySliceIterator<'a, bitmap::BS<'a, Self::Bitmap>>> {
if self.use_iommu {
IommuMemorySliceIterator::virt(self, addr, count, access)
.map_err(GuestMemoryError::IommuError)
} else {
Ok(IommuMemorySliceIterator::phys(self, addr, count))
}
}
fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
if self.use_iommu {
None
} else {
Some(&self.backend)
}
}
}
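/// Iterator over the [`VolatileSlice`]s that make up an address range of an
/// [`IommuMemory`].
///
/// In IOMMU mode, each translated physical range is resolved to backend
/// slices in turn, and every slice's dirty bitmap is replaced by one indexed
/// with the slice's I/O virtual address.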
pub struct IommuMemorySliceIterator<'a, M: GuestMemoryBackend, I: Iommu + 'a> {
iova: GuestAddress,
bitmap: Option<&'a <M::R as GuestMemoryRegion>::B>,
phys_mem: &'a M,
translation: Option<IotlbIterator<I::IotlbGuard<'a>>>,
current_translated_iter: Option<GuestMemoryBackendSliceIterator<'a, M>>,
}
impl<'a, M: GuestMemoryBackend, I: Iommu> IommuMemorySliceIterator<'a, M, I> {
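    /// Creates an iterator over the physical address range
    /// `addr..(addr + count)`, bypassing the IOMMU.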
fn phys(mem: &'a IommuMemory<M, I>, addr: GuestAddress, count: usize) -> Self {
IommuMemorySliceIterator {
iova: addr,
bitmap: None,
phys_mem: &mem.backend,
translation: None,
current_translated_iter: Some(mem.backend.get_slices(addr, count)),
}
}
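    /// Creates an iterator over the I/O virtual address range
    /// `addr..(addr + count)`, translating it through the IOMMU.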
fn virt(
mem: &'a IommuMemory<M, I>,
addr: GuestAddress,
count: usize,
access: Permissions,
) -> Result<Self, Error> {
let translation = mem.iommu.translate(addr, count, access)?;
Ok(IommuMemorySliceIterator {
iova: addr,
bitmap: Some(mem.bitmap.as_ref()),
phys_mem: &mem.backend,
translation: Some(translation),
current_translated_iter: None,
})
}
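    /// Inner implementation of [`Iterator::next()`].
    ///
    /// # Safety
    ///
    /// In IOMMU mode, each returned slice has its dirty bitmap replaced by a
    /// slice of `self.bitmap` taken at the current IOVA, i.e. writes are
    /// tracked in I/O virtual address space; the caller must ensure that
    /// `self.bitmap` is meant to be indexed by IOVA (as
    /// [`IommuMemory::bitmap`] is).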
unsafe fn do_next(
&mut self,
) -> Option<GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>> {
loop {
if let Some(item) = self
.current_translated_iter
.as_mut()
.and_then(|iter| iter.next())
{
let mut item = match item {
Ok(item) => item,
Err(err) => return Some(Err(err)),
};
if let Some(bitmap) = self.bitmap.as_ref() {
let bitmap_slice = bitmap.slice_at(self.iova.0 as usize);
item = item.replace_bitmap(bitmap_slice);
}
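                // Wrapping to exactly 0 is fine: the slice then ends
                // precisely at the top of the address space.  Any other
                // overflow is an error.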
self.iova = match self.iova.overflowing_add(item.len() as GuestUsize) {
(x @ GuestAddress(0), _) | (x, false) => x,
(_, true) => return Some(Err(GuestMemoryError::GuestAddressOverflow)),
};
return Some(Ok(item));
}
let next_mapping = self.translation.as_mut()?.next()?;
self.current_translated_iter = Some(
self.phys_mem
.get_slices(next_mapping.base, next_mapping.length),
);
}
}
}
impl<'a, M: GuestMemoryBackend, I: Iommu> Iterator for IommuMemorySliceIterator<'a, M, I> {
type Item = GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>;
fn next(&mut self) -> Option<Self::Item> {
match unsafe { self.do_next() } {
Some(Ok(slice)) => Some(Ok(slice)),
other => {
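                // On error or exhaustion, drop the sub-iterators so that
                // iteration stays finished (keeping this iterator fused).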
self.current_translated_iter.take();
self.translation.take();
other
}
}
}
}
impl<M: GuestMemoryBackend, I: Iommu> FusedIterator for IommuMemorySliceIterator<'_, M, I> {}
impl<'a, M: GuestMemoryBackend, I: Iommu> GuestMemorySliceIterator<'a, bitmap::MS<'a, M>>
for IommuMemorySliceIterator<'a, M, I>
{
}
impl<'a, M: GuestMemoryBackend + Debug, I: Iommu> Debug for IommuMemorySliceIterator<'a, M, I>
where
I::IotlbGuard<'a>: Debug,
<M::R as GuestMemoryRegion>::B: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("IommuMemorySliceIterator")
.field("iova", &self.iova)
.field("bitmap", &self.bitmap)
.field("phys_mem", &self.phys_mem)
.field("translation", &self.translation)
.field("current_translated_iter", &self.current_translated_iter)
.finish()
}
}
#[cfg(test)]
mod tests {
#[cfg(feature = "backend-mmap")]
use super::IotlbIterator;
use super::{Error, IovaRange, MappedRange};
#[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
use crate::bitmap::AtomicBitmap;
#[cfg(feature = "backend-mmap")]
use crate::bitmap::NewBitmap;
#[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
use crate::GuestMemoryRegion;
#[cfg(feature = "backend-mmap")]
use crate::{
Bytes, GuestMemory, GuestMemoryError, GuestMemoryMmap, GuestMemoryResult, Iommu,
IommuMemory,
};
use crate::{GuestAddress, Iotlb, Permissions};
use std::fmt::Debug;
#[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
use std::num::NonZeroUsize;
use std::ops::Deref;
#[cfg(feature = "backend-mmap")]
use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
#[cfg(feature = "backend-mmap")]
use std::sync::{RwLock, RwLockReadGuard};
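    /// Test IOMMU that records at most one failed (missing or
    /// permission-denied) range per `translate()` call; if a physical
    /// address was provided in advance via `expect_mapping_request()`, the
    /// failed range is then mapped there and the translation retried.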
    #[cfg(feature = "backend-mmap")]
    #[derive(Debug)]
struct SimpleIommu {
iotlb: RwLock<Iotlb>,
fail_base: AtomicU64,
fail_len: AtomicUsize,
fail_was_miss: AtomicBool,
next_map_to: AtomicU64,
}
#[cfg(feature = "backend-mmap")]
impl SimpleIommu {
fn new() -> Self {
SimpleIommu {
iotlb: Iotlb::new().into(),
fail_base: 0.into(),
fail_len: 0.into(),
fail_was_miss: false.into(),
next_map_to: 0.into(),
}
}
fn expect_mapping_request(&self, to_phys: GuestAddress) {
self.fail_base.store(0, Ordering::Relaxed);
self.fail_len.store(0, Ordering::Relaxed);
self.next_map_to.store(to_phys.0, Ordering::Relaxed);
}
fn verify_mapping_request(&self, virt: GuestAddress, len: usize, was_miss: bool) {
assert_eq!(self.fail_base.load(Ordering::Relaxed), virt.0);
assert_eq!(self.fail_len.load(Ordering::Relaxed), len);
assert_eq!(self.fail_was_miss.load(Ordering::Relaxed), was_miss);
}
}
#[cfg(feature = "backend-mmap")]
impl Iommu for SimpleIommu {
type IotlbGuard<'a> = RwLockReadGuard<'a, Iotlb>;
fn translate(
&self,
iova: GuestAddress,
length: usize,
access: Permissions,
) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error> {
loop {
let mut fails =
match Iotlb::lookup(self.iotlb.read().unwrap(), iova, length, access) {
Ok(success) => return Ok(success),
Err(fails) => fails,
};
let miss = !fails.misses.is_empty();
let fail = fails
.misses
.pop()
.or_else(|| fails.access_fails.pop())
.expect("No failure reported, even though a failure happened");
self.fail_base.store(fail.base.0, Ordering::Relaxed);
self.fail_len.store(fail.length, Ordering::Relaxed);
self.fail_was_miss.store(miss, Ordering::Relaxed);
if !fails.misses.is_empty() || !fails.access_fails.is_empty() {
return Err(Error::CannotResolve {
iova_range: IovaRange { base: iova, length },
reason: "This IOMMU can only handle one failure per access".into(),
});
}
let map_to = self.next_map_to.swap(0, Ordering::Relaxed);
if map_to == 0 {
return Err(Error::CannotResolve {
iova_range: IovaRange {
base: fail.base,
length: fail.length,
},
reason: "No mapping provided for failed range".into(),
});
}
self.iotlb.write().unwrap().set_mapping(
fail.base,
GuestAddress(map_to),
fail.length,
access,
)?;
}
}
}
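    /// Asserts that looking up `iova`+`length` with `permissions` succeeds,
    /// yielding exactly the `expected` mapped ranges.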
fn verify_hit(
iotlb: impl Deref<Target = Iotlb> + Debug,
iova: GuestAddress,
length: usize,
permissions: Permissions,
expected: impl IntoIterator<Item = MappedRange>,
) {
let mut iter = Iotlb::lookup(iotlb, iova, length, permissions)
.inspect_err(|err| panic!("Unexpected lookup error {err:?}"))
.unwrap();
for e in expected {
assert_eq!(iter.next(), Some(e));
}
assert_eq!(iter.next(), None);
}
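    /// Asserts that looking up `iova`+`length` with `permissions` fails with
    /// exactly the expected misses and access failures.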
fn verify_fail(
iotlb: impl Deref<Target = Iotlb> + Debug,
iova: GuestAddress,
length: usize,
permissions: Permissions,
expected_misses: impl IntoIterator<Item = IovaRange>,
expected_access_fails: impl IntoIterator<Item = IovaRange>,
) {
let fails = Iotlb::lookup(iotlb, iova, length, permissions)
.inspect(|hits| panic!("Expected error on lookup, found {hits:?}"))
.unwrap_err();
let mut miss_iter = fails.misses.into_iter();
for e in expected_misses {
assert_eq!(miss_iter.next(), Some(e));
}
assert_eq!(miss_iter.next(), None);
let mut accf_iter = fails.access_fails.into_iter();
for e in expected_access_fails {
assert_eq!(accf_iter.next(), Some(e));
}
assert_eq!(accf_iter.next(), None);
}
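    /// Adjacent mappings that translate contiguously must be merged into a
    /// single entry.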
#[test]
fn test_iotlb_merge() -> Result<(), Error> {
const IOVA: GuestAddress = GuestAddress(42);
const PHYS: GuestAddress = GuestAddress(87);
const LEN_1: usize = 123;
const LEN_2: usize = 234;
let mut iotlb = Iotlb::new();
iotlb.set_mapping(IOVA, PHYS, LEN_1, Permissions::ReadWrite)?;
iotlb.set_mapping(
GuestAddress(IOVA.0 + LEN_1 as u64),
GuestAddress(PHYS.0 + LEN_1 as u64),
LEN_2,
Permissions::ReadWrite,
)?;
verify_hit(
&iotlb,
IOVA,
LEN_1 + LEN_2,
Permissions::ReadWrite,
[MappedRange {
base: PHYS,
length: LEN_1 + LEN_2,
}],
);
verify_hit(
&iotlb,
GuestAddress(IOVA.0 + LEN_1 as u64 - 1),
2,
Permissions::ReadWrite,
[MappedRange {
base: GuestAddress(PHYS.0 + LEN_1 as u64 - 1),
length: 2,
}],
);
Ok(())
}
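    /// Adjacent IOVA ranges that map to the same physical base do not
    /// translate contiguously and thus must not be merged.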
#[test]
fn test_iotlb_nomerge_same_phys() -> Result<(), Error> {
const IOVA: GuestAddress = GuestAddress(42);
const PHYS: GuestAddress = GuestAddress(87);
const LEN_1: usize = 123;
const LEN_2: usize = 234;
let mut iotlb = Iotlb::new();
iotlb.set_mapping(IOVA, PHYS, LEN_1, Permissions::ReadWrite)?;
iotlb.set_mapping(
GuestAddress(IOVA.0 + LEN_1 as u64),
PHYS,
LEN_2,
Permissions::ReadWrite,
)?;
verify_hit(
&iotlb,
IOVA,
LEN_1 + LEN_2,
Permissions::ReadWrite,
[
MappedRange {
base: PHYS,
length: LEN_1,
},
MappedRange {
base: PHYS,
length: LEN_2,
},
],
);
Ok(())
}
#[test]
fn test_iotlb_perms() -> Result<(), Error> {
const IOVA_R: GuestAddress = GuestAddress(42);
const PHYS_R: GuestAddress = GuestAddress(87);
const LEN_R: usize = 123;
const IOVA_W: GuestAddress = GuestAddress(IOVA_R.0 + LEN_R as u64);
const PHYS_W: GuestAddress = GuestAddress(PHYS_R.0 + LEN_R as u64);
const LEN_W: usize = 234;
const IOVA_FULL: GuestAddress = IOVA_R;
const LEN_FULL: usize = LEN_R + LEN_W;
let mut iotlb = Iotlb::new();
iotlb.set_mapping(IOVA_R, PHYS_R, LEN_R, Permissions::Read)?;
iotlb.set_mapping(IOVA_W, PHYS_W, LEN_W, Permissions::Write)?;
verify_fail(
&iotlb,
IOVA_FULL,
LEN_FULL,
Permissions::ReadWrite,
[],
[
IovaRange {
base: IOVA_R,
length: LEN_R,
},
IovaRange {
base: IOVA_W,
length: LEN_W,
},
],
);
verify_fail(
&iotlb,
IOVA_FULL,
LEN_FULL,
Permissions::Read,
[],
[IovaRange {
base: IOVA_W,
length: LEN_W,
}],
);
verify_fail(
&iotlb,
IOVA_FULL,
LEN_FULL,
Permissions::Write,
[],
[IovaRange {
base: IOVA_R,
length: LEN_R,
}],
);
verify_hit(
&iotlb,
IOVA_FULL,
LEN_FULL,
Permissions::No,
[
MappedRange {
base: PHYS_R,
length: LEN_R,
},
MappedRange {
base: PHYS_W,
length: LEN_W,
},
],
);
verify_hit(
&iotlb,
IOVA_R,
LEN_R,
Permissions::Read,
[MappedRange {
base: PHYS_R,
length: LEN_R,
}],
);
verify_hit(
&iotlb,
IOVA_W,
LEN_W,
Permissions::Write,
[MappedRange {
base: PHYS_W,
length: LEN_W,
}],
);
Ok(())
}
#[test]
fn test_iotlb_invalidation() -> Result<(), Error> {
const IOVA: GuestAddress = GuestAddress(42);
const PHYS: GuestAddress = GuestAddress(87);
const LEN: usize = 123;
const INVAL_OFS: usize = LEN / 2;
const INVAL_LEN: usize = 3;
const IOVA_AT_INVAL: GuestAddress = GuestAddress(IOVA.0 + INVAL_OFS as u64);
const PHYS_AT_INVAL: GuestAddress = GuestAddress(PHYS.0 + INVAL_OFS as u64);
const IOVA_POST_INVAL: GuestAddress = GuestAddress(IOVA_AT_INVAL.0 + INVAL_LEN as u64);
const PHYS_POST_INVAL: GuestAddress = GuestAddress(PHYS_AT_INVAL.0 + INVAL_LEN as u64);
const POST_INVAL_LEN: usize = LEN - INVAL_OFS - INVAL_LEN;
let mut iotlb = Iotlb::new();
iotlb.set_mapping(IOVA, PHYS, LEN, Permissions::ReadWrite)?;
verify_hit(
&iotlb,
IOVA,
LEN,
Permissions::ReadWrite,
[MappedRange {
base: PHYS,
length: LEN,
}],
);
iotlb.invalidate_mapping(IOVA_AT_INVAL, INVAL_LEN);
verify_hit(
&iotlb,
IOVA,
INVAL_OFS,
Permissions::ReadWrite,
[MappedRange {
base: PHYS,
length: INVAL_OFS,
}],
);
verify_fail(
&iotlb,
IOVA,
LEN,
Permissions::ReadWrite,
[IovaRange {
base: IOVA_AT_INVAL,
length: INVAL_LEN,
}],
[],
);
verify_hit(
&iotlb,
IOVA_POST_INVAL,
POST_INVAL_LEN,
Permissions::ReadWrite,
[MappedRange {
base: PHYS_POST_INVAL,
length: POST_INVAL_LEN,
}],
);
iotlb.invalidate_all();
verify_fail(
&iotlb,
IOVA,
LEN,
Permissions::ReadWrite,
[IovaRange {
base: IOVA,
length: LEN,
}],
[],
);
Ok(())
}
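    /// Creates an `IommuMemory` (with the IOMMU enabled) over the given
    /// physical regions, filling them with consecutive byte values starting
    /// at `value_offset`.  If `virt_mapping` is given, the physical regions
    /// are mapped contiguously at that IOVA with the given permissions.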
#[cfg(feature = "backend-mmap")]
fn create_virt_memory<B: NewBitmap>(
virt_mapping: Option<(GuestAddress, Permissions)>,
value_offset: u8,
phys_regions: impl IntoIterator<Item = MappedRange>,
bitmap: B,
) -> IommuMemory<GuestMemoryMmap<B>, SimpleIommu> {
let phys_ranges = phys_regions
.into_iter()
.map(|range| (range.base, range.length))
.collect::<Vec<(GuestAddress, usize)>>();
let phys_mem = GuestMemoryMmap::<B>::from_ranges(&phys_ranges).unwrap();
let mut byte_val = value_offset;
for (base, len) in &phys_ranges {
let mut slices = phys_mem
.get_slices(*base, *len, Permissions::Write)
.inspect_err(|err| panic!("Failed to access memory: {err}"))
.unwrap();
let slice = slices
.next()
.unwrap()
.inspect_err(|err| panic!("Failed to access memory: {err}"))
.unwrap();
assert!(slices.next().is_none(), "Expected single slice");
for i in 0..*len {
slice.write(&[byte_val], i).unwrap();
byte_val = byte_val.wrapping_add(1);
}
}
let mem = IommuMemory::new(phys_mem, SimpleIommu::new(), true, bitmap);
assert!(mem.physical_memory().is_none());
if let Some((mut virt, perm)) = virt_mapping {
for (base, len) in phys_ranges {
let mut iotlb = mem.iommu().iotlb.write().unwrap();
iotlb.set_mapping(virt, base, len, perm).unwrap();
virt = GuestAddress(virt.0 + len as u64);
}
}
mem
}
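    /// Checks that the `len` bytes at `start` consist of consecutive byte
    /// values starting at `value_offset`, propagating memory access errors.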
#[cfg(feature = "backend-mmap")]
fn check_virt_mem_content(
mem: &impl GuestMemory,
start: GuestAddress,
len: usize,
value_offset: u8,
) -> GuestMemoryResult<()> {
let mut ref_value = value_offset;
for slice in mem.get_slices(start, len, Permissions::Read)? {
let slice = slice?;
let count = slice.len();
let mut data = vec![0u8; count];
slice.read(&mut data, 0).unwrap();
for val in data {
assert_eq!(val, ref_value);
ref_value = ref_value.wrapping_add(1);
}
}
Ok(())
}
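    /// Like [`check_virt_mem_content()`], but panics on memory access
    /// errors.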
#[cfg(feature = "backend-mmap")]
fn verify_virt_mem_content(
m: &impl GuestMemory,
start: GuestAddress,
len: usize,
value_offset: u8,
) {
check_virt_mem_content(m, start, len, value_offset).unwrap();
}
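    /// Asserts that reading `len` bytes at `start` fails with
    /// [`Error::CannotResolve`], where the unresolvable range defaults to
    /// `fail_start..(start + len)` and `fail_start` defaults to `start`.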
#[cfg(feature = "backend-mmap")]
fn verify_virt_mem_error(
m: &impl GuestMemory,
start: GuestAddress,
len: usize,
fail_start: Option<GuestAddress>,
fail_len: Option<usize>,
) {
let fail_start = fail_start.unwrap_or(start);
let fail_len = fail_len.unwrap_or(len - (fail_start.0 - start.0) as usize);
let err = check_virt_mem_content(m, start, len, 0).unwrap_err();
let GuestMemoryError::IommuError(Error::CannotResolve {
iova_range: failed_range,
reason: _,
}) = err
else {
panic!("Unexpected error: {err:?}");
};
assert_eq!(
failed_range,
IovaRange {
base: fail_start,
length: fail_len,
}
);
}
#[cfg(feature = "backend-mmap")]
#[test]
fn test_iommu_memory_pre_mapped() {
const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
const PHYS_LEN: usize = 128;
const VIRT_START: GuestAddress = GuestAddress(0x2a000);
const VIRT_LEN: usize = PHYS_LEN * 2;
const VIRT_POST_MAP: GuestAddress = GuestAddress(VIRT_START.0 + VIRT_LEN as u64);
let mem = create_virt_memory(
Some((VIRT_START, Permissions::Read)),
0,
[
MappedRange {
base: PHYS_START_1,
length: PHYS_LEN,
},
MappedRange {
base: PHYS_START_2,
length: PHYS_LEN,
},
],
(),
);
assert!(mem.check_range(VIRT_START, VIRT_LEN, Permissions::No));
assert!(mem.check_range(VIRT_START, VIRT_LEN, Permissions::Read));
assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Write));
assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::ReadWrite));
assert!(!mem.check_range(GuestAddress(VIRT_START.0 - 1), 1, Permissions::No));
assert!(!mem.check_range(VIRT_POST_MAP, 1, Permissions::No));
verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
verify_virt_mem_error(&mem, GuestAddress(VIRT_START.0 - 1), 1, None, None);
verify_virt_mem_error(&mem, VIRT_POST_MAP, 1, None, None);
verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN + 1, Some(VIRT_POST_MAP), None);
}
#[cfg(feature = "backend-mmap")]
#[test]
fn test_iommu_memory_live_mapped() {
const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
const PHYS_LEN: usize = 128;
const VIRT_START: GuestAddress = GuestAddress(0x2a000);
const VIRT_START_1: GuestAddress = VIRT_START;
const VIRT_START_2: GuestAddress = GuestAddress(VIRT_START.0 + PHYS_LEN as u64);
const VIRT_LEN: usize = PHYS_LEN * 2;
const VIRT_POST_MAP: GuestAddress = GuestAddress(VIRT_START.0 + VIRT_LEN as u64);
let mem = create_virt_memory(
None,
0,
[
MappedRange {
base: PHYS_START_1,
length: PHYS_LEN,
},
MappedRange {
base: PHYS_START_2,
length: PHYS_LEN,
},
],
(),
);
assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::No));
assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Read));
assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Write));
assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::ReadWrite));
assert!(!mem.check_range(GuestAddress(VIRT_START.0 - 1), 1, Permissions::No));
assert!(!mem.check_range(VIRT_POST_MAP, 1, Permissions::No));
verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN, None, None);
verify_virt_mem_error(&mem, GuestAddress(VIRT_START.0 - 1), 1, None, None);
verify_virt_mem_error(&mem, VIRT_POST_MAP, 1, None, None);
verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN + 1, None, None);
let iommu = mem.iommu();
iommu.expect_mapping_request(PHYS_START_1);
verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
iommu.verify_mapping_request(VIRT_START_1, PHYS_LEN, true);
iommu.expect_mapping_request(PHYS_START_2);
verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
iommu.verify_mapping_request(VIRT_START_2, PHYS_LEN, true);
iommu
.iotlb
.write()
.unwrap()
.set_mapping(VIRT_START_1, PHYS_START_1, PHYS_LEN, Permissions::Write)
.unwrap();
iommu.expect_mapping_request(PHYS_START_1);
verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
iommu.verify_mapping_request(VIRT_START_1, PHYS_LEN, false);
}
#[cfg(feature = "backend-mmap")]
#[test]
fn test_mem_replace() {
const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
const PHYS_LEN: usize = 128;
const VIRT_START: GuestAddress = GuestAddress(0x2a000);
let mem = create_virt_memory(
Some((VIRT_START, Permissions::Read)),
0,
[MappedRange {
base: PHYS_START_1,
length: PHYS_LEN,
}],
(),
);
verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
verify_virt_mem_error(
&mem,
VIRT_START,
PHYS_LEN * 2,
Some(GuestAddress(VIRT_START.0 + PHYS_LEN as u64)),
None,
);
let mut mem2 = create_virt_memory(
Some((VIRT_START, Permissions::Read)),
42,
[
MappedRange {
base: PHYS_START_1,
length: PHYS_LEN,
},
MappedRange {
base: PHYS_START_2,
length: PHYS_LEN,
},
],
(),
);
verify_virt_mem_content(&mem2, VIRT_START, PHYS_LEN * 2, 42);
let mem_cloned = mem.clone();
mem2.set_iommu_enabled(false);
let pmem2 = mem2.physical_memory().unwrap();
assert!(std::ptr::eq(pmem2, mem2.get_backend()));
let mem = mem.with_replaced_backend(pmem2.clone());
mem.iommu().expect_mapping_request(PHYS_START_2);
verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN * 2, 42);
mem.iommu().verify_mapping_request(
GuestAddress(VIRT_START.0 + PHYS_LEN as u64),
PHYS_LEN,
true,
);
verify_virt_mem_content(&mem_cloned, VIRT_START, PHYS_LEN, 0);
verify_hit(
mem_cloned.iommu().iotlb.read().unwrap(),
VIRT_START,
PHYS_LEN * 2,
Permissions::Read,
[
MappedRange {
base: PHYS_START_1,
length: PHYS_LEN,
},
MappedRange {
base: PHYS_START_2,
length: PHYS_LEN,
},
],
);
}
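    /// Asserts that each address in `clean` is tracked as clean and each
    /// address in `dirty` as dirty in `mem`'s bitmap, resetting the dirty
    /// ones back to clean.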
#[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
fn verify_mem_bitmap<
M: crate::GuestMemoryBackend<R = R>,
R: GuestMemoryRegion<B = AtomicBitmap>,
I: Iommu,
>(
mem: &IommuMemory<M, I>,
clean: impl IntoIterator<Item = usize>,
dirty: impl IntoIterator<Item = usize>,
) {
let bitmap = mem.bitmap();
for addr in clean {
if bitmap.is_addr_set(addr) {
panic!("Expected addr {addr:#x} to be clean, but is dirty");
}
}
for addr in dirty {
if !bitmap.is_addr_set(addr) {
panic!("Expected addr {addr:#x} to be dirty, but is clean");
}
bitmap.reset_addr_range(addr, 1);
}
}
#[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
#[test]
fn test_dirty_bitmap() {
const PAGE_SIZE: usize = 4096;
const PHYS_START: GuestAddress = GuestAddress(0x4000);
const PHYS_LEN: usize = PAGE_SIZE * 2;
const PHYS_PAGE_0: usize = PHYS_START.0 as usize;
const PHYS_PAGE_1: usize = PHYS_START.0 as usize + PAGE_SIZE;
const VIRT_START: GuestAddress = GuestAddress(0x2a000);
const VIRT_PAGE_0: usize = VIRT_START.0 as usize;
const VIRT_PAGE_1: usize = VIRT_START.0 as usize + PAGE_SIZE;
let bitmap = AtomicBitmap::new(
VIRT_START.0 as usize + PHYS_LEN,
NonZeroUsize::new(PAGE_SIZE).unwrap(),
);
let mem = create_virt_memory(
Some((VIRT_START, Permissions::ReadWrite)),
0,
[MappedRange {
base: PHYS_START,
length: PHYS_LEN,
}],
bitmap,
);
verify_mem_bitmap(
&mem,
[PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
[],
);
mem.bitmap().set_addr_range(PHYS_PAGE_0, 2 * PAGE_SIZE);
verify_mem_bitmap(&mem, [VIRT_PAGE_0, VIRT_PAGE_1], [PHYS_PAGE_0, PHYS_PAGE_1]);
verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
verify_mem_bitmap(
&mem,
[PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
[],
);
let mut slices = mem
.get_slices(VIRT_START, PHYS_LEN, Permissions::Write)
.inspect_err(|err| panic!("Failed to access memory: {err}"))
.unwrap();
let slice = slices
.next()
.unwrap()
.inspect_err(|err| panic!("Failed to access memory: {err}"))
.unwrap();
assert!(slices.next().is_none(), "Expected single slice");
verify_mem_bitmap(
&mem,
[PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
[],
);
slice
.store(42, 0, Ordering::Relaxed)
.inspect_err(|err| panic!("Writing to memory failed: {err}"))
.unwrap();
verify_mem_bitmap(&mem, [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_1], [VIRT_PAGE_0]);
slice
.store(23, PAGE_SIZE, Ordering::Relaxed)
.inspect_err(|err| panic!("Writing to memory failed: {err}"))
.unwrap();
verify_mem_bitmap(&mem, [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0], [VIRT_PAGE_1]);
}
}