use crate::bitmap::{self, Bitmap};
use crate::guest_memory::{
    Error as GuestMemoryError, GuestMemoryBackendSliceIterator, GuestMemorySliceIterator,
    Result as GuestMemoryResult,
};
use crate::{
    Address, GuestAddress, GuestMemory, GuestMemoryBackend, GuestMemoryRegion, GuestUsize,
    Permissions, VolatileSlice,
};
use rangemap::RangeMap;
use std::cmp;
use std::fmt::{self, Debug};
use std::iter::FusedIterator;
use std::num::Wrapping;
use std::ops::{Deref, Range};
use std::sync::Arc;

#[derive(Debug, thiserror::Error)]
/// Errors relating to IOMMU operation.
pub enum Error {
    #[error(
        "Cannot translate I/O virtual address range {:#x}+{}: {reason}",
        iova_range.base.0,
        iova_range.length,
    )]
    CannotResolve {
        /// IOVA range that could not be resolved
        iova_range: IovaRange,
        /// Reason why resolution failed
        reason: String,
    },

    #[error(
        "Expected {:#x}+{} to be a contiguous I/O virtual address range, but only the first {continuous_length} bytes are",
        iova_range.base.0,
        iova_range.length,
    )]
    Fragmented {
        /// Full IOVA range that was expected to be contiguous
        iova_range: IovaRange,
        /// Length of the contiguous head of that range, in bytes
        continuous_length: usize,
    },

    #[error("IOMMU not configured correctly, cannot operate: {reason}")]
    IommuMisconfigured {
        /// Description of the misconfiguration
        reason: String,
    },
}

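/// Translates I/O virtual addresses (IOVAs) into guest physical addresses.
///
/// Below is a minimal usage sketch; `iommu` stands for any type implementing
/// this trait, and the concrete addresses are made up for illustration:
///
/// ```ignore
/// // Resolve a 512-byte read access starting at IOVA 0x1000.  On success,
/// // the returned iterator yields the physical ranges backing that access.
/// let ranges = iommu.translate(GuestAddress(0x1000), 512, Permissions::Read)?;
/// for mapped in ranges {
///     println!("{:#x}+{}", mapped.base.0, mapped.length);
/// }
/// ```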
pub trait Iommu: Debug + Send + Sync {
    /// Guard type granting (read) access to this IOMMU's [`Iotlb`] for the
    /// duration of a translation.
    type IotlbGuard<'a>: Deref<Target = Iotlb> + 'a
    where
        Self: 'a;

    /// Translate the IOVA range `iova..(iova + length)` for the given access
    /// permissions, returning an iterator over the mapped target ranges.
    fn translate(
        &self,
        iova: GuestAddress,
        length: usize,
        access: Permissions,
    ) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error>;
}

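/// Mapping of a continuous IOVA range into guest physical address space,
/// stored as the wrapping difference between target and source base.
///
/// Keeping only the difference means one entry can describe a mapping of any
/// length, and adjacent entries with equal difference and permissions can be
/// merged by the underlying range map.  For example, a mapping created with
/// `IommuMapping::new(0x1000, 0x8000, perm)` stores the difference `0x7000`,
/// so `map(0x1234)` yields `0x8234`.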
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct IommuMapping {
    /// Difference between target (mapped) and source (IOVA) address, using
    /// wrapping arithmetic so any pair of 64-bit bases can be represented
    target_source_diff: Wrapping<u64>,
    /// Permissions granted for this mapping
    permissions: Permissions,
}

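/// IOTLB: cache of IOVA-to-guest-physical mappings.
///
/// A minimal sketch of filling and querying it (addresses are illustrative):
///
/// ```ignore
/// let mut iotlb = Iotlb::new();
/// // Map IOVA [0x1000, 0x1000 + 0x2000) to physical [0x8000, 0x8000 + 0x2000).
/// iotlb
///     .set_mapping(GuestAddress(0x1000), GuestAddress(0x8000), 0x2000, Permissions::ReadWrite)
///     .unwrap();
/// // Look up a sub-range; a full hit yields an iterator of mapped ranges.
/// let ranges = Iotlb::lookup(&iotlb, GuestAddress(0x1800), 0x100, Permissions::Read).unwrap();
/// ```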
#[derive(Debug, Default)]
pub struct Iotlb {
    /// Mappings of IOVA ranges to target ranges
    tlb: RangeMap<u64, IommuMapping>,
}

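/// Iterator over the target ranges of an IOTLB lookup, in IOVA order.
///
/// Holds a guard `D`, keeping the IOTLB accessible for the duration of the
/// iteration.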
#[derive(Clone, Debug)]
pub struct IotlbIterator<D: Deref<Target = Iotlb>> {
    /// IOTLB being iterated over, kept accessible via the guard `D`
    iotlb: D,
    /// Remaining IOVA range left to iterate over
    range: Range<u64>,
    /// Access permissions that every returned mapping must allow
    access: Permissions,
}

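/// Continuous range of I/O virtual address space.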
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct IovaRange {
    /// First IOVA in the range
    pub base: GuestAddress,
    /// Length of the range, in bytes
    pub length: usize,
}

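/// Continuous range of guest physical address space, as the result of
/// translating an IOVA range.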
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct MappedRange {
    /// First address in the range
    pub base: GuestAddress,
    /// Length of the range, in bytes
    pub length: usize,
}

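/// Result of a failed IOTLB lookup: the sub-ranges that were missing from the
/// IOTLB, and those that were mapped but did not allow the requested access.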
#[derive(Clone, Debug)]
pub struct IotlbFails {
    /// Sub-ranges not present in the IOTLB at all
    pub misses: Vec<IovaRange>,
    /// Sub-ranges that are mapped, but do not allow the requested access
    pub access_fails: Vec<IovaRange>,
}

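/// [`GuestMemory`] implementation that routes accesses through an IOMMU when
/// enabled, translating I/O virtual addresses into addresses of the
/// underlying backend `M`.
///
/// A construction sketch (`MyBackend` and `MyIommu` are placeholder types for
/// illustration):
///
/// ```ignore
/// let mem: IommuMemory<MyBackend, MyIommu> =
///     IommuMemory::new(backend, iommu, true, bitmap);
/// // With the IOMMU enabled, addresses passed to e.g. `get_slices()` are
/// // treated as IOVAs; with it disabled, they are plain backend addresses.
/// let slices = mem.get_slices(GuestAddress(0x1000), 64, Permissions::Read)?;
/// ```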
pub struct IommuMemory<M: GuestMemoryBackend, I: Iommu> {
    /// Physical memory backend
    backend: M,
    /// IOMMU used for translating accesses
    iommu: Arc<I>,
    /// Whether accesses are currently routed through the IOMMU
    use_iommu: bool,
    /// Dirty bitmap, indexed by IOVA while the IOMMU is in use
    bitmap: Arc<<M::R as GuestMemoryRegion>::B>,
}

impl IommuMapping {
    /// Create a new mapping that maps `source_base` to `target_base`.
    fn new(source_base: u64, target_base: u64, permissions: Permissions) -> Self {
        IommuMapping {
            target_source_diff: Wrapping(target_base) - Wrapping(source_base),
            permissions,
        }
    }

    /// Map the given IOVA to its target address.
    fn map(&self, iova: u64) -> u64 {
        (Wrapping(iova) + self.target_source_diff).0
    }

    /// Return the permissions granted by this mapping.
    fn permissions(&self) -> Permissions {
        self.permissions
    }
}

impl Iotlb {
    /// Create a new, empty IOTLB.
    pub fn new() -> Self {
        Default::default()
    }

    /// Map the IOVA range `iova..(iova + length)` to the target range
    /// starting at `map_to`, with the given permissions.
    pub fn set_mapping(
        &mut self,
        iova: GuestAddress,
        map_to: GuestAddress,
        length: usize,
        perm: Permissions,
    ) -> Result<(), Error> {
        let mapping = IommuMapping::new(iova.0, map_to.0, perm);
        self.tlb.insert(iova.0..(iova.0 + length as u64), mapping);

        Ok(())
    }

    /// Remove any mapping in the IOVA range `iova..(iova + length)`.
    pub fn invalidate_mapping(&mut self, iova: GuestAddress, length: usize) {
        self.tlb.remove(iova.0..(iova.0 + length as u64));
    }

    /// Remove all mappings.
    pub fn invalidate_all(&mut self) {
        self.tlb.clear();
    }

    /// Look up the IOVA range `iova..(iova + length)` for the given access.
    /// On full success, return an iterator over the mapped target ranges;
    /// otherwise, return the lists of missing and permission-failed
    /// sub-ranges.
    pub fn lookup<D: Deref<Target = Iotlb>>(
        this: D,
        iova: GuestAddress,
        length: usize,
        access: Permissions,
    ) -> Result<IotlbIterator<D>, IotlbFails> {
        let full_range = iova.0..(iova.0 + length as u64);

        let has_misses = this.tlb.gaps(&full_range).any(|_| true);
        let has_access_fails = this
            .tlb
            .overlapping(full_range.clone())
            .any(|(_, mapping)| !mapping.permissions().allow(access));

        if has_misses || has_access_fails {
            let misses = this
                .tlb
                .gaps(&full_range)
                .map(|range| {
                    debug_assert!(range.start >= full_range.start && range.end <= full_range.end);
                    range.try_into().unwrap()
                })
                .collect::<Vec<_>>();

            let access_fails = this
                .tlb
                .overlapping(full_range.clone())
                .filter(|(_, mapping)| !mapping.permissions().allow(access))
                .map(|(range, _)| {
                    let start = cmp::max(range.start, full_range.start);
                    let end = cmp::min(range.end, full_range.end);
                    (start..end).try_into().unwrap()
                })
                .collect::<Vec<_>>();

            return Err(IotlbFails {
                misses,
                access_fails,
            });
        }

        Ok(IotlbIterator {
            iotlb: this,
            range: full_range,
            access,
        })
    }
}

impl<D: Deref<Target = Iotlb>> Iterator for IotlbIterator<D> {
    type Item = MappedRange;

    fn next(&mut self) -> Option<Self::Item> {
        if self.range.is_empty() {
            return None;
        }

        // `Iotlb::lookup()` has verified that the whole range is mapped with
        // sufficient permissions, and we still hold the IOTLB guard, so this
        // entry must exist.
        let (range, mapping) = self.iotlb.tlb.get_key_value(&self.range.start).unwrap();

        assert!(mapping.permissions().allow(self.access));

        let mapping_iova_start = self.range.start;
        let mapping_iova_end = cmp::min(self.range.end, range.end);
        let mapping_len = mapping_iova_end - mapping_iova_start;

        self.range.start = mapping_iova_end;

        Some(MappedRange {
            base: GuestAddress(mapping.map(mapping_iova_start)),
            length: mapping_len.try_into().unwrap(),
        })
    }
}

impl TryFrom<Range<u64>> for IovaRange {
    type Error = <usize as TryFrom<u64>>::Error;

    fn try_from(range: Range<u64>) -> Result<Self, Self::Error> {
        Ok(IovaRange {
            base: GuestAddress(range.start),
            length: (range.end - range.start).try_into()?,
        })
    }
}

impl<M: GuestMemoryBackend, I: Iommu> IommuMemory<M, I> {
    /// Create a new `IommuMemory` instance.
    pub fn new(
        backend: M,
        iommu: I,
        use_iommu: bool,
        bitmap: <Self as GuestMemory>::Bitmap,
    ) -> Self {
        IommuMemory {
            backend,
            iommu: Arc::new(iommu),
            use_iommu,
            bitmap: Arc::new(bitmap),
        }
    }

    /// Create a new version of `self` with the physical memory backend
    /// replaced by `new_backend`; the IOMMU, bitmap, and IOMMU-enabled state
    /// are shared or copied from `self`.
    pub fn with_replaced_backend(&self, new_backend: M) -> Self {
        IommuMemory {
            backend: new_backend,
            iommu: Arc::clone(&self.iommu),
            use_iommu: self.use_iommu,
            bitmap: Arc::clone(&self.bitmap),
        }
    }

    /// Return the dirty bitmap.
    pub fn bitmap(&self) -> &Arc<<Self as GuestMemory>::Bitmap> {
        &self.bitmap
    }

    /// Enable or disable the IOMMU.
    pub fn set_iommu_enabled(&mut self, enabled: bool) {
        self.use_iommu = enabled;
    }

    /// Return whether the IOMMU is enabled.
    pub fn get_iommu_enabled(&self) -> bool {
        self.use_iommu
    }

    /// Return a reference to the IOMMU.
    pub fn iommu(&self) -> &Arc<I> {
        &self.iommu
    }

    /// Return a reference to the physical memory backend.
    pub fn get_backend(&self) -> &M {
        &self.backend
    }
}

impl<M: GuestMemoryBackend + Clone, I: Iommu> Clone for IommuMemory<M, I> {
    fn clone(&self) -> Self {
        IommuMemory {
            backend: self.backend.clone(),
            iommu: Arc::clone(&self.iommu),
            use_iommu: self.use_iommu,
            bitmap: Arc::clone(&self.bitmap),
        }
    }
}

impl<M: GuestMemoryBackend + Debug, I: Iommu> Debug for IommuMemory<M, I>
where
    <M::R as GuestMemoryRegion>::B: Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IommuMemory")
            .field("backend", &self.backend)
            .field("iommu", &self.iommu)
            .field("use_iommu", &self.use_iommu)
            .field("bitmap", &self.bitmap)
            .finish()
    }
}

impl<M: GuestMemoryBackend + Default, I: Iommu + Default> Default for IommuMemory<M, I>
where
    <M::R as GuestMemoryRegion>::B: Default,
{
    fn default() -> Self {
        IommuMemory {
            backend: Default::default(),
            iommu: Default::default(),
            use_iommu: Default::default(),
            bitmap: Default::default(),
        }
    }
}

impl<M: GuestMemoryBackend, I: Iommu> GuestMemory for IommuMemory<M, I> {
    type PhysicalMemory = M;
    type Bitmap = <M::R as GuestMemoryRegion>::B;

    fn check_range(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool {
        if !self.use_iommu {
            // The backend's `check_range()` takes no permissions, so `access`
            // is only checked when translating through the IOMMU.
            return self.backend.check_range(addr, count);
        }

        let Ok(mut translated_iter) = self.iommu.translate(addr, count, access) else {
            return false;
        };

        translated_iter
            .all(|translated| self.backend.check_range(translated.base, translated.length))
    }

    fn get_slices<'a>(
        &'a self,
        addr: GuestAddress,
        count: usize,
        access: Permissions,
    ) -> GuestMemoryResult<impl GuestMemorySliceIterator<'a, bitmap::BS<'a, Self::Bitmap>>> {
        if self.use_iommu {
            IommuMemorySliceIterator::virt(self, addr, count, access)
                .map_err(GuestMemoryError::IommuError)
        } else {
            Ok(IommuMemorySliceIterator::phys(self, addr, count))
        }
    }

    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
        if self.use_iommu {
            None
        } else {
            Some(&self.backend)
        }
    }
}

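/// Iterator over the slices that make up an [`IommuMemory`] access, as
/// produced by [`IommuMemory::get_slices()`]; translates IOVA ranges on
/// demand while the IOMMU is in use.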
pub struct IommuMemorySliceIterator<'a, M: GuestMemoryBackend, I: Iommu + 'a> {
    /// Current IOVA, advanced as slices are returned
    iova: GuestAddress,
    /// IOVA-indexed dirty bitmap, if the IOMMU is in use
    bitmap: Option<&'a <M::R as GuestMemoryRegion>::B>,
    /// Underlying physical memory backend
    phys_mem: &'a M,
    /// Translation of the remaining IOVA range, if the IOMMU is in use
    translation: Option<IotlbIterator<I::IotlbGuard<'a>>>,
    /// Slice iterator for the current translated (physical) range
    current_translated_iter: Option<GuestMemoryBackendSliceIterator<'a, M>>,
}

impl<'a, M: GuestMemoryBackend, I: Iommu> IommuMemorySliceIterator<'a, M, I> {
    /// Create an iterator over the address range `addr..(addr + count)` that
    /// bypasses the IOMMU, accessing the backend directly.
    fn phys(mem: &'a IommuMemory<M, I>, addr: GuestAddress, count: usize) -> Self {
        IommuMemorySliceIterator {
            iova: addr,
            bitmap: None,
            phys_mem: &mem.backend,
            translation: None,
            current_translated_iter: Some(mem.backend.get_slices(addr, count)),
        }
    }

    /// Create an iterator over the IOVA range `addr..(addr + count)`,
    /// translating it through the IOMMU.
    fn virt(
        mem: &'a IommuMemory<M, I>,
        addr: GuestAddress,
        count: usize,
        access: Permissions,
    ) -> Result<Self, Error> {
        let translation = mem.iommu.translate(addr, count, access)?;
        Ok(IommuMemorySliceIterator {
            iova: addr,
            bitmap: Some(mem.bitmap.as_ref()),
            phys_mem: &mem.backend,
            translation: Some(translation),
            current_translated_iter: None,
        })
    }

    unsafe fn do_next(
        &mut self,
    ) -> Option<GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>> {
        loop {
            if let Some(item) = self
                .current_translated_iter
                .as_mut()
                .and_then(|iter| iter.next())
            {
                let mut item = match item {
                    Ok(item) => item,
                    Err(err) => return Some(Err(err)),
                };

                // When the IOMMU is in use, dirty tracking is indexed by
                // IOVA, so replace the backend's bitmap slice accordingly.
                if let Some(bitmap) = self.bitmap.as_ref() {
                    let bitmap_slice = bitmap.slice_at(self.iova.0 as usize);
                    item = item.replace_bitmap(bitmap_slice);
                }

                self.iova = match self.iova.overflowing_add(item.len() as GuestUsize) {
                    // Wrapping to exactly 0 means the slice ends at the very
                    // end of the address space, which is still valid.
                    (x @ GuestAddress(0), _) | (x, false) => x,
                    (_, true) => return Some(Err(GuestMemoryError::GuestAddressOverflow)),
                };

                return Some(Ok(item));
            }

            // Current range exhausted; fetch the next translated mapping and
            // continue with its slices.
            let next_mapping = self.translation.as_mut()?.next()?;
            self.current_translated_iter = Some(
                self.phys_mem
                    .get_slices(next_mapping.base, next_mapping.length),
            );
        }
    }
}

impl<'a, M: GuestMemoryBackend, I: Iommu> Iterator for IommuMemorySliceIterator<'a, M, I> {
    type Item = GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>;

    fn next(&mut self) -> Option<Self::Item> {
        match unsafe { self.do_next() } {
            Some(Ok(slice)) => Some(Ok(slice)),
            other => {
                // On error or end of iteration, drop the inner iterators so
                // that this iterator is fused.
                self.current_translated_iter.take();
                self.translation.take();
                other
            }
        }
    }
}

impl<M: GuestMemoryBackend, I: Iommu> FusedIterator for IommuMemorySliceIterator<'_, M, I> {}

impl<'a, M: GuestMemoryBackend, I: Iommu> GuestMemorySliceIterator<'a, bitmap::MS<'a, M>>
    for IommuMemorySliceIterator<'a, M, I>
{
}

impl<'a, M: GuestMemoryBackend + Debug, I: Iommu> Debug for IommuMemorySliceIterator<'a, M, I>
where
    I::IotlbGuard<'a>: Debug,
    <M::R as GuestMemoryRegion>::B: Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IommuMemorySliceIterator")
            .field("iova", &self.iova)
            .field("bitmap", &self.bitmap)
            .field("phys_mem", &self.phys_mem)
            .field("translation", &self.translation)
            .field("current_translated_iter", &self.current_translated_iter)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    #[cfg(feature = "backend-mmap")]
    use super::IotlbIterator;
    use super::{Error, IovaRange, MappedRange};
    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
    use crate::bitmap::AtomicBitmap;
    #[cfg(feature = "backend-mmap")]
    use crate::bitmap::NewBitmap;
    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
    use crate::GuestMemoryRegion;
    #[cfg(feature = "backend-mmap")]
    use crate::{
        Bytes, GuestMemory, GuestMemoryError, GuestMemoryMmap, GuestMemoryResult, Iommu,
        IommuMemory,
    };
    use crate::{GuestAddress, Iotlb, Permissions};
    use std::fmt::Debug;
    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
    use std::num::NonZeroUsize;
    use std::ops::Deref;
    #[cfg(feature = "backend-mmap")]
    use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
    #[cfg(feature = "backend-mmap")]
    use std::sync::{RwLock, RwLockReadGuard};

    /// Simple IOMMU implementation for testing, wrapping an [`Iotlb`] and
    /// recording the most recent lookup failure.
    #[cfg(feature = "backend-mmap")]
    #[derive(Debug)]
    struct SimpleIommu {
        iotlb: RwLock<Iotlb>,
        /// Base IOVA of the last failed lookup
        fail_base: AtomicU64,
        /// Length of the last failed lookup
        fail_len: AtomicUsize,
        /// Whether the last failure was a miss (as opposed to a permission
        /// failure)
        fail_was_miss: AtomicBool,
        /// Target address to use for the next implicit mapping request
        next_map_to: AtomicU64,
    }

    #[cfg(feature = "backend-mmap")]
    impl SimpleIommu {
        fn new() -> Self {
            SimpleIommu {
                iotlb: Iotlb::new().into(),
                fail_base: 0.into(),
                fail_len: 0.into(),
                fail_was_miss: false.into(),
                next_map_to: 0.into(),
            }
        }

        /// Prepare this IOMMU to answer the next failed lookup by mapping the
        /// failed range to `to_phys`, clearing the recorded failure state.
        fn expect_mapping_request(&self, to_phys: GuestAddress) {
            self.fail_base.store(0, Ordering::Relaxed);
            self.fail_len.store(0, Ordering::Relaxed);
            self.next_map_to.store(to_phys.0, Ordering::Relaxed);
        }

        /// Assert that the last recorded lookup failure matches the given
        /// range and failure kind.
        fn verify_mapping_request(&self, virt: GuestAddress, len: usize, was_miss: bool) {
            assert_eq!(self.fail_base.load(Ordering::Relaxed), virt.0);
            assert_eq!(self.fail_len.load(Ordering::Relaxed), len);
            assert_eq!(self.fail_was_miss.load(Ordering::Relaxed), was_miss);
        }
    }

    #[cfg(feature = "backend-mmap")]
    impl Iommu for SimpleIommu {
        type IotlbGuard<'a> = RwLockReadGuard<'a, Iotlb>;

        fn translate(
            &self,
            iova: GuestAddress,
            length: usize,
            access: Permissions,
        ) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error> {
            loop {
                let mut fails =
                    match Iotlb::lookup(self.iotlb.read().unwrap(), iova, length, access) {
                        Ok(success) => return Ok(success),
                        Err(fails) => fails,
                    };
                let miss = !fails.misses.is_empty();
                let fail = fails
                    .misses
                    .pop()
                    .or_else(|| fails.access_fails.pop())
                    .expect("No failure reported, even though a failure happened");
                self.fail_base.store(fail.base.0, Ordering::Relaxed);
                self.fail_len.store(fail.length, Ordering::Relaxed);
                self.fail_was_miss.store(miss, Ordering::Relaxed);

                if !fails.misses.is_empty() || !fails.access_fails.is_empty() {
                    return Err(Error::CannotResolve {
                        iova_range: IovaRange { base: iova, length },
                        reason: "This IOMMU can only handle one failure per access".into(),
                    });
                }

                let map_to = self.next_map_to.swap(0, Ordering::Relaxed);
                if map_to == 0 {
                    return Err(Error::CannotResolve {
                        iova_range: IovaRange {
                            base: fail.base,
                            length: fail.length,
                        },
                        reason: "No mapping provided for failed range".into(),
                    });
                }

                self.iotlb.write().unwrap().set_mapping(
                    fail.base,
                    GuestAddress(map_to),
                    fail.length,
                    access,
                )?;
            }
        }
    }

    /// Assert that looking up the given IOVA range with the given permissions
    /// fully succeeds and yields exactly the `expected` mapped ranges.
    fn verify_hit(
        iotlb: impl Deref<Target = Iotlb> + Debug,
        iova: GuestAddress,
        length: usize,
        permissions: Permissions,
        expected: impl IntoIterator<Item = MappedRange>,
    ) {
        let mut iter = Iotlb::lookup(iotlb, iova, length, permissions)
            .inspect_err(|err| panic!("Unexpected lookup error {err:?}"))
            .unwrap();

        for e in expected {
            assert_eq!(iter.next(), Some(e));
        }
        assert_eq!(iter.next(), None);
    }

    /// Assert that looking up the given IOVA range with the given permissions
    /// fails, with exactly the expected misses and access failures.
    fn verify_fail(
        iotlb: impl Deref<Target = Iotlb> + Debug,
        iova: GuestAddress,
        length: usize,
        permissions: Permissions,
        expected_misses: impl IntoIterator<Item = IovaRange>,
        expected_access_fails: impl IntoIterator<Item = IovaRange>,
    ) {
        let fails = Iotlb::lookup(iotlb, iova, length, permissions)
            .inspect(|hits| panic!("Expected error on lookup, found {hits:?}"))
            .unwrap_err();

        let mut miss_iter = fails.misses.into_iter();
        for e in expected_misses {
            assert_eq!(miss_iter.next(), Some(e));
        }
        assert_eq!(miss_iter.next(), None);

        let mut accf_iter = fails.access_fails.into_iter();
        for e in expected_access_fails {
            assert_eq!(accf_iter.next(), Some(e));
        }
        assert_eq!(accf_iter.next(), None);
    }

    /// Verify that adjacent mappings with contiguous physical ranges are
    /// merged.
    #[test]
    fn test_iotlb_merge() -> Result<(), Error> {
        const IOVA: GuestAddress = GuestAddress(42);
        const PHYS: GuestAddress = GuestAddress(87);
        const LEN_1: usize = 123;
        const LEN_2: usize = 234;

        let mut iotlb = Iotlb::new();
        iotlb.set_mapping(IOVA, PHYS, LEN_1, Permissions::ReadWrite)?;
        iotlb.set_mapping(
            GuestAddress(IOVA.0 + LEN_1 as u64),
            GuestAddress(PHYS.0 + LEN_1 as u64),
            LEN_2,
            Permissions::ReadWrite,
        )?;

        verify_hit(
            &iotlb,
            IOVA,
            LEN_1 + LEN_2,
            Permissions::ReadWrite,
            [MappedRange {
                base: PHYS,
                length: LEN_1 + LEN_2,
            }],
        );

        // A lookup crossing the merge boundary must also return a single
        // contiguous range.
        verify_hit(
            &iotlb,
            GuestAddress(IOVA.0 + LEN_1 as u64 - 1),
            2,
            Permissions::ReadWrite,
            [MappedRange {
                base: GuestAddress(PHYS.0 + LEN_1 as u64 - 1),
                length: 2,
            }],
        );

        Ok(())
    }

    /// Verify that adjacent IOVA mappings are not merged when their physical
    /// ranges are not contiguous (here: both map to the same physical base).
    #[test]
    fn test_iotlb_nomerge_same_phys() -> Result<(), Error> {
        const IOVA: GuestAddress = GuestAddress(42);
        const PHYS: GuestAddress = GuestAddress(87);
        const LEN_1: usize = 123;
        const LEN_2: usize = 234;

        let mut iotlb = Iotlb::new();
        iotlb.set_mapping(IOVA, PHYS, LEN_1, Permissions::ReadWrite)?;
        iotlb.set_mapping(
            GuestAddress(IOVA.0 + LEN_1 as u64),
            PHYS,
            LEN_2,
            Permissions::ReadWrite,
        )?;

        verify_hit(
            &iotlb,
            IOVA,
            LEN_1 + LEN_2,
            Permissions::ReadWrite,
            [
                MappedRange {
                    base: PHYS,
                    length: LEN_1,
                },
                MappedRange {
                    base: PHYS,
                    length: LEN_2,
                },
            ],
        );

        Ok(())
    }

    /// Verify that permissions are checked correctly.
    #[test]
    fn test_iotlb_perms() -> Result<(), Error> {
        const IOVA_R: GuestAddress = GuestAddress(42);
        const PHYS_R: GuestAddress = GuestAddress(87);
        const LEN_R: usize = 123;
        const IOVA_W: GuestAddress = GuestAddress(IOVA_R.0 + LEN_R as u64);
        const PHYS_W: GuestAddress = GuestAddress(PHYS_R.0 + LEN_R as u64);
        const LEN_W: usize = 234;
        const IOVA_FULL: GuestAddress = IOVA_R;
        const LEN_FULL: usize = LEN_R + LEN_W;

        let mut iotlb = Iotlb::new();
        iotlb.set_mapping(IOVA_R, PHYS_R, LEN_R, Permissions::Read)?;
        iotlb.set_mapping(IOVA_W, PHYS_W, LEN_W, Permissions::Write)?;

        // Read+write access fails on both halves.
        verify_fail(
            &iotlb,
            IOVA_FULL,
            LEN_FULL,
            Permissions::ReadWrite,
            [],
            [
                IovaRange {
                    base: IOVA_R,
                    length: LEN_R,
                },
                IovaRange {
                    base: IOVA_W,
                    length: LEN_W,
                },
            ],
        );

        // Read access fails on the write-only half.
        verify_fail(
            &iotlb,
            IOVA_FULL,
            LEN_FULL,
            Permissions::Read,
            [],
            [IovaRange {
                base: IOVA_W,
                length: LEN_W,
            }],
        );

        // Write access fails on the read-only half.
        verify_fail(
            &iotlb,
            IOVA_FULL,
            LEN_FULL,
            Permissions::Write,
            [],
            [IovaRange {
                base: IOVA_R,
                length: LEN_R,
            }],
        );

        // An access without permission requirements succeeds on both halves.
        verify_hit(
            &iotlb,
            IOVA_FULL,
            LEN_FULL,
            Permissions::No,
            [
                MappedRange {
                    base: PHYS_R,
                    length: LEN_R,
                },
                MappedRange {
                    base: PHYS_W,
                    length: LEN_W,
                },
            ],
        );

        // Reading the read-only half succeeds.
        verify_hit(
            &iotlb,
            IOVA_R,
            LEN_R,
            Permissions::Read,
            [MappedRange {
                base: PHYS_R,
                length: LEN_R,
            }],
        );

        // Writing the write-only half succeeds.
        verify_hit(
            &iotlb,
            IOVA_W,
            LEN_W,
            Permissions::Write,
            [MappedRange {
                base: PHYS_W,
                length: LEN_W,
            }],
        );

        Ok(())
    }

    /// Verify that invalidation works.
    #[test]
    fn test_iotlb_invalidation() -> Result<(), Error> {
        const IOVA: GuestAddress = GuestAddress(42);
        const PHYS: GuestAddress = GuestAddress(87);
        const LEN: usize = 123;
        const INVAL_OFS: usize = LEN / 2;
        const INVAL_LEN: usize = 3;
        const IOVA_AT_INVAL: GuestAddress = GuestAddress(IOVA.0 + INVAL_OFS as u64);
        const PHYS_AT_INVAL: GuestAddress = GuestAddress(PHYS.0 + INVAL_OFS as u64);
        const IOVA_POST_INVAL: GuestAddress = GuestAddress(IOVA_AT_INVAL.0 + INVAL_LEN as u64);
        const PHYS_POST_INVAL: GuestAddress = GuestAddress(PHYS_AT_INVAL.0 + INVAL_LEN as u64);
        const POST_INVAL_LEN: usize = LEN - INVAL_OFS - INVAL_LEN;

        let mut iotlb = Iotlb::new();
        iotlb.set_mapping(IOVA, PHYS, LEN, Permissions::ReadWrite)?;
        verify_hit(
            &iotlb,
            IOVA,
            LEN,
            Permissions::ReadWrite,
            [MappedRange {
                base: PHYS,
                length: LEN,
            }],
        );

        // Invalidate a small range in the middle; the parts before and after
        // must remain mapped, while the invalidated part becomes a miss.
        iotlb.invalidate_mapping(IOVA_AT_INVAL, INVAL_LEN);
        verify_hit(
            &iotlb,
            IOVA,
            INVAL_OFS,
            Permissions::ReadWrite,
            [MappedRange {
                base: PHYS,
                length: INVAL_OFS,
            }],
        );
        verify_fail(
            &iotlb,
            IOVA,
            LEN,
            Permissions::ReadWrite,
            [IovaRange {
                base: IOVA_AT_INVAL,
                length: INVAL_LEN,
            }],
            [],
        );
        verify_hit(
            &iotlb,
            IOVA_POST_INVAL,
            POST_INVAL_LEN,
            Permissions::ReadWrite,
            [MappedRange {
                base: PHYS_POST_INVAL,
                length: POST_INVAL_LEN,
            }],
        );

        // After invalidating everything, the whole range must be a miss.
        iotlb.invalidate_all();
        verify_fail(
            &iotlb,
            IOVA,
            LEN,
            Permissions::ReadWrite,
            [IovaRange {
                base: IOVA,
                length: LEN,
            }],
            [],
        );

        Ok(())
    }

    /// Create an [`IommuMemory`] over the given physical regions, filling
    /// them with consecutive byte values starting at `value_offset`.
    ///
    /// If `virt_mapping` is given, map the regions contiguously into IOVA
    /// space, starting at the given base address, with the given permissions.
    #[cfg(feature = "backend-mmap")]
    fn create_virt_memory<B: NewBitmap>(
        virt_mapping: Option<(GuestAddress, Permissions)>,
        value_offset: u8,
        phys_regions: impl IntoIterator<Item = MappedRange>,
        bitmap: B,
    ) -> IommuMemory<GuestMemoryMmap<B>, SimpleIommu> {
        let phys_ranges = phys_regions
            .into_iter()
            .map(|range| (range.base, range.length))
            .collect::<Vec<(GuestAddress, usize)>>();
        let phys_mem = GuestMemoryMmap::<B>::from_ranges(&phys_ranges).unwrap();

        let mut byte_val = value_offset;
        for (base, len) in &phys_ranges {
            let mut slices = phys_mem
                .get_slices(*base, *len, Permissions::Write)
                .inspect_err(|err| panic!("Failed to access memory: {err}"))
                .unwrap();
            let slice = slices
                .next()
                .unwrap()
                .inspect_err(|err| panic!("Failed to access memory: {err}"))
                .unwrap();
            assert!(slices.next().is_none(), "Expected single slice");

            for i in 0..*len {
                slice.write(&[byte_val], i).unwrap();
                byte_val = byte_val.wrapping_add(1);
            }
        }

        let mem = IommuMemory::new(phys_mem, SimpleIommu::new(), true, bitmap);

        // With the IOMMU enabled, there is no direct physical access.
        assert!(mem.physical_memory().is_none());

        if let Some((mut virt, perm)) = virt_mapping {
            for (base, len) in phys_ranges {
                let mut iotlb = mem.iommu().iotlb.write().unwrap();
                iotlb.set_mapping(virt, base, len, perm).unwrap();
                virt = GuestAddress(virt.0 + len as u64);
            }
        }

        mem
    }

    /// Check that the virtual memory `mem` contains consecutive byte values
    /// (starting at `value_offset`) in the IOVA range `start..(start + len)`,
    /// returning any access error.
    #[cfg(feature = "backend-mmap")]
    fn check_virt_mem_content(
        mem: &impl GuestMemory,
        start: GuestAddress,
        len: usize,
        value_offset: u8,
    ) -> GuestMemoryResult<()> {
        let mut ref_value = value_offset;
        for slice in mem.get_slices(start, len, Permissions::Read)? {
            let slice = slice?;

            let count = slice.len();
            let mut data = vec![0u8; count];
            slice.read(&mut data, 0).unwrap();
            for val in data {
                assert_eq!(val, ref_value);
                ref_value = ref_value.wrapping_add(1);
            }
        }

        Ok(())
    }

    /// Like [`check_virt_mem_content()`], but panic on access errors.
    #[cfg(feature = "backend-mmap")]
    fn verify_virt_mem_content(
        m: &impl GuestMemory,
        start: GuestAddress,
        len: usize,
        value_offset: u8,
    ) {
        check_virt_mem_content(m, start, len, value_offset).unwrap();
    }

    /// Verify that accessing the IOVA range `start..(start + len)` in `m`
    /// fails with `Error::CannotResolve` on the given sub-range; `fail_start`
    /// defaults to `start`, and `fail_len` to the rest of the accessed range.
    #[cfg(feature = "backend-mmap")]
    fn verify_virt_mem_error(
        m: &impl GuestMemory,
        start: GuestAddress,
        len: usize,
        fail_start: Option<GuestAddress>,
        fail_len: Option<usize>,
    ) {
        let fail_start = fail_start.unwrap_or(start);
        let fail_len = fail_len.unwrap_or(len - (fail_start.0 - start.0) as usize);
        let err = check_virt_mem_content(m, start, len, 0).unwrap_err();
        let GuestMemoryError::IommuError(Error::CannotResolve {
            iova_range: failed_range,
            reason: _,
        }) = err
        else {
            panic!("Unexpected error: {err:?}");
        };
        assert_eq!(
            failed_range,
            IovaRange {
                base: fail_start,
                length: fail_len,
            }
        );
    }

    /// Test accesses through pre-established IOTLB mappings.
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_iommu_memory_pre_mapped() {
        const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
        const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
        const PHYS_LEN: usize = 128;
        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
        const VIRT_LEN: usize = PHYS_LEN * 2;
        const VIRT_POST_MAP: GuestAddress = GuestAddress(VIRT_START.0 + VIRT_LEN as u64);

        let mem = create_virt_memory(
            Some((VIRT_START, Permissions::Read)),
            0,
            [
                MappedRange {
                    base: PHYS_START_1,
                    length: PHYS_LEN,
                },
                MappedRange {
                    base: PHYS_START_2,
                    length: PHYS_LEN,
                },
            ],
            (),
        );

        assert!(mem.check_range(VIRT_START, VIRT_LEN, Permissions::No));
        assert!(mem.check_range(VIRT_START, VIRT_LEN, Permissions::Read));
        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Write));
        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::ReadWrite));
        assert!(!mem.check_range(GuestAddress(VIRT_START.0 - 1), 1, Permissions::No));
        assert!(!mem.check_range(VIRT_POST_MAP, 1, Permissions::No));

        verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
        verify_virt_mem_error(&mem, GuestAddress(VIRT_START.0 - 1), 1, None, None);
        verify_virt_mem_error(&mem, VIRT_POST_MAP, 1, None, None);
        verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN + 1, Some(VIRT_POST_MAP), None);
    }

    /// Test accesses with mappings established on demand.
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_iommu_memory_live_mapped() {
        const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
        const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
        const PHYS_LEN: usize = 128;
        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
        const VIRT_START_1: GuestAddress = VIRT_START;
        const VIRT_START_2: GuestAddress = GuestAddress(VIRT_START.0 + PHYS_LEN as u64);
        const VIRT_LEN: usize = PHYS_LEN * 2;
        const VIRT_POST_MAP: GuestAddress = GuestAddress(VIRT_START.0 + VIRT_LEN as u64);

        let mem = create_virt_memory(
            None,
            0,
            [
                MappedRange {
                    base: PHYS_START_1,
                    length: PHYS_LEN,
                },
                MappedRange {
                    base: PHYS_START_2,
                    length: PHYS_LEN,
                },
            ],
            (),
        );

        // Nothing is mapped yet, so all accesses must fail.
        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::No));
        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Read));
        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Write));
        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::ReadWrite));
        assert!(!mem.check_range(GuestAddress(VIRT_START.0 - 1), 1, Permissions::No));
        assert!(!mem.check_range(VIRT_POST_MAP, 1, Permissions::No));

        verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN, None, None);
        verify_virt_mem_error(&mem, GuestAddress(VIRT_START.0 - 1), 1, None, None);
        verify_virt_mem_error(&mem, VIRT_POST_MAP, 1, None, None);
        verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN + 1, None, None);

        let iommu = mem.iommu();

        // Let the IOMMU map the first region on access (IOTLB miss).
        iommu.expect_mapping_request(PHYS_START_1);
        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
        iommu.verify_mapping_request(VIRT_START_1, PHYS_LEN, true);

        // Same for the second region.
        iommu.expect_mapping_request(PHYS_START_2);
        verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
        iommu.verify_mapping_request(VIRT_START_2, PHYS_LEN, true);

        // Downgrade the first mapping to write-only, so the next read must
        // report a permission failure instead of a miss.
        iommu
            .iotlb
            .write()
            .unwrap()
            .set_mapping(VIRT_START_1, PHYS_START_1, PHYS_LEN, Permissions::Write)
            .unwrap();

        iommu.expect_mapping_request(PHYS_START_1);
        verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
        iommu.verify_mapping_request(VIRT_START_1, PHYS_LEN, false);
    }

    /// Test replacing the backend while keeping IOMMU and IOTLB state.
    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_mem_replace() {
        const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
        const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
        const PHYS_LEN: usize = 128;
        const VIRT_START: GuestAddress = GuestAddress(0x2a000);

        // First instance: a single region, mapped read-only.
        let mem = create_virt_memory(
            Some((VIRT_START, Permissions::Read)),
            0,
            [MappedRange {
                base: PHYS_START_1,
                length: PHYS_LEN,
            }],
            (),
        );

        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
        verify_virt_mem_error(
            &mem,
            VIRT_START,
            PHYS_LEN * 2,
            Some(GuestAddress(VIRT_START.0 + PHYS_LEN as u64)),
            None,
        );

        // Second instance: two regions, with different content.
        let mut mem2 = create_virt_memory(
            Some((VIRT_START, Permissions::Read)),
            42,
            [
                MappedRange {
                    base: PHYS_START_1,
                    length: PHYS_LEN,
                },
                MappedRange {
                    base: PHYS_START_2,
                    length: PHYS_LEN,
                },
            ],
            (),
        );

        verify_virt_mem_content(&mem2, VIRT_START, PHYS_LEN * 2, 42);

        // Keep a clone of the original around before replacing its backend.
        let mem_cloned = mem.clone();

        // Move mem2's backend into the first instance.
        mem2.set_iommu_enabled(false);
        let pmem2 = mem2.physical_memory().unwrap();
        assert!(std::ptr::eq(pmem2, mem2.get_backend()));
        let mem = mem.with_replaced_backend(pmem2.clone());

        // The first instance's IOTLB still only maps the first region, so the
        // second region must be mapped on access.
        mem.iommu().expect_mapping_request(PHYS_START_2);
        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN * 2, 42);
        mem.iommu().verify_mapping_request(
            GuestAddress(VIRT_START.0 + PHYS_LEN as u64),
            PHYS_LEN,
            true,
        );

        // The clone kept the old backend, but shares the IOMMU (and thus the
        // IOTLB), so both regions are mapped for it now, too.
        verify_virt_mem_content(&mem_cloned, VIRT_START, PHYS_LEN, 0);
        verify_hit(
            mem_cloned.iommu().iotlb.read().unwrap(),
            VIRT_START,
            PHYS_LEN * 2,
            Permissions::Read,
            [
                MappedRange {
                    base: PHYS_START_1,
                    length: PHYS_LEN,
                },
                MappedRange {
                    base: PHYS_START_2,
                    length: PHYS_LEN,
                },
            ],
        );
    }

    /// Verify that in `mem`'s dirty bitmap, all `clean` addresses are clean
    /// and all `dirty` addresses are dirty; reset the dirty ones.
    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
    fn verify_mem_bitmap<
        M: crate::GuestMemoryBackend<R = R>,
        R: GuestMemoryRegion<B = AtomicBitmap>,
        I: Iommu,
    >(
        mem: &IommuMemory<M, I>,
        clean: impl IntoIterator<Item = usize>,
        dirty: impl IntoIterator<Item = usize>,
    ) {
        let bitmap = mem.bitmap();
        for addr in clean {
            if bitmap.is_addr_set(addr) {
                panic!("Expected addr {addr:#x} to be clean, but is dirty");
            }
        }
        for addr in dirty {
            if !bitmap.is_addr_set(addr) {
                panic!("Expected addr {addr:#x} to be dirty, but is clean");
            }
            bitmap.reset_addr_range(addr, 1);
        }
    }

    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
    #[test]
    fn test_dirty_bitmap() {
        const PAGE_SIZE: usize = 4096;
        const PHYS_START: GuestAddress = GuestAddress(0x4000);
        const PHYS_LEN: usize = PAGE_SIZE * 2;
        const PHYS_PAGE_0: usize = PHYS_START.0 as usize;
        const PHYS_PAGE_1: usize = PHYS_START.0 as usize + PAGE_SIZE;
        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
        const VIRT_PAGE_0: usize = VIRT_START.0 as usize;
        const VIRT_PAGE_1: usize = VIRT_START.0 as usize + PAGE_SIZE;

        let bitmap = AtomicBitmap::new(
            VIRT_START.0 as usize + PHYS_LEN,
            NonZeroUsize::new(PAGE_SIZE).unwrap(),
        );

        let mem = create_virt_memory(
            Some((VIRT_START, Permissions::ReadWrite)),
            0,
            [MappedRange {
                base: PHYS_START,
                length: PHYS_LEN,
            }],
            bitmap,
        );

        // Everything must start out clean.
        verify_mem_bitmap(
            &mem,
            [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
            [],
        );

        // Marking physical addresses as dirty must not affect the virtual
        // pages.
        mem.bitmap().set_addr_range(PHYS_PAGE_0, 2 * PAGE_SIZE);
        verify_mem_bitmap(&mem, [VIRT_PAGE_0, VIRT_PAGE_1], [PHYS_PAGE_0, PHYS_PAGE_1]);

        // Reading must not dirty anything.
        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
        verify_mem_bitmap(
            &mem,
            [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
            [],
        );

        // Just obtaining a writable slice must not dirty anything either.
        let mut slices = mem
            .get_slices(VIRT_START, PHYS_LEN, Permissions::Write)
            .inspect_err(|err| panic!("Failed to access memory: {err}"))
            .unwrap();
        let slice = slices
            .next()
            .unwrap()
            .inspect_err(|err| panic!("Failed to access memory: {err}"))
            .unwrap();
        assert!(slices.next().is_none(), "Expected single slice");

        verify_mem_bitmap(
            &mem,
            [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
            [],
        );

        // Writing through the slice must dirty the corresponding virtual
        // page, not the physical one.
        slice
            .store(42, 0, Ordering::Relaxed)
            .inspect_err(|err| panic!("Writing to memory failed: {err}"))
            .unwrap();
        verify_mem_bitmap(&mem, [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_1], [VIRT_PAGE_0]);

        slice
            .store(23, PAGE_SIZE, Ordering::Relaxed)
            .inspect_err(|err| panic!("Writing to memory failed: {err}"))
            .unwrap();
        verify_mem_bitmap(&mem, [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0], [VIRT_PAGE_1]);
    }
}