vm_memory/iommu.rs

1// Copyright (C) 2025 Red Hat. All rights reserved.
2//
3// Use of this source code is governed by a BSD-style license that can be
4// found in the LICENSE-BSD-3-Clause file.
5//
6// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
7
8//! Provide an interface for IOMMUs enabling I/O virtual address (IOVA) translation.
9//!
10//! All IOMMUs consist of an IOTLB ([`Iotlb`]), which is backed by a data source that can deliver
11//! all mappings.  For example, for vhost-user, that data source is the vhost-user front-end; i.e.
12//! IOTLB misses require sending a notification to the front-end and awaiting a reply that supplies
13//! the desired mapping.
14
15use crate::bitmap::{self, Bitmap};
16use crate::guest_memory::{
17    Error as GuestMemoryError, GuestMemoryBackendSliceIterator, GuestMemorySliceIterator,
18    Result as GuestMemoryResult,
19};
20use crate::{
21    Address, GuestAddress, GuestMemory, GuestMemoryBackend, GuestMemoryRegion, GuestUsize,
22    Permissions, VolatileSlice,
23};
24use rangemap::RangeMap;
25use std::cmp;
26use std::fmt::{self, Debug};
27use std::iter::FusedIterator;
28use std::num::Wrapping;
29use std::ops::{Deref, Range};
30use std::sync::Arc;
31
32/// Errors associated with IOMMU address translation.
33#[derive(Debug, thiserror::Error)]
34pub enum Error {
35    /// Lookup cannot be resolved.
36    #[error(
37        "Cannot translate I/O virtual address range {:#x}+{}: {reason}",
38        iova_range.base.0,
39        iova_range.length,
40    )]
41    CannotResolve {
42        /// IOVA range that could not be resolved
43        iova_range: IovaRange,
44        /// Some human-readable specifics about the reason
45        reason: String,
46    },
47
48    /// Wanted to translate an IOVA range into a single slice, but the range is fragmented.
49    #[error(
50        "Expected {:#x}+{} to be a continuous I/O virtual address range, but only {continuous_length} bytes are",
51        iova_range.base.0,
52        iova_range.length,
53    )]
54    Fragmented {
55        /// Full IOVA range that was to be translated
56        iova_range: IovaRange,
57        /// Length of the continuous head (i.e. the first fragment)
58        continuous_length: usize,
59    },
60
61    /// IOMMU is not configured correctly, and so cannot translate addresses.
62    #[error("IOMMU not configured correctly, cannot operate: {reason}")]
63    IommuMisconfigured {
64        /// Some human-readable specifics about the misconfiguration
65        reason: String,
66    },
67}
68
69/// An IOMMU, allowing translation of I/O virtual addresses (IOVAs).
70///
71/// Generally, `Iommu` implementations consist of an [`Iotlb`], which is supposed to be consulted
72/// first for lookup requests.  All misses and access failures should then be resolved by looking
73/// up the affected ranges in the actual IOMMU (which has all current mappings) and putting the
74/// results back into the IOTLB.  A subsequent lookup in the IOTLB should result in a full
75/// translation, which can then be returned.
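///
/// # Example
///
/// A minimal sketch of an implementation that wraps its [`Iotlb`] in an `RwLock` and fills it on
/// demand.  `MyIommu` and `resolve_miss()` are hypothetical; `resolve_miss()` stands in for
/// whatever delivers the full mappings (e.g. querying the vhost-user front-end) and is not part
/// of this crate.
///
/// ```ignore
/// use std::sync::{RwLock, RwLockReadGuard};
///
/// #[derive(Debug)]
/// struct MyIommu {
///     iotlb: RwLock<Iotlb>,
/// }
///
/// impl Iommu for MyIommu {
///     type IotlbGuard<'a> = RwLockReadGuard<'a, Iotlb>;
///
///     fn translate(
///         &self,
///         iova: GuestAddress,
///         length: usize,
///         access: Permissions,
///     ) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error> {
///         loop {
///             // Consult the IOTLB first; if the whole range is mapped accessibly, we are done.
///             let fails = match Iotlb::lookup(self.iotlb.read().unwrap(), iova, length, access) {
///                 Ok(mappings) => return Ok(mappings),
///                 Err(fails) => fails,
///             };
///
///             // Resolve all misses and access failures from the full mapping source, enter the
///             // results into the IOTLB, then retry the lookup.
///             let mut iotlb = self.iotlb.write().unwrap();
///             for fail in fails.misses.iter().chain(fails.access_fails.iter()) {
///                 let target = resolve_miss(fail, access)?; // hypothetical resolver
///                 iotlb.set_mapping(fail.base, target, fail.length, access)?;
///             }
///         }
///     }
/// }
/// ```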
76pub trait Iommu: Debug + Send + Sync {
77    /// `Deref` type associated with the type that internally wraps the `Iotlb`.
78    ///
79    /// For example, the `Iommu` may keep the `Iotlb` wrapped in an `RwLock`, making this type
80    /// `RwLockReadGuard<'a, Iotlb>`.
81    ///
82    /// We need this specific type instead of a plain reference so that [`IotlbIterator`] can
83    /// actually own the reference and prolong its lifetime.
84    type IotlbGuard<'a>: Deref<Target = Iotlb> + 'a
85    where
86        Self: 'a;
87
88    /// Translate the given range for the given access into the underlying address space.
89    ///
90    /// Any translation request is supposed to be fully served by an internal [`Iotlb`] instance.
91    /// Any misses or access failures should result in a lookup in the full IOMMU structures,
92    /// filling the IOTLB with the results, and then repeating the lookup in there.
93    fn translate(
94        &self,
95        iova: GuestAddress,
96        length: usize,
97        access: Permissions,
98    ) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error>;
99}
100
101/// Mapping target in an IOMMU/IOTLB.
102///
103/// This is the data to which each entry in an IOMMU/IOTLB maps.
104#[derive(Clone, Copy, Debug, Eq, PartialEq)]
105struct IommuMapping {
106    /// Difference between the mapped and the IOVA address, i.e. what to add to an IOVA address to
107    /// get the mapped address.
108    ///
109    /// We cannot store the more obvious mapped base address for this range because that would
110    /// allow rangemap to wrongfully merge consecutive map entries if they are a duplicate mapping
111    /// (which does happen).  Storing the difference ensures that entries are only merged when they
112    /// are indeed consecutive.
113    ///
114    /// Note that we make no granularity restrictions (i.e. do not operate on a unit like pages),
115    /// so the source and target address may have arbitrary alignment.  That is why these two
116    /// fields need to stay separate: the base address has no guaranteed-zero low bits into which
117    /// the two `permissions` bits could be folded, so they cannot share a single `u64` field.
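    ///
    /// For example (illustrative numbers): if both `0x1000..0x2000` and `0x2000..0x3000` map to
    /// physical base `0x8000`, storing that base would give both entries the same value, and
    /// rangemap would merge them into `0x1000..0x3000 -> 0x8000`, silently changing where the
    /// second range points.  Their differences (`0x7000` and `0x6000`) differ, so no such merge
    /// can happen.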
118    target_source_diff: Wrapping<u64>,
119    /// Allowed access for the mapped range
120    permissions: Permissions,
121}
122
123/// Provides an IOTLB.
124///
125/// The IOTLB caches IOMMU mappings.  It must be preemptively updated whenever mappings are
126/// restricted or removed; in contrast, adding mappings or making them more permissive does not
127/// require preemptive updates, as subsequent accesses that violate the previous (more restrictive)
128/// permissions will trigger TLB misses or access failures, which are then supposed to result in an
129/// update from the outer [`Iommu`] object that performs the translation.
130#[derive(Debug, Default)]
131pub struct Iotlb {
132    /// Mappings of which we know.
133    ///
134    /// Note that the vhost(-user) specification makes no mention of a specific page size, even
135    /// though in practice the IOVA address space will be organized in terms of pages.  However, we
136    /// cannot really rely on that (or any specific page size; it could be 4k, the guest page size,
137    /// or the host page size), so we need to be able to handle continuous ranges of any
138    /// granularity.
139    tlb: RangeMap<u64, IommuMapping>,
140}
141
142/// Iterates over a range of valid IOTLB mappings that together constitute a continuous range in
143/// I/O virtual address space.
144///
145/// Returned by [`Iotlb::lookup()`] and [`Iommu::translate()`] in case translation was successful
146/// (i.e. the whole requested range is mapped and permits the given access).
147#[derive(Clone, Debug)]
148pub struct IotlbIterator<D: Deref<Target = Iotlb>> {
149    /// IOTLB that provides these mappings
150    iotlb: D,
151    /// I/O virtual address range left to iterate over
152    range: Range<u64>,
153    /// Requested access permissions
154    access: Permissions,
155}
156
157/// Representation of an IOVA memory range (i.e. in the I/O virtual address space).
158#[derive(Clone, Debug, Eq, PartialEq)]
159pub struct IovaRange {
160    /// IOVA base address
161    pub base: GuestAddress,
162    /// Length (in bytes) of this range
163    pub length: usize,
164}
165
166/// Representation of a mapped memory range in the underlying address space.
167#[derive(Clone, Debug, Eq, PartialEq)]
168pub struct MappedRange {
169    /// Base address in the underlying address space
170    pub base: GuestAddress,
171    /// Length (in bytes) of this mapping
172    pub length: usize,
173}
174
175/// Lists the subranges in I/O virtual address space that turned out to not be accessible when
176/// trying to access an IOVA range.
177#[derive(Clone, Debug)]
178pub struct IotlbFails {
179    /// Subranges not mapped at all
180    pub misses: Vec<IovaRange>,
181    /// Subranges that are mapped, but do not allow the requested access mode
182    pub access_fails: Vec<IovaRange>,
183}
184
185/// [`GuestMemory`] type that consists of an underlying [`GuestMemoryBackend`] object plus an [`Iommu`].
186///
187/// The underlying [`GuestMemoryBackend`] is basically the physical memory, and the [`Iommu`] translates
188/// the I/O virtual address space that `IommuMemory` provides into that underlying physical address
189/// space.
190///
191/// Note that this type’s implementation of memory write tracking (“logging”) is specific to what
192/// is required by vhost-user:
193/// - When the IOMMU is disabled ([`IommuMemory::set_iommu_enabled()`]), writes to memory are
194///   tracked by the underlying [`GuestMemoryBackend`] in its bitmap(s).
195/// - When it is enabled, they are instead tracked in the [`IommuMemory`]’s dirty bitmap; the
196///   offset in the bitmap is calculated from the write’s IOVA.
197///
198/// That is, there are two bitmap levels, one in the underlying [`GuestMemoryBackend`], and one in
199/// [`IommuMemory`].  The former is used when the IOMMU is disabled, the latter when it is enabled.
200///
201/// If you need a different model (e.g. always use the [`GuestMemoryBackend`] bitmaps), you should not use
202/// this type.
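///
/// # Example
///
/// A construction sketch (illustrative only, assuming the `backend-mmap` feature): `MyIommu`
/// stands for any user-provided [`Iommu`] implementation, and the `()` bitmap performs no dirty
/// tracking.
///
/// ```ignore
/// let backend = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x4000), 0x1000)]).unwrap();
/// let mut mem = IommuMemory::new(backend, MyIommu::new(), true, ());
///
/// // While the IOMMU is enabled, addresses passed to `GuestMemory` methods are IOVAs and are
/// // translated before the backend is accessed.
/// mem.set_iommu_enabled(false);
/// // Now accesses are passed through directly to `backend`.
/// ```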
203pub struct IommuMemory<M: GuestMemoryBackend, I: Iommu> {
204    /// Physical memory
205    backend: M,
206    /// IOMMU to translate IOVAs into physical addresses
207    iommu: Arc<I>,
208    /// Whether the IOMMU is to be used at all; disabling it makes this a pass-through to
209    /// `backend`.
210    use_iommu: bool,
211    /// Dirty bitmap to use for IOVA accesses
212    bitmap: Arc<<M::R as GuestMemoryRegion>::B>,
213}
214
215impl IommuMapping {
216    /// Create a new mapping.
217    fn new(source_base: u64, target_base: u64, permissions: Permissions) -> Self {
218        IommuMapping {
219            target_source_diff: Wrapping(target_base) - Wrapping(source_base),
220            permissions,
221        }
222    }
223
224    /// Map the given source address (IOVA) to its corresponding target address.
225    fn map(&self, iova: u64) -> u64 {
226        (Wrapping(iova) + self.target_source_diff).0
227    }
228
229    /// Return the permissions for this mapping.
230    fn permissions(&self) -> Permissions {
231        self.permissions
232    }
233}
234
235impl Iotlb {
236    /// Create a new empty instance.
237    pub fn new() -> Self {
238        Default::default()
239    }
240
241    /// Change the mapping of the given IOVA range.
242    pub fn set_mapping(
243        &mut self,
244        iova: GuestAddress,
245        map_to: GuestAddress,
246        length: usize,
247        perm: Permissions,
248    ) -> Result<(), Error> {
249        // Soft TODO: We may want to evict old entries here once the TLB grows to a certain size,
250        // but that will require LRU book-keeping.  However, this is left for the future, because:
251        // - this TLB is not implemented in hardware, so we do not really have strong entry count
252        //   constraints, and
253        // - it seems like at least Linux guests invalidate mappings often, automatically limiting
254        //   our entry count.
255
256        let mapping = IommuMapping::new(iova.0, map_to.0, perm);
257        self.tlb.insert(iova.0..(iova.0 + length as u64), mapping);
258
259        Ok(())
260    }
261
262    /// Remove any mapping in the given IOVA range.
263    pub fn invalidate_mapping(&mut self, iova: GuestAddress, length: usize) {
264        self.tlb.remove(iova.0..(iova.0 + length as u64));
265    }
266
267    /// Remove all mappings.
268    pub fn invalidate_all(&mut self) {
269        self.tlb.clear();
270    }
271
272    /// Perform a lookup for the given range and the given `access` mode.
273    ///
274    /// If the whole range is mapped and accessible, return an iterator over all mappings.
275    ///
276    /// If any part of the range is not mapped or does not permit the given access mode, return an
277    /// `Err(_)` that contains a list of all such subranges.
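    ///
    /// # Example
    ///
    /// A minimal usage sketch: map an IOVA range, then look up part of it.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, Iotlb, Permissions};
    /// let mut iotlb = Iotlb::new();
    /// iotlb
    ///     .set_mapping(GuestAddress(0x1000), GuestAddress(0x8000), 0x1000, Permissions::Read)
    ///     .unwrap();
    ///
    /// let mut mappings = Iotlb::lookup(&iotlb, GuestAddress(0x1800), 0x100, Permissions::Read)
    ///     .expect("range is mapped readable");
    /// let first = mappings.next().unwrap();
    /// assert_eq!(first.base, GuestAddress(0x8800));
    /// assert_eq!(first.length, 0x100);
    /// assert!(mappings.next().is_none());
    /// ```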
278    pub fn lookup<D: Deref<Target = Iotlb>>(
279        this: D,
280        iova: GuestAddress,
281        length: usize,
282        access: Permissions,
283    ) -> Result<IotlbIterator<D>, IotlbFails> {
284        let full_range = iova.0..(iova.0 + length as u64);
285
286        let has_misses = this.tlb.gaps(&full_range).any(|_| true);
287        let has_access_fails = this
288            .tlb
289            .overlapping(full_range.clone())
290            .any(|(_, mapping)| !mapping.permissions().allow(access));
291
292        if has_misses || has_access_fails {
293            let misses = this
294                .tlb
295                .gaps(&full_range)
296                .map(|range| {
297                    // Gaps are always cut down to the range given to `gaps()`
298                    debug_assert!(range.start >= full_range.start && range.end <= full_range.end);
299                    range.try_into().unwrap()
300                })
301                .collect::<Vec<_>>();
302
303            let access_fails = this
304                .tlb
305                .overlapping(full_range.clone())
306                .filter(|(_, mapping)| !mapping.permissions().allow(access))
307                .map(|(range, _)| {
308                    let start = cmp::max(range.start, full_range.start);
309                    let end = cmp::min(range.end, full_range.end);
310                    (start..end).try_into().unwrap()
311                })
312                .collect::<Vec<_>>();
313
314            return Err(IotlbFails {
315                misses,
316                access_fails,
317            });
318        }
319
320        Ok(IotlbIterator {
321            iotlb: this,
322            range: full_range,
323            access,
324        })
325    }
326}
327
328impl<D: Deref<Target = Iotlb>> Iterator for IotlbIterator<D> {
329    /// Addresses in the underlying address space
330    type Item = MappedRange;
331
332    fn next(&mut self) -> Option<Self::Item> {
333        // Note that we can expect the whole IOVA range to be mapped with the right access flags.
334        // The `IotlbIterator` is created by `Iotlb::lookup()` only if the whole range is mapped
335        // accessibly; we have a permanent reference to `Iotlb`, so the range cannot be invalidated
336        // in the meantime.
337        // Another note: It is tempting to have `IotlbIterator` wrap around the
338        // `rangemap::Overlapping` iterator, but that just takes a (lifetimed) reference to the
339        // map, not an owned reference (like RwLockReadGuard), which we want to use; so using that
340        // would probably require self-referential structs.
341
342        if self.range.is_empty() {
343            return None;
344        }
345
346        let (range, mapping) = self.iotlb.tlb.get_key_value(&self.range.start).unwrap();
347
348        assert!(mapping.permissions().allow(self.access));
349
350        let mapping_iova_start = self.range.start;
351        let mapping_iova_end = cmp::min(self.range.end, range.end);
352        let mapping_len = mapping_iova_end - mapping_iova_start;
353
354        self.range.start = mapping_iova_end;
355
356        Some(MappedRange {
357            base: GuestAddress(mapping.map(mapping_iova_start)),
358            length: mapping_len.try_into().unwrap(),
359        })
360    }
361}
362
363impl TryFrom<Range<u64>> for IovaRange {
364    type Error = <u64 as TryFrom<usize>>::Error;
365
366    fn try_from(range: Range<u64>) -> Result<Self, Self::Error> {
367        Ok(IovaRange {
368            base: GuestAddress(range.start),
369            length: (range.end - range.start).try_into()?,
370        })
371    }
372}
373
374impl<M: GuestMemoryBackend, I: Iommu> IommuMemory<M, I> {
375    /// Create a new `IommuMemory` instance.
376    pub fn new(
377        backend: M,
378        iommu: I,
379        use_iommu: bool,
380        bitmap: <Self as GuestMemory>::Bitmap,
381    ) -> Self {
382        IommuMemory {
383            backend,
384            iommu: Arc::new(iommu),
385            use_iommu,
386            bitmap: Arc::new(bitmap),
387        }
388    }
389
390    /// Create a new version of `self` with the underlying physical memory replaced.
391    ///
392    /// Note that the inner `Arc` references to the IOMMU and bitmap are cloned, i.e. both the
393    /// existing and the new `IommuMemory` object will share the IOMMU and bitmap instances.  (The
394/// `use_iommu` flag, however, is copied, so it is independent between the two instances.)
395    pub fn with_replaced_backend(&self, new_backend: M) -> Self {
396        IommuMemory {
397            backend: new_backend,
398            iommu: Arc::clone(&self.iommu),
399            use_iommu: self.use_iommu,
400            bitmap: Arc::clone(&self.bitmap),
401        }
402    }
403
404    /// Return a reference to the IOVA address space's dirty bitmap.
405    ///
406    /// This bitmap tracks write accesses done while the IOMMU is enabled.
407    pub fn bitmap(&self) -> &Arc<<Self as GuestMemory>::Bitmap> {
408        &self.bitmap
409    }
410
411    /// Enable or disable the IOMMU.
412    ///
413    /// Disabling the IOMMU switches to pass-through mode, where every access is done directly on
414    /// the underlying physical memory.
415    pub fn set_iommu_enabled(&mut self, enabled: bool) {
416        self.use_iommu = enabled;
417    }
418
419    /// Check whether the IOMMU is enabled.
420    ///
421    /// If the IOMMU is disabled, we operate in pass-through mode, where every access is done
422    /// directly on the underlying physical memory.
423    pub fn get_iommu_enabled(&self) -> bool {
424        self.use_iommu
425    }
426
427    /// Return a reference to the IOMMU.
428    pub fn iommu(&self) -> &Arc<I> {
429        &self.iommu
430    }
431
432    /// Return a reference to the underlying physical memory object.
433    pub fn get_backend(&self) -> &M {
434        &self.backend
435    }
436}
437
438impl<M: GuestMemoryBackend + Clone, I: Iommu> Clone for IommuMemory<M, I> {
439    fn clone(&self) -> Self {
440        IommuMemory {
441            backend: self.backend.clone(),
442            iommu: Arc::clone(&self.iommu),
443            use_iommu: self.use_iommu,
444            bitmap: Arc::clone(&self.bitmap),
445        }
446    }
447}
448
449impl<M: GuestMemoryBackend + Debug, I: Iommu> Debug for IommuMemory<M, I>
450where
451    <M::R as GuestMemoryRegion>::B: Debug,
452{
453    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
454        f.debug_struct("IommuMemory")
455            .field("backend", &self.backend)
456            .field("iommu", &self.iommu)
457            .field("use_iommu", &self.use_iommu)
458            .field("bitmap", &self.bitmap)
459            .finish()
460    }
461}
462
463impl<M: GuestMemoryBackend + Default, I: Iommu + Default> Default for IommuMemory<M, I>
464where
465    <M::R as GuestMemoryRegion>::B: Default,
466{
467    fn default() -> Self {
468        IommuMemory {
469            backend: Default::default(),
470            iommu: Default::default(),
471            use_iommu: Default::default(),
472            bitmap: Default::default(),
473        }
474    }
475}
476
477impl<M: GuestMemoryBackend, I: Iommu> GuestMemory for IommuMemory<M, I> {
478    type PhysicalMemory = M;
479    type Bitmap = <M::R as GuestMemoryRegion>::B;
480
481    fn check_range(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool {
482        if !self.use_iommu {
483            return self.backend.check_range(addr, count);
484        }
485
486        let Ok(mut translated_iter) = self.iommu.translate(addr, count, access) else {
487            return false;
488        };
489
490        translated_iter
491            .all(|translated| self.backend.check_range(translated.base, translated.length))
492    }
493
494    fn get_slices<'a>(
495        &'a self,
496        addr: GuestAddress,
497        count: usize,
498        access: Permissions,
499    ) -> GuestMemoryResult<impl GuestMemorySliceIterator<'a, bitmap::BS<'a, Self::Bitmap>>> {
500        if self.use_iommu {
501            IommuMemorySliceIterator::virt(self, addr, count, access)
502                .map_err(GuestMemoryError::IommuError)
503        } else {
504            Ok(IommuMemorySliceIterator::phys(self, addr, count))
505        }
506    }
507
508    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
509        if self.use_iommu {
510            None
511        } else {
512            Some(&self.backend)
513        }
514    }
515}
516
517/// Iterates over [`VolatileSlice`]s that together form an area in an `IommuMemory`.
518///
519/// Returned by [`IommuMemory::get_slices()`]
520pub struct IommuMemorySliceIterator<'a, M: GuestMemoryBackend, I: Iommu + 'a> {
521    /// Current IOVA (needed to access the right slice of the IOVA space dirty bitmap)
522    iova: GuestAddress,
523    /// IOVA space dirty bitmap
524    bitmap: Option<&'a <M::R as GuestMemoryRegion>::B>,
525    /// Underlying physical memory (i.e. not the `IommuMemory`)
526    phys_mem: &'a M,
527    /// IOMMU translation result (i.e. remaining physical regions to visit)
528    translation: Option<IotlbIterator<I::IotlbGuard<'a>>>,
529    /// Iterator in the currently visited physical region
530    current_translated_iter: Option<GuestMemoryBackendSliceIterator<'a, M>>,
531}
532
533impl<'a, M: GuestMemoryBackend, I: Iommu> IommuMemorySliceIterator<'a, M, I> {
534    /// Create an iterator over the physical region `[addr, addr + count)`.
535    ///
536    /// “Physical” means that the IOMMU is not used to translate this address range.  The resulting
537    /// iterator is effectively the same as would be returned by [`GuestMemoryBackend::get_slices()`] on
538    /// the underlying physical memory for the given address range.
539    fn phys(mem: &'a IommuMemory<M, I>, addr: GuestAddress, count: usize) -> Self {
540        IommuMemorySliceIterator {
541            iova: addr,
542            bitmap: None,
543            phys_mem: &mem.backend,
544            translation: None,
545            current_translated_iter: Some(mem.backend.get_slices(addr, count)),
546        }
547    }
548
549    /// Create an iterator over the IOVA region `[addr, addr + count)`.
550    ///
551    /// This address range is translated using the IOMMU, and the resulting mappings are then
552    /// separately visited via [`GuestMemoryBackend::get_slices()`].
553    fn virt(
554        mem: &'a IommuMemory<M, I>,
555        addr: GuestAddress,
556        count: usize,
557        access: Permissions,
558    ) -> Result<Self, Error> {
559        let translation = mem.iommu.translate(addr, count, access)?;
560        Ok(IommuMemorySliceIterator {
561            iova: addr,
562            bitmap: Some(mem.bitmap.as_ref()),
563            phys_mem: &mem.backend,
564            translation: Some(translation),
565            current_translated_iter: None,
566        })
567    }
568
569    /// Helper function for [`<Self as Iterator>::next()`](IommuMemorySliceIterator::next).
570    ///
571    /// Get the next slice and update the internal state.  If there is an element left in
572    /// `self.current_translated_iter`, return that; otherwise, move to the next mapping left in
573    /// `self.translation` until there are no more mappings left.
574    ///
575    /// If both fields are `None`, always return `None`.
576    ///
577    /// # Safety
578    ///
579    /// This function never resets `self.current_translated_iter` or `self.translation` to `None`,
580    /// particularly not in case of error; calling this function with these fields not reset after
581    /// an error is ill-defined, so the caller must check the return value, and in case of an
582    /// error, reset these fields to `None`.
583    ///
584    /// (This is why this function exists, so this reset can happen in a single central location.)
585    unsafe fn do_next(
586        &mut self,
587    ) -> Option<GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>> {
588        loop {
589            if let Some(item) = self
590                .current_translated_iter
591                .as_mut()
592                .and_then(|iter| iter.next())
593            {
594                let mut item = match item {
595                    Ok(item) => item,
596                    Err(err) => return Some(Err(err)),
597                };
598
599                if let Some(bitmap) = self.bitmap.as_ref() {
600                    let bitmap_slice = bitmap.slice_at(self.iova.0 as usize);
601                    item = item.replace_bitmap(bitmap_slice);
602                }
603
604                self.iova = match self.iova.overflowing_add(item.len() as GuestUsize) {
605                    (x @ GuestAddress(0), _) | (x, false) => x,
606                    (_, true) => return Some(Err(GuestMemoryError::GuestAddressOverflow)),
607                };
608
609                return Some(Ok(item));
610            }
611
612            let next_mapping = self.translation.as_mut()?.next()?;
613            self.current_translated_iter = Some(
614                self.phys_mem
615                    .get_slices(next_mapping.base, next_mapping.length),
616            );
617        }
618    }
619}
620
621impl<'a, M: GuestMemoryBackend, I: Iommu> Iterator for IommuMemorySliceIterator<'a, M, I> {
622    type Item = GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>;
623
624    fn next(&mut self) -> Option<Self::Item> {
625        // SAFETY:
626        // We reset `current_translated_iter` and `translation` to `None` in case of error
627        match unsafe { self.do_next() } {
628            Some(Ok(slice)) => Some(Ok(slice)),
629            other => {
630                // On error (or end), clear both so iteration remains stopped
631                self.current_translated_iter.take();
632                self.translation.take();
633                other
634            }
635        }
636    }
637}
638
639/// This iterator continues to return `None` when exhausted.
640///
641/// [`<Self as Iterator>::next()`](IommuMemorySliceIterator::next) sets both
642/// `self.current_translated_iter` and `self.translation` to `None` when returning anything but
643/// `Some(Ok(_))`, ensuring that it will only return `None` from that point on.
644impl<M: GuestMemoryBackend, I: Iommu> FusedIterator for IommuMemorySliceIterator<'_, M, I> {}
645
646impl<'a, M: GuestMemoryBackend, I: Iommu> GuestMemorySliceIterator<'a, bitmap::MS<'a, M>>
647    for IommuMemorySliceIterator<'a, M, I>
648{
649}
650
651impl<'a, M: GuestMemoryBackend + Debug, I: Iommu> Debug for IommuMemorySliceIterator<'a, M, I>
652where
653    I::IotlbGuard<'a>: Debug,
654    <M::R as GuestMemoryRegion>::B: Debug,
655{
656    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
657        f.debug_struct("IommuMemorySliceIterator")
658            .field("iova", &self.iova)
659            .field("bitmap", &self.bitmap)
660            .field("phys_mem", &self.phys_mem)
661            .field("translation", &self.translation)
662            .field("current_translated_iter", &self.current_translated_iter)
663            .finish()
664    }
665}
666
667#[cfg(test)]
668mod tests {
669    #[cfg(feature = "backend-mmap")]
670    use super::IotlbIterator;
671    use super::{Error, IovaRange, MappedRange};
672    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
673    use crate::bitmap::AtomicBitmap;
674    #[cfg(feature = "backend-mmap")]
675    use crate::bitmap::NewBitmap;
676    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
677    use crate::GuestMemoryRegion;
678    #[cfg(feature = "backend-mmap")]
679    use crate::{
680        Bytes, GuestMemory, GuestMemoryError, GuestMemoryMmap, GuestMemoryResult, Iommu,
681        IommuMemory,
682    };
683    use crate::{GuestAddress, Iotlb, Permissions};
684    use std::fmt::Debug;
685    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
686    use std::num::NonZeroUsize;
687    use std::ops::Deref;
688    #[cfg(feature = "backend-mmap")]
689    use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
690    #[cfg(feature = "backend-mmap")]
691    use std::sync::{RwLock, RwLockReadGuard};
692
693    #[derive(Debug)]
694    #[cfg(feature = "backend-mmap")]
695    struct SimpleIommu {
696        iotlb: RwLock<Iotlb>,
697        /// Records the last fail event's base IOVA
698        fail_base: AtomicU64,
699        /// Records the last fail event's length
700        fail_len: AtomicUsize,
701        /// Records whether the last fail event was a miss
702        fail_was_miss: AtomicBool,
703        /// What base physical address to map to on the next fail event (0 means return error)
704        next_map_to: AtomicU64,
705    }
706
707    #[cfg(feature = "backend-mmap")]
708    impl SimpleIommu {
709        fn new() -> Self {
710            SimpleIommu {
711                iotlb: Iotlb::new().into(),
712                fail_base: 0.into(),
713                fail_len: 0.into(),
714                fail_was_miss: false.into(),
715                next_map_to: 0.into(),
716            }
717        }
718
719        fn expect_mapping_request(&self, to_phys: GuestAddress) {
720            // Clear failed range info so it can be tested after the request
721            self.fail_base.store(0, Ordering::Relaxed);
722            self.fail_len.store(0, Ordering::Relaxed);
723            self.next_map_to.store(to_phys.0, Ordering::Relaxed);
724        }
725
726        fn verify_mapping_request(&self, virt: GuestAddress, len: usize, was_miss: bool) {
727            assert_eq!(self.fail_base.load(Ordering::Relaxed), virt.0);
728            assert_eq!(self.fail_len.load(Ordering::Relaxed), len);
729            assert_eq!(self.fail_was_miss.load(Ordering::Relaxed), was_miss);
730        }
731    }
732
733    #[cfg(feature = "backend-mmap")]
734    impl Iommu for SimpleIommu {
735        type IotlbGuard<'a> = RwLockReadGuard<'a, Iotlb>;
736
737        fn translate(
738            &self,
739            iova: GuestAddress,
740            length: usize,
741            access: Permissions,
742        ) -> Result<IotlbIterator<Self::IotlbGuard<'_>>, Error> {
743            loop {
744                let mut fails =
745                    match Iotlb::lookup(self.iotlb.read().unwrap(), iova, length, access) {
746                        Ok(success) => return Ok(success),
747                        Err(fails) => fails,
748                    };
749                let miss = !fails.misses.is_empty();
750                let fail = fails
751                    .misses
752                    .pop()
753                    .or_else(|| fails.access_fails.pop())
754                    .expect("No failure reported, even though a failure happened");
755                self.fail_base.store(fail.base.0, Ordering::Relaxed);
756                self.fail_len.store(fail.length, Ordering::Relaxed);
757                self.fail_was_miss.store(miss, Ordering::Relaxed);
758
759                if !fails.misses.is_empty() || !fails.access_fails.is_empty() {
760                    return Err(Error::CannotResolve {
761                        iova_range: IovaRange { base: iova, length },
762                        reason: "This IOMMU can only handle one failure per access".into(),
763                    });
764                }
765
766                let map_to = self.next_map_to.swap(0, Ordering::Relaxed);
767                if map_to == 0 {
768                    return Err(Error::CannotResolve {
769                        iova_range: IovaRange {
770                            base: fail.base,
771                            length: fail.length,
772                        },
773                        reason: "No mapping provided for failed range".into(),
774                    });
775                }
776
777                self.iotlb.write().unwrap().set_mapping(
778                    fail.base,
779                    GuestAddress(map_to),
780                    fail.length,
781                    access,
782                )?;
783            }
784        }
785    }
786
787    /// Verify that `iova`+`length` is mapped to `expected`.
788    fn verify_hit(
789        iotlb: impl Deref<Target = Iotlb> + Debug,
790        iova: GuestAddress,
791        length: usize,
792        permissions: Permissions,
793        expected: impl IntoIterator<Item = MappedRange>,
794    ) {
795        let mut iter = Iotlb::lookup(iotlb, iova, length, permissions)
796            .inspect_err(|err| panic!("Unexpected lookup error {err:?}"))
797            .unwrap();
798
799        for e in expected {
800            assert_eq!(iter.next(), Some(e));
801        }
802        assert_eq!(iter.next(), None);
803    }
804
805    /// Verify that trying to look up `iova`+`length` results in misses at `expected_misses` and
806    /// access failures (permission-related) at `expected_access_fails`.
807    fn verify_fail(
808        iotlb: impl Deref<Target = Iotlb> + Debug,
809        iova: GuestAddress,
810        length: usize,
811        permissions: Permissions,
812        expected_misses: impl IntoIterator<Item = IovaRange>,
813        expected_access_fails: impl IntoIterator<Item = IovaRange>,
814    ) {
815        let fails = Iotlb::lookup(iotlb, iova, length, permissions)
816            .inspect(|hits| panic!("Expected error on lookup, found {hits:?}"))
817            .unwrap_err();
818
819        let mut miss_iter = fails.misses.into_iter();
820        for e in expected_misses {
821            assert_eq!(miss_iter.next(), Some(e));
822        }
823        assert_eq!(miss_iter.next(), None);
824
825        let mut accf_iter = fails.access_fails.into_iter();
826        for e in expected_access_fails {
827            assert_eq!(accf_iter.next(), Some(e));
828        }
829        assert_eq!(accf_iter.next(), None);
830    }
831
832    /// Insert adjacent IOTLB entries and verify they are merged into a single one.
833    #[test]
834    fn test_iotlb_merge() -> Result<(), Error> {
835        const IOVA: GuestAddress = GuestAddress(42);
836        const PHYS: GuestAddress = GuestAddress(87);
837        const LEN_1: usize = 123;
838        const LEN_2: usize = 234;
839
840        let mut iotlb = Iotlb::new();
841        iotlb.set_mapping(IOVA, PHYS, LEN_1, Permissions::ReadWrite)?;
842        iotlb.set_mapping(
843            GuestAddress(IOVA.0 + LEN_1 as u64),
844            GuestAddress(PHYS.0 + LEN_1 as u64),
845            LEN_2,
846            Permissions::ReadWrite,
847        )?;
848
849        verify_hit(
850            &iotlb,
851            IOVA,
852            LEN_1 + LEN_2,
853            Permissions::ReadWrite,
854            [MappedRange {
855                base: PHYS,
856                length: LEN_1 + LEN_2,
857            }],
858        );
859
860        // Also check just a partial range
861        verify_hit(
862            &iotlb,
863            GuestAddress(IOVA.0 + LEN_1 as u64 - 1),
864            2,
865            Permissions::ReadWrite,
866            [MappedRange {
867                base: GuestAddress(PHYS.0 + LEN_1 as u64 - 1),
868                length: 2,
869            }],
870        );
871
872        Ok(())
873    }
874
875    /// Test that adjacent IOTLB entries that map to the same physical address are not merged into
876    /// a single entry.
877    #[test]
878    fn test_iotlb_nomerge_same_phys() -> Result<(), Error> {
879        const IOVA: GuestAddress = GuestAddress(42);
880        const PHYS: GuestAddress = GuestAddress(87);
881        const LEN_1: usize = 123;
882        const LEN_2: usize = 234;
883
884        let mut iotlb = Iotlb::new();
885        iotlb.set_mapping(IOVA, PHYS, LEN_1, Permissions::ReadWrite)?;
886        iotlb.set_mapping(
887            GuestAddress(IOVA.0 + LEN_1 as u64),
888            PHYS,
889            LEN_2,
890            Permissions::ReadWrite,
891        )?;
892
893        verify_hit(
894            &iotlb,
895            IOVA,
896            LEN_1 + LEN_2,
897            Permissions::ReadWrite,
898            [
899                MappedRange {
900                    base: PHYS,
901                    length: LEN_1,
902                },
903                MappedRange {
904                    base: PHYS,
905                    length: LEN_2,
906                },
907            ],
908        );
909
910        Ok(())
911    }
912
913    /// Test permission handling
914    #[test]
915    fn test_iotlb_perms() -> Result<(), Error> {
916        const IOVA_R: GuestAddress = GuestAddress(42);
917        const PHYS_R: GuestAddress = GuestAddress(87);
918        const LEN_R: usize = 123;
919        const IOVA_W: GuestAddress = GuestAddress(IOVA_R.0 + LEN_R as u64);
920        const PHYS_W: GuestAddress = GuestAddress(PHYS_R.0 + LEN_R as u64);
921        const LEN_W: usize = 234;
922        const IOVA_FULL: GuestAddress = IOVA_R;
923        const LEN_FULL: usize = LEN_R + LEN_W;
924
925        let mut iotlb = Iotlb::new();
926        iotlb.set_mapping(IOVA_R, PHYS_R, LEN_R, Permissions::Read)?;
927        iotlb.set_mapping(IOVA_W, PHYS_W, LEN_W, Permissions::Write)?;
928
929        // Test 1: Access whole range as R+W, should completely fail
930        verify_fail(
931            &iotlb,
932            IOVA_FULL,
933            LEN_FULL,
934            Permissions::ReadWrite,
935            [],
936            [
937                IovaRange {
938                    base: IOVA_R,
939                    length: LEN_R,
940                },
941                IovaRange {
942                    base: IOVA_W,
943                    length: LEN_W,
944                },
945            ],
946        );
947
948        // Test 2: Access whole range as R-only, should fail on second part
949        verify_fail(
950            &iotlb,
951            IOVA_FULL,
952            LEN_FULL,
953            Permissions::Read,
954            [],
955            [IovaRange {
956                base: IOVA_W,
957                length: LEN_W,
958            }],
959        );
960
961        // Test 3: Access whole range W-only, should fail on first part
962        verify_fail(
963            &iotlb,
964            IOVA_FULL,
965            LEN_FULL,
966            Permissions::Write,
967            [],
968            [IovaRange {
969                base: IOVA_R,
970                length: LEN_R,
971            }],
972        );
973
974        // Test 4: Access whole range w/o perms, should succeed
975        verify_hit(
976            &iotlb,
977            IOVA_FULL,
978            LEN_FULL,
979            Permissions::No,
980            [
981                MappedRange {
982                    base: PHYS_R,
983                    length: LEN_R,
984                },
985                MappedRange {
986                    base: PHYS_W,
987                    length: LEN_W,
988                },
989            ],
990        );
991
992        // Test 5: Access R range as R, should succeed
993        verify_hit(
994            &iotlb,
995            IOVA_R,
996            LEN_R,
997            Permissions::Read,
998            [MappedRange {
999                base: PHYS_R,
1000                length: LEN_R,
1001            }],
1002        );
1003
1004        // Test 6: Access W range as W, should succeed
1005        verify_hit(
1006            &iotlb,
1007            IOVA_W,
1008            LEN_W,
1009            Permissions::Write,
1010            [MappedRange {
1011                base: PHYS_W,
1012                length: LEN_W,
1013            }],
1014        );
1015
1016        Ok(())
1017    }
1018
1019    /// Test IOTLB invalidation
1020    #[test]
1021    fn test_iotlb_invalidation() -> Result<(), Error> {
1022        const IOVA: GuestAddress = GuestAddress(42);
1023        const PHYS: GuestAddress = GuestAddress(87);
1024        const LEN: usize = 123;
1025        const INVAL_OFS: usize = LEN / 2;
1026        const INVAL_LEN: usize = 3;
1027        const IOVA_AT_INVAL: GuestAddress = GuestAddress(IOVA.0 + INVAL_OFS as u64);
1028        const PHYS_AT_INVAL: GuestAddress = GuestAddress(PHYS.0 + INVAL_OFS as u64);
1029        const IOVA_POST_INVAL: GuestAddress = GuestAddress(IOVA_AT_INVAL.0 + INVAL_LEN as u64);
1030        const PHYS_POST_INVAL: GuestAddress = GuestAddress(PHYS_AT_INVAL.0 + INVAL_LEN as u64);
1031        const POST_INVAL_LEN: usize = LEN - INVAL_OFS - INVAL_LEN;
1032
1033        let mut iotlb = Iotlb::new();
1034        iotlb.set_mapping(IOVA, PHYS, LEN, Permissions::ReadWrite)?;
1035        verify_hit(
1036            &iotlb,
1037            IOVA,
1038            LEN,
1039            Permissions::ReadWrite,
1040            [MappedRange {
1041                base: PHYS,
1042                length: LEN,
1043            }],
1044        );
1045
1046        // Invalidate something in the middle; expect mapping at the start, then miss, then further
1047        // mapping
1048        iotlb.invalidate_mapping(IOVA_AT_INVAL, INVAL_LEN);
1049        verify_hit(
1050            &iotlb,
1051            IOVA,
1052            INVAL_OFS,
1053            Permissions::ReadWrite,
1054            [MappedRange {
1055                base: PHYS,
1056                length: INVAL_OFS,
1057            }],
1058        );
1059        verify_fail(
1060            &iotlb,
1061            IOVA,
1062            LEN,
1063            Permissions::ReadWrite,
1064            [IovaRange {
1065                base: IOVA_AT_INVAL,
1066                length: INVAL_LEN,
1067            }],
1068            [],
1069        );
1070        verify_hit(
1071            &iotlb,
1072            IOVA_POST_INVAL,
1073            POST_INVAL_LEN,
1074            Permissions::ReadWrite,
1075            [MappedRange {
1076                base: PHYS_POST_INVAL,
1077                length: POST_INVAL_LEN,
1078            }],
1079        );
1080
1081        // And invalidate everything; expect full miss
1082        iotlb.invalidate_all();
1083        verify_fail(
1084            &iotlb,
1085            IOVA,
1086            LEN,
1087            Permissions::ReadWrite,
1088            [IovaRange {
1089                base: IOVA,
1090                length: LEN,
1091            }],
1092            [],
1093        );
1094
1095        Ok(())
1096    }
1097
1098    /// Create `IommuMemory` backed by multiple physical regions, all mapped into a single virtual
1099    /// region (if `virt_start`/`virt_perm` are given).
1100    ///
1101    /// Memory is filled with incrementing (overflowing) bytes, starting with value `value_offset`.
1102    #[cfg(feature = "backend-mmap")]
1103    fn create_virt_memory<B: NewBitmap>(
1104        virt_mapping: Option<(GuestAddress, Permissions)>,
1105        value_offset: u8,
1106        phys_regions: impl IntoIterator<Item = MappedRange>,
1107        bitmap: B,
1108    ) -> IommuMemory<GuestMemoryMmap<B>, SimpleIommu> {
1109        let phys_ranges = phys_regions
1110            .into_iter()
1111            .map(|range| (range.base, range.length))
1112            .collect::<Vec<(GuestAddress, usize)>>();
1113        let phys_mem = GuestMemoryMmap::<B>::from_ranges(&phys_ranges).unwrap();
1114
1115        let mut byte_val = value_offset;
1116        for (base, len) in &phys_ranges {
1117            let mut slices = phys_mem
1118                .get_slices(*base, *len, Permissions::Write)
1119                .inspect_err(|err| panic!("Failed to access memory: {err}"))
1120                .unwrap();
1121            let slice = slices
1122                .next()
1123                .unwrap()
1124                .inspect_err(|err| panic!("Failed to access memory: {err}"))
1125                .unwrap();
1126            assert!(slices.next().is_none(), "Expected single slice");
1127
1128            for i in 0..*len {
1129                slice.write(&[byte_val], i).unwrap();
1130                byte_val = byte_val.wrapping_add(1);
1131            }
1132        }
1133
1134        let mem = IommuMemory::new(phys_mem, SimpleIommu::new(), true, bitmap);
1135
1136        // IOMMU is in use, this will be `None`
1137        assert!(mem.physical_memory().is_none());
1138
1139        if let Some((mut virt, perm)) = virt_mapping {
1140            for (base, len) in phys_ranges {
1141                let mut iotlb = mem.iommu().iotlb.write().unwrap();
1142                iotlb.set_mapping(virt, base, len, perm).unwrap();
1143                virt = GuestAddress(virt.0 + len as u64);
1144            }
1145        }
1146
1147        mem
1148    }
1149
1150    /// Verify the byte contents at `start`+`len`.  Assume the initial byte value to be
1151    /// `value_offset`.
1152    ///
1153    /// Each byte is expected to be incremented over the last (as created by
1154    /// `create_virt_memory()`).
1155    ///
1156    /// Return an error if mapping fails, but just panic if there is a content mismatch.
1157    #[cfg(feature = "backend-mmap")]
1158    fn check_virt_mem_content(
1159        mem: &impl GuestMemory,
1160        start: GuestAddress,
1161        len: usize,
1162        value_offset: u8,
1163    ) -> GuestMemoryResult<()> {
1164        let mut ref_value = value_offset;
1165        for slice in mem.get_slices(start, len, Permissions::Read)? {
1166            let slice = slice?;
1167
1168            let count = slice.len();
1169            let mut data = vec![0u8; count];
1170            slice.read(&mut data, 0).unwrap();
1171            for val in data {
1172                assert_eq!(val, ref_value);
1173                ref_value = ref_value.wrapping_add(1);
1174            }
1175        }
1176
1177        Ok(())
1178    }
1179
1180    #[cfg(feature = "backend-mmap")]
1181    fn verify_virt_mem_content(
1182        m: &impl GuestMemory,
1183        start: GuestAddress,
1184        len: usize,
1185        value_offset: u8,
1186    ) {
1187        check_virt_mem_content(m, start, len, value_offset).unwrap();
1188    }
1189
1190    /// Verify that trying to read from `start`+`len` fails (because of `CannotResolve`).
1191    ///
1192    /// The reported failed-to-map range is checked to be `fail_start`+`fail_len`.  `fail_start`
1193    /// defaults to `start`, `fail_len` defaults to the remaining length of the whole mapping
1194    /// starting at `fail_start` (i.e. `start + len - fail_start`).
1195    #[cfg(feature = "backend-mmap")]
1196    fn verify_virt_mem_error(
1197        m: &impl GuestMemory,
1198        start: GuestAddress,
1199        len: usize,
1200        fail_start: Option<GuestAddress>,
1201        fail_len: Option<usize>,
1202    ) {
1203        let fail_start = fail_start.unwrap_or(start);
1204        let fail_len = fail_len.unwrap_or(len - (fail_start.0 - start.0) as usize);
1205        let err = check_virt_mem_content(m, start, len, 0).unwrap_err();
1206        let GuestMemoryError::IommuError(Error::CannotResolve {
1207            iova_range: failed_range,
1208            reason: _,
1209        }) = err
1210        else {
1211            panic!("Unexpected error: {err:?}");
1212        };
1213        assert_eq!(
1214            failed_range,
1215            IovaRange {
1216                base: fail_start,
1217                length: fail_len,
1218            }
1219        );
1220    }
1221
1222    /// Test `IommuMemory`, with pre-filled mappings.
1223    #[cfg(feature = "backend-mmap")]
1224    #[test]
1225    fn test_iommu_memory_pre_mapped() {
1226        const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
1227        const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
1228        const PHYS_LEN: usize = 128;
1229        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
1230        const VIRT_LEN: usize = PHYS_LEN * 2;
1231        const VIRT_POST_MAP: GuestAddress = GuestAddress(VIRT_START.0 + VIRT_LEN as u64);
1232
1233        let mem = create_virt_memory(
1234            Some((VIRT_START, Permissions::Read)),
1235            0,
1236            [
1237                MappedRange {
1238                    base: PHYS_START_1,
1239                    length: PHYS_LEN,
1240                },
1241                MappedRange {
1242                    base: PHYS_START_2,
1243                    length: PHYS_LEN,
1244                },
1245            ],
1246            (),
1247        );
1248
1249        assert!(mem.check_range(VIRT_START, VIRT_LEN, Permissions::No));
1250        assert!(mem.check_range(VIRT_START, VIRT_LEN, Permissions::Read));
1251        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Write));
1252        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::ReadWrite));
1253        assert!(!mem.check_range(GuestAddress(VIRT_START.0 - 1), 1, Permissions::No));
1254        assert!(!mem.check_range(VIRT_POST_MAP, 1, Permissions::No));
1255
1256        verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
1257        verify_virt_mem_error(&mem, GuestAddress(VIRT_START.0 - 1), 1, None, None);
1258        verify_virt_mem_error(&mem, VIRT_POST_MAP, 1, None, None);
1259        verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN + 1, Some(VIRT_POST_MAP), None);
1260    }
1261
1262    /// Test `IommuMemory`, with mappings created through the IOMMU on the fly.
1263    #[cfg(feature = "backend-mmap")]
1264    #[test]
1265    fn test_iommu_memory_live_mapped() {
1266        const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
1267        const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
1268        const PHYS_LEN: usize = 128;
1269        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
1270        const VIRT_START_1: GuestAddress = VIRT_START;
1271        const VIRT_START_2: GuestAddress = GuestAddress(VIRT_START.0 + PHYS_LEN as u64);
1272        const VIRT_LEN: usize = PHYS_LEN * 2;
1273        const VIRT_POST_MAP: GuestAddress = GuestAddress(VIRT_START.0 + VIRT_LEN as u64);
1274
1275        let mem = create_virt_memory(
1276            None,
1277            0,
1278            [
1279                MappedRange {
1280                    base: PHYS_START_1,
1281                    length: PHYS_LEN,
1282                },
1283                MappedRange {
1284                    base: PHYS_START_2,
1285                    length: PHYS_LEN,
1286                },
1287            ],
1288            (),
1289        );
1290
1291        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::No));
1292        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Read));
1293        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::Write));
1294        assert!(!mem.check_range(VIRT_START, VIRT_LEN, Permissions::ReadWrite));
1295        assert!(!mem.check_range(GuestAddress(VIRT_START.0 - 1), 1, Permissions::No));
1296        assert!(!mem.check_range(VIRT_POST_MAP, 1, Permissions::No));
1297
1298        verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN, None, None);
1299        verify_virt_mem_error(&mem, GuestAddress(VIRT_START.0 - 1), 1, None, None);
1300        verify_virt_mem_error(&mem, VIRT_POST_MAP, 1, None, None);
1301        verify_virt_mem_error(&mem, VIRT_START, VIRT_LEN + 1, None, None);
1302
1303        let iommu = mem.iommu();
1304
1305        // Can only map one region at a time (with `SimpleIommu`), so only access `PHYS_LEN` first,
1306        // not `VIRT_LEN`
1307        iommu.expect_mapping_request(PHYS_START_1);
1308        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
1309        iommu.verify_mapping_request(VIRT_START_1, PHYS_LEN, true);
1310
1311        iommu.expect_mapping_request(PHYS_START_2);
1312        verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
1313        iommu.verify_mapping_request(VIRT_START_2, PHYS_LEN, true);
1314
1315        // Also check invalid access failure
1316        iommu
1317            .iotlb
1318            .write()
1319            .unwrap()
1320            .set_mapping(VIRT_START_1, PHYS_START_1, PHYS_LEN, Permissions::Write)
1321            .unwrap();
1322
1323        iommu.expect_mapping_request(PHYS_START_1);
1324        verify_virt_mem_content(&mem, VIRT_START, VIRT_LEN, 0);
1325        iommu.verify_mapping_request(VIRT_START_1, PHYS_LEN, false);
1326    }
1327
1328    /// Test replacing the physical memory of an `IommuMemory`.
1329    #[cfg(feature = "backend-mmap")]
1330    #[test]
1331    fn test_mem_replace() {
1332        const PHYS_START_1: GuestAddress = GuestAddress(0x4000);
1333        const PHYS_START_2: GuestAddress = GuestAddress(0x8000);
1334        const PHYS_LEN: usize = 128;
1335        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
1336
1337        // Note only one physical region.  `mem2` will have two, to see that this pattern
1338        // (`with_replaced_backend()`) can be used to e.g. extend physical memory.
1339        let mem = create_virt_memory(
1340            Some((VIRT_START, Permissions::Read)),
1341            0,
1342            [MappedRange {
1343                base: PHYS_START_1,
1344                length: PHYS_LEN,
1345            }],
1346            (),
1347        );
1348
1349        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
1350        verify_virt_mem_error(
1351            &mem,
1352            VIRT_START,
1353            PHYS_LEN * 2,
1354            Some(GuestAddress(VIRT_START.0 + PHYS_LEN as u64)),
1355            None,
1356        );
1357
1358        let mut mem2 = create_virt_memory(
1359            Some((VIRT_START, Permissions::Read)),
1360            42,
1361            [
1362                MappedRange {
1363                    base: PHYS_START_1,
1364                    length: PHYS_LEN,
1365                },
1366                MappedRange {
1367                    base: PHYS_START_2,
1368                    length: PHYS_LEN,
1369                },
1370            ],
1371            (),
1372        );
1373
1374        verify_virt_mem_content(&mem2, VIRT_START, PHYS_LEN * 2, 42);
1375
1376        // Clone `mem` before replacing its physical memory, to see that works
1377        let mem_cloned = mem.clone();
1378
1379        // Use `mem2`'s physical memory for `mem`
1380        mem2.set_iommu_enabled(false);
1381        let pmem2 = mem2.physical_memory().unwrap();
1382        assert!(std::ptr::eq(pmem2, mem2.get_backend()));
1383        let mem = mem.with_replaced_backend(pmem2.clone());
1384
1385        // The physical memory has been replaced, but `mem` still uses its old IOMMU, so the
1386        // mapping for everything past VIRT_START + PHYS_LEN does not yet exist.
1387        mem.iommu().expect_mapping_request(PHYS_START_2);
1388        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN * 2, 42);
1389        mem.iommu().verify_mapping_request(
1390            GuestAddress(VIRT_START.0 + PHYS_LEN as u64),
1391            PHYS_LEN,
1392            true,
1393        );
1394
1395        // Verify that `mem`'s clone is still the same (though it does use the same IOMMU)
1396        verify_virt_mem_content(&mem_cloned, VIRT_START, PHYS_LEN, 0);
1397        // See, it's the same IOMMU (i.e. it has a mapping PHYS_START_2):
1398        verify_hit(
1399            mem_cloned.iommu().iotlb.read().unwrap(),
1400            VIRT_START,
1401            PHYS_LEN * 2,
1402            Permissions::Read,
1403            [
1404                MappedRange {
1405                    base: PHYS_START_1,
1406                    length: PHYS_LEN,
1407                },
1408                MappedRange {
1409                    base: PHYS_START_2,
1410                    length: PHYS_LEN,
1411                },
1412            ],
1413        );
1414        // (But we cannot access that mapping because `mem_cloned`'s physical memory does not
1415        // contain that physical range.)
1416    }
1417
1418    /// In `mem`'s dirty bitmap, verify that the given `clean` addresses are clean, and the `dirty`
1419    /// addresses are dirty.  Auto-clear the dirty addresses checked.
1420    ///
1421    /// Cannot import `GuestMemoryBackend` in this module, as that would interfere with `GuestMemory` for
1422    /// methods that have the same name between the two.
1423    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
1424    fn verify_mem_bitmap<
1425        M: crate::GuestMemoryBackend<R = R>,
1426        R: GuestMemoryRegion<B = AtomicBitmap>,
1427        I: Iommu,
1428    >(
1429        mem: &IommuMemory<M, I>,
1430        clean: impl IntoIterator<Item = usize>,
1431        dirty: impl IntoIterator<Item = usize>,
1432    ) {
1433        let bitmap = mem.bitmap();
1434        for addr in clean {
1435            if bitmap.is_addr_set(addr) {
1436                panic!("Expected addr {addr:#x} to be clean, but is dirty");
1437            }
1438        }
1439        for addr in dirty {
1440            if !bitmap.is_addr_set(addr) {
1441                panic!("Expected addr {addr:#x} to be dirty, but is clean");
1442            }
1443            bitmap.reset_addr_range(addr, 1);
1444        }
1445    }
1446
1447    #[cfg(all(feature = "backend-bitmap", feature = "backend-mmap"))]
1448    #[test]
1449    fn test_dirty_bitmap() {
1450        const PAGE_SIZE: usize = 4096;
1451        const PHYS_START: GuestAddress = GuestAddress(0x4000);
1452        const PHYS_LEN: usize = PAGE_SIZE * 2;
1453        const PHYS_PAGE_0: usize = PHYS_START.0 as usize;
1454        const PHYS_PAGE_1: usize = PHYS_START.0 as usize + PAGE_SIZE;
1455        const VIRT_START: GuestAddress = GuestAddress(0x2a000);
1456        const VIRT_PAGE_0: usize = VIRT_START.0 as usize;
1457        const VIRT_PAGE_1: usize = VIRT_START.0 as usize + PAGE_SIZE;
1458
1459        let bitmap = AtomicBitmap::new(
1460            VIRT_START.0 as usize + PHYS_LEN,
1461            NonZeroUsize::new(PAGE_SIZE).unwrap(),
1462        );
1463
1464        let mem = create_virt_memory(
1465            Some((VIRT_START, Permissions::ReadWrite)),
1466            0,
1467            [MappedRange {
1468                base: PHYS_START,
1469                length: PHYS_LEN,
1470            }],
1471            bitmap,
1472        );
1473
1474        // Check bitmap is cleared before everything -- through the whole test, the physical ranges
1475        // should remain clean as the bitmap is only supposed to track IOVAs
1476        verify_mem_bitmap(
1477            &mem,
1478            [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
1479            [],
1480        );
1481
1482        // Just to be sure, check that PHYS_PAGE_0 and PHYS_PAGE_1 can technically be dirtied at
1483        // all; otherwise, testing that they remain clean would not prove much
1484        mem.bitmap().set_addr_range(PHYS_PAGE_0, 2 * PAGE_SIZE);
1485        verify_mem_bitmap(&mem, [VIRT_PAGE_0, VIRT_PAGE_1], [PHYS_PAGE_0, PHYS_PAGE_1]);
1486
1487        // Just read from memory, should not dirty bitmap
1488        verify_virt_mem_content(&mem, VIRT_START, PHYS_LEN, 0);
1489        verify_mem_bitmap(
1490            &mem,
1491            [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
1492            [],
1493        );
1494
1495        // Verify that writing to a writeable slice causes dirtying, i.e. that the `VolatileSlice`
1496        // returned here correctly dirties the bitmap when written to
1497        let mut slices = mem
1498            .get_slices(VIRT_START, PHYS_LEN, Permissions::Write)
1499            .inspect_err(|err| panic!("Failed to access memory: {err}"))
1500            .unwrap();
1501        let slice = slices
1502            .next()
1503            .unwrap()
1504            .inspect_err(|err| panic!("Failed to access memory: {err}"))
1505            .unwrap();
1506        assert!(slices.next().is_none(), "Expected single slice");
1507
1508        verify_mem_bitmap(
1509            &mem,
1510            [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0, VIRT_PAGE_1],
1511            [],
1512        );
1513
1514        slice
1515            .store(42, 0, Ordering::Relaxed)
1516            .inspect_err(|err| panic!("Writing to memory failed: {err}"))
1517            .unwrap();
1518        verify_mem_bitmap(&mem, [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_1], [VIRT_PAGE_0]);
1519
1520        slice
1521            .store(23, PAGE_SIZE, Ordering::Relaxed)
1522            .inspect_err(|err| panic!("Writing to memory failed: {err}"))
1523            .unwrap();
1524        verify_mem_bitmap(&mem, [PHYS_PAGE_0, PHYS_PAGE_1, VIRT_PAGE_0], [VIRT_PAGE_1]);
1525    }
1526}