vm_memory/guest_memory.rs
1// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2//
3// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4//
5// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
6// Use of this source code is governed by a BSD-style license that can be
7// found in the LICENSE-BSD-3-Clause file.
8//
9// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
10
11//! Traits to track and access the physical memory of the guest.
12//!
13//! To make the abstraction as generic as possible, all the core traits declared here only define
14//! methods to access guest's memory, and never define methods to manage (create, delete, insert,
15//! remove etc) guest's memory. This way, the guest memory consumers (virtio device drivers,
16//! vhost drivers and boot loaders etc) may be decoupled from the guest memory provider (typically
17//! a hypervisor).
18//!
19//! Traits and Structs
20//! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
21//! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
22//! region.
//! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represents a contiguous region of the
//! guest's physical memory.
//! - [`GuestMemoryBackend`](trait.GuestMemoryBackend.html): represents a collection of
//! `GuestMemoryRegion` objects.
//! The main responsibilities of the `GuestMemoryBackend` trait are:
//! - hide the details of accessing the guest's physical address space.
//! - map a request address to a `GuestMemoryRegion` object and relay the request to it.
//! - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
31//!
32//! Whenever a collection of `GuestMemoryRegion` objects is mutable,
33//! [`GuestAddressSpace`](trait.GuestAddressSpace.html) should be implemented
34//! for clients to obtain a [`GuestMemoryBackend`] reference or smart pointer.
35//!
36//! The `GuestMemoryRegion` trait has an associated `B: Bitmap` type which is used to handle
37//! dirty bitmap tracking. Backends are free to define the granularity (or whether tracking is
38//! actually performed at all). Those that do implement tracking functionality are expected to
39//! ensure the correctness of the underlying `Bytes` implementation. The user has to explicitly
40//! record (using the handle returned by `GuestRegionMmap::bitmap`) write accesses performed
//! via pointers, references, or slices returned by methods of `GuestMemoryBackend`, `GuestMemoryRegion`,
42//! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`.
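//!
//! # Examples (uses the `backend-mmap` and `backend-bitmap` features)
//!
//! A minimal sketch of the rule above, assuming the `AtomicBitmap` type provided by the
//! `backend-bitmap` feature: accesses that go through `Bytes` dirty the bitmap automatically,
//! while writes through a raw host pointer must be recorded by hand.
//!
//! ```
//! # #[cfg(all(feature = "backend-mmap", feature = "backend-bitmap"))]
//! # {
//! # use vm_memory::bitmap::{AtomicBitmap, Bitmap};
//! # use vm_memory::{Bytes, GuestAddress, GuestMemoryBackend, GuestMemoryRegion, GuestMemoryMmap};
//! let gm = GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0), 0x2000)])
//!     .expect("Could not create guest memory");
//! let region = gm.find_region(GuestAddress(0)).unwrap();
//!
//! // Writes performed through `Bytes` are tracked automatically ...
//! gm.write_obj(0x42u64, GuestAddress(0)).unwrap();
//! assert!(region.bitmap().dirty_at(0));
//!
//! // ... but writes through a raw host pointer have to be recorded explicitly.
//! let host = gm.get_host_address(GuestAddress(0x1000)).unwrap();
//! // SAFETY: `host` points to at least one byte of valid, exclusively accessed guest memory.
//! unsafe { host.write(0xaa) };
//! region.bitmap().mark_dirty(0x1000, 1);
//! assert!(region.bitmap().dirty_at(0x1000));
//! # }
//! ```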
43
44use std::convert::From;
45use std::fs::File;
46use std::io;
47use std::iter::FusedIterator;
48use std::mem::size_of;
49use std::ops::{BitAnd, BitOr, Deref};
50use std::rc::Rc;
51use std::sync::atomic::Ordering;
52use std::sync::Arc;
53
54use crate::address::{Address, AddressValue};
55use crate::bitmap::{Bitmap, BitmapSlice, BS, MS};
56use crate::bytes::{AtomicAccess, Bytes};
57use crate::io::{ReadVolatile, WriteVolatile};
58#[cfg(feature = "iommu")]
59use crate::iommu::Error as IommuError;
60use crate::volatile_memory::{self, VolatileSlice};
61use crate::GuestMemoryRegion;
62
63/// Errors associated with handling guest memory accesses.
64#[allow(missing_docs)]
65#[derive(Debug, thiserror::Error)]
66pub enum Error {
67 /// Failure in finding a guest address in any memory regions mapped by this guest.
    #[error("Guest memory error: invalid guest address {}", .0.raw_value())]
69 InvalidGuestAddress(GuestAddress),
70 /// Couldn't read/write from the given source.
71 #[error("Guest memory error: {0}")]
72 IOError(io::Error),
73 /// Incomplete read or write.
74 #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")]
75 PartialBuffer { expected: usize, completed: usize },
76 /// Requested backend address is out of range.
77 #[error("Guest memory error: invalid backend address")]
78 InvalidBackendAddress,
79 /// Host virtual address not available.
80 #[error("Guest memory error: host virtual address not available")]
81 HostAddressNotAvailable,
82 /// The length returned by the callback passed to `try_access` is outside the address range.
83 #[error(
84 "The length returned by the callback passed to `try_access` is outside the address range."
85 )]
86 CallbackOutOfRange,
87 /// The address to be read by `try_access` is outside the address range.
88 #[error("The address to be read by `try_access` is outside the address range")]
89 GuestAddressOverflow,
90 #[cfg(feature = "iommu")]
91 /// IOMMU translation error
92 #[error("IOMMU failed to translate guest address: {0}")]
93 IommuError(IommuError),
94}
95
96impl From<volatile_memory::Error> for Error {
97 fn from(e: volatile_memory::Error) -> Self {
98 match e {
99 volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
100 volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
101 volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
102 volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
103 volatile_memory::Error::IOError(e) => Error::IOError(e),
104 volatile_memory::Error::PartialBuffer {
105 expected,
106 completed,
107 } => Error::PartialBuffer {
108 expected,
109 completed,
110 },
111 }
112 }
113}
114
115/// Result of guest memory operations.
116pub type Result<T> = std::result::Result<T, Error>;
117
118/// Represents a guest physical address (GPA).
119///
120/// # Notes:
/// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
/// `u64` is used to store the raw value regardless of whether the guest is a 32-bit or 64-bit
/// virtual machine.
124#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
125pub struct GuestAddress(pub u64);
126impl_address_ops!(GuestAddress, u64);
127
128/// Represents an offset inside a region.
129#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
130pub struct MemoryRegionAddress(pub u64);
131impl_address_ops!(MemoryRegionAddress, u64);
132
133/// Type of the raw value stored in a `GuestAddress` object.
134pub type GuestUsize = <GuestAddress as AddressValue>::V;
135
136/// Represents the start point within a `File` that backs a `GuestMemoryRegion`.
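///
/// # Examples
///
/// A minimal sketch of sharing one backing file between two `FileOffset` objects (using the
/// `vmm-sys-util` crate's `TempFile`, as the unit tests in this module do):
///
/// ```
/// # use std::sync::Arc;
/// # use vm_memory::FileOffset;
/// # use vmm_sys_util::tempfile::TempFile;
/// let file = TempFile::new().unwrap().into_file();
/// let first = FileOffset::new(file, 0);
/// assert_eq!(first.start(), 0);
///
/// // Reuse the same `Arc<File>` for a second mapping starting at a different offset.
/// let second = FileOffset::from_arc(Arc::clone(first.arc()), 0x10_0000);
/// assert_eq!(second.start(), 0x10_0000);
/// ```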
137#[derive(Clone, Debug)]
138pub struct FileOffset {
139 file: Arc<File>,
140 start: u64,
141}
142
143impl FileOffset {
144 /// Creates a new `FileOffset` object.
145 pub fn new(file: File, start: u64) -> Self {
146 FileOffset::from_arc(Arc::new(file), start)
147 }
148
    /// Creates a new `FileOffset` object based on an existing `Arc<File>`.
150 pub fn from_arc(file: Arc<File>, start: u64) -> Self {
151 FileOffset { file, start }
152 }
153
154 /// Returns a reference to the inner `File` object.
155 pub fn file(&self) -> &File {
156 self.file.as_ref()
157 }
158
    /// Returns a reference to the inner `Arc<File>` object.
160 pub fn arc(&self) -> &Arc<File> {
161 &self.file
162 }
163
164 /// Returns the start offset within the file.
165 pub fn start(&self) -> u64 {
166 self.start
167 }
168}
169
170/// `GuestAddressSpace` provides a way to retrieve a `GuestMemoryBackend` object.
/// The vm-memory crate already provides trivial implementations for
/// references to `GuestMemoryBackend` and reference-counted `GuestMemoryBackend` objects,
173/// but the trait can also be implemented by any other struct in order
174/// to provide temporary access to a snapshot of the memory map.
175///
176/// In order to support generic mutable memory maps, devices (or other things
177/// that access memory) should store the memory as a `GuestAddressSpace<M>`.
178/// This example shows that references can also be used as the `GuestAddressSpace`
179/// implementation, providing a zero-cost abstraction whenever immutable memory
180/// maps are sufficient.
181///
182/// # Examples (uses the `backend-mmap` and `backend-atomic` features)
183///
184/// ```
185/// # #[cfg(feature = "backend-mmap")]
186/// # {
187/// # use std::sync::Arc;
188/// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryBackend, GuestMemoryMmap};
189/// #
190/// pub struct VirtioDevice<AS: GuestAddressSpace> {
191/// mem: Option<AS>,
192/// }
193///
194/// impl<AS: GuestAddressSpace> VirtioDevice<AS> {
195/// fn new() -> Self {
196/// VirtioDevice { mem: None }
197/// }
198/// fn activate(&mut self, mem: AS) {
199/// self.mem = Some(mem)
200/// }
201/// }
202///
203/// fn get_mmap() -> GuestMemoryMmap<()> {
204/// let start_addr = GuestAddress(0x1000);
205/// GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)])
206/// .expect("Could not create guest memory")
207/// }
208///
209/// // Using `VirtioDevice` with an immutable GuestMemoryMmap:
210/// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new();
211/// let mmap = get_mmap();
212/// for_immutable_mmap.activate(&mmap);
213/// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new();
214/// another.activate(&mmap);
215///
216/// # #[cfg(feature = "backend-atomic")]
217/// # {
218/// # use vm_memory::GuestMemoryAtomic;
219/// // Using `VirtioDevice` with a mutable GuestMemoryMmap:
220/// let mut for_mutable_mmap = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
221/// let atomic = GuestMemoryAtomic::new(get_mmap());
222/// for_mutable_mmap.activate(atomic.clone());
223/// let mut another = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
224/// another.activate(atomic.clone());
225///
226/// // atomic can be modified here...
227/// # }
228/// # }
229/// ```
230pub trait GuestAddressSpace: Clone {
231 /// The type that will be used to access guest memory.
232 type M: GuestMemory;
233
234 /// A type that provides access to the memory.
235 type T: Clone + Deref<Target = Self::M>;
236
237 /// Return an object (e.g. a reference or guard) that can be used
238 /// to access memory through this address space. The object provides
239 /// a consistent snapshot of the memory map.
240 fn memory(&self) -> Self::T;
241}
242
243impl<M: GuestMemory> GuestAddressSpace for &M {
244 type M = M;
245 type T = Self;
246
247 fn memory(&self) -> Self {
248 self
249 }
250}
251
252impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
253 type M = M;
254 type T = Self;
255
256 fn memory(&self) -> Self {
257 self.clone()
258 }
259}
260
261impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
262 type M = M;
263 type T = Self;
264
265 fn memory(&self) -> Self {
266 self.clone()
267 }
268}
269
270/// `GuestMemoryBackend` represents a container for an *immutable* collection of
271/// `GuestMemoryRegion` objects. `GuestMemoryBackend` provides the `Bytes<GuestAddress>`
272/// trait to hide the details of accessing guest memory by physical address.
273/// Interior mutability is not allowed for implementations of `GuestMemoryBackend` so
274/// that they always provide a consistent view of the memory map.
275///
/// The tasks of the `GuestMemoryBackend` trait are:
/// - map a request address to a `GuestMemoryRegion` object and relay the request to it.
/// - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
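///
/// # Examples (uses the `backend-mmap` feature)
///
/// A small sketch of code that is generic over any `GuestMemoryBackend` implementation:
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # {
/// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryRegion, GuestMemoryMmap};
/// // Total number of bytes covered by all regions; works with any backend type.
/// fn total_len<M: GuestMemoryBackend>(mem: &M) -> u64 {
///     mem.iter().map(|region| region.len()).sum()
/// }
///
/// let gm = GuestMemoryMmap::<()>::from_ranges(&[
///     (GuestAddress(0x1000), 0x400),
///     (GuestAddress(0x2000), 0x400),
/// ])
/// .expect("Could not create guest memory");
/// assert_eq!(total_len(&gm), 0x800);
/// # }
/// ```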
279pub trait GuestMemoryBackend {
280 /// Type of objects hosted by the address space.
281 type R: GuestMemoryRegion;
282
283 /// Returns the number of regions in the collection.
284 fn num_regions(&self) -> usize {
285 self.iter().count()
286 }
287
288 /// Returns the region containing the specified address or `None`.
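    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A short sketch of looking up the region that backs an address:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// // 0x11ff lies inside the single region, 0x1400 is one byte past its end.
    /// assert!(gm.find_region(GuestAddress(0x11ff)).is_some());
    /// assert!(gm.find_region(GuestAddress(0x1400)).is_none());
    /// # }
    /// ```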
289 fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
290 self.iter()
291 .find(|region| addr >= region.start_addr() && addr <= region.last_addr())
292 }
293
294 /// Gets an iterator over the entries in the collection.
295 ///
296 /// # Examples
297 ///
    /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
    ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
    ///   `backend-mmap` feature)
301 ///
302 /// ```
303 /// # #[cfg(feature = "backend-mmap")]
304 /// # {
305 /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryRegion, GuestMemoryMmap};
306 /// #
307 /// let start_addr1 = GuestAddress(0x0);
308 /// let start_addr2 = GuestAddress(0x400);
309 /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
310 /// .expect("Could not create guest memory");
311 ///
312 /// let total_size = gm
313 /// .iter()
314 /// .map(|region| region.len() / 1024)
315 /// .fold(0, |acc, size| acc + size);
316 /// assert_eq!(3, total_size)
317 /// # }
318 /// ```
319 fn iter(&self) -> impl Iterator<Item = &Self::R>;
320
321 /// Returns the maximum (inclusive) address managed by the
322 /// [`GuestMemoryBackend`](trait.GuestMemoryBackend.html).
323 ///
324 /// # Examples (uses the `backend-mmap` feature)
325 ///
326 /// ```
327 /// # #[cfg(feature = "backend-mmap")]
328 /// # {
329 /// # use vm_memory::{Address, GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
330 /// #
331 /// let start_addr = GuestAddress(0x1000);
332 /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
333 /// .expect("Could not create guest memory");
334 ///
335 /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr()));
336 /// # }
337 /// ```
338 fn last_addr(&self) -> GuestAddress {
339 self.iter()
340 .map(GuestMemoryRegion::last_addr)
341 .fold(GuestAddress(0), std::cmp::max)
342 }
343
344 /// Tries to convert an absolute address to a relative address within the corresponding region.
345 ///
346 /// Returns `None` if `addr` isn't present within the memory of the guest.
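    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A small sketch of translating a guest physical address into a region-relative offset:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{Address, GuestAddress, GuestMemoryBackend, GuestMemoryRegion, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// let (region, offset) = gm.to_region_addr(GuestAddress(0x1234)).unwrap();
    /// assert_eq!(region.start_addr(), GuestAddress(0x1000));
    /// // The region starts at 0x1000, so the offset within it is 0x234.
    /// assert_eq!(offset.raw_value(), 0x234);
    /// # }
    /// ```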
347 fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
348 self.find_region(addr)
349 .map(|r| (r, r.to_region_addr(addr).unwrap()))
350 }
351
352 /// Returns `true` if the given address is present within the memory of the guest.
353 fn address_in_range(&self, addr: GuestAddress) -> bool {
354 self.find_region(addr).is_some()
355 }
356
357 /// Returns the given address if it is present within the memory of the guest.
358 fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
359 self.find_region(addr).map(|_| addr)
360 }
361
362 /// Check whether the range [base, base + len) is valid.
363 fn check_range(&self, base: GuestAddress, len: usize) -> bool {
        // get_slices() ensures that if no error happens, the cumulative length of all slices
        // equals `len`.
366 self.get_slices(base, len).all(|r| r.is_ok())
367 }
368
369 /// Returns the address plus the offset if it is present within the memory of the guest.
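    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A brief sketch; note that the check is performed on the *resulting* address:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// assert_eq!(
    ///     gm.checked_offset(GuestAddress(0x1000), 0x200),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// // 0x1000 + 0x400 is one byte past the end of the region.
    /// assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x400), None);
    /// # }
    /// ```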
370 fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
371 base.checked_add(offset as u64)
372 .and_then(|addr| self.check_address(addr))
373 }
374
375 /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`.
376 ///
377 /// The address range `[addr, addr + count)` may span more than one
378 /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it.
    /// So [`try_access()`](trait.GuestMemoryBackend.html#method.try_access) invokes the callback `f`
    /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns:
    /// - the error code returned by the callback `f`
382 /// - the size of the already handled data when encountering the first hole
383 /// - the size of the already handled data when the whole range has been handled
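    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// New code should prefer [`get_slices()`](trait.GuestMemoryBackend.html#method.get_slices); a
    /// sketch of the equivalent iterator-based pattern:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// // Instead of a callback invoked once per region, each covered chunk is yielded as a slice.
    /// let mut handled = 0;
    /// for slice in gm.get_slices(GuestAddress(0x1000), 0x400) {
    ///     handled += slice.expect("range is valid").len();
    /// }
    /// assert_eq!(handled, 0x400);
    /// # }
    /// ```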
384 #[deprecated(
385 since = "0.17.0",
386 note = "supplemented by external iterator `get_slices()`"
387 )]
388 fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
389 where
390 F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
391 {
392 let mut cur = addr;
393 let mut total = 0;
394 while let Some(region) = self.find_region(cur) {
395 let start = region.to_region_addr(cur).unwrap();
396 let cap = region.len() - start.raw_value();
397 let len = std::cmp::min(cap, (count - total) as GuestUsize);
398 match f(total, len as usize, start, region) {
399 // no more data
400 Ok(0) => return Ok(total),
401 // made some progress
402 Ok(len) => {
403 total = match total.checked_add(len) {
404 Some(x) if x < count => x,
405 Some(x) if x == count => return Ok(x),
406 _ => return Err(Error::CallbackOutOfRange),
407 };
408 cur = match cur.overflowing_add(len as GuestUsize) {
409 (x @ GuestAddress(0), _) | (x, false) => x,
410 (_, true) => return Err(Error::GuestAddressOverflow),
411 };
412 }
413 // error happened
414 e => return e,
415 }
416 }
417 if total == 0 {
418 Err(Error::InvalidGuestAddress(addr))
419 } else {
420 Ok(total)
421 }
422 }
423
424 /// Get the host virtual address corresponding to the guest address.
425 ///
426 /// Some [`GuestMemoryBackend`](trait.GuestMemoryBackend.html) implementations, like `GuestMemoryMmap`,
427 /// have the capability to mmap the guest address range into virtual address space of the host
428 /// for direct access, so the corresponding host virtual address may be passed to other
429 /// subsystems.
430 ///
431 /// # Note
432 /// The underlying guest memory is not protected from memory aliasing, which breaks the
    /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
    /// concurrent accesses to the underlying guest memory.
435 ///
436 /// # Arguments
437 /// * `addr` - Guest address to convert.
438 ///
439 /// # Examples (uses the `backend-mmap` feature)
440 ///
441 /// ```
442 /// # #[cfg(feature = "backend-mmap")]
443 /// # {
444 /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
445 /// #
446 /// # let start_addr = GuestAddress(0x1000);
447 /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)])
448 /// # .expect("Could not create guest memory");
449 /// #
450 /// let addr = gm
451 /// .get_host_address(GuestAddress(0x1200))
452 /// .expect("Could not get host address");
453 /// println!("Host address is {:p}", addr);
454 /// # }
455 /// ```
456 fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
457 self.to_region_addr(addr)
458 .ok_or(Error::InvalidGuestAddress(addr))
459 .and_then(|(r, addr)| r.get_host_address(addr))
460 }
461
462 /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
463 /// `addr`.
464 fn get_slice(
465 &self,
466 addr: GuestAddress,
467 count: usize,
468 ) -> Result<VolatileSlice<'_, MS<'_, Self>>> {
469 self.to_region_addr(addr)
470 .ok_or(Error::InvalidGuestAddress(addr))
471 .and_then(|(r, addr)| r.get_slice(addr, count))
472 }
473
474 /// Returns an iterator over [`VolatileSlice`](struct.VolatileSlice.html)s, together covering
475 /// `count` bytes starting at `addr`.
476 ///
477 /// Iterating in this way is necessary because the given address range may be fragmented across
478 /// multiple [`GuestMemoryRegion`]s.
479 ///
480 /// The iterator’s items are wrapped in [`Result`], i.e. errors are reported on individual
481 /// items. If there is no such error, the cumulative length of all items will be equal to
482 /// `count`. If `count` is 0, an empty iterator will be returned.
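    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A sketch of walking a range that crosses a region boundary, slice by slice:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[
    ///     (GuestAddress(0x0), 0x400),
    ///     (GuestAddress(0x400), 0x400),
    /// ])
    /// .expect("Could not create guest memory");
    ///
    /// // The range 0x300..0x500 spans both regions, so two slices are produced.
    /// let slices: Vec<_> = gm
    ///     .get_slices(GuestAddress(0x300), 0x200)
    ///     .collect::<Result<_, _>>()
    ///     .expect("range is valid");
    /// assert_eq!(slices.len(), 2);
    /// assert_eq!(slices.iter().map(|s| s.len()).sum::<usize>(), 0x200);
    /// # }
    /// ```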
483 fn get_slices<'a>(
484 &'a self,
485 addr: GuestAddress,
486 count: usize,
487 ) -> GuestMemoryBackendSliceIterator<'a, Self> {
488 GuestMemoryBackendSliceIterator {
489 mem: self,
490 addr,
491 count,
492 }
493 }
494}
495
496/// Iterates over [`VolatileSlice`]s that together form a guest memory area.
497///
498/// Returned by [`GuestMemoryBackend::get_slices()`].
499#[derive(Debug)]
500pub struct GuestMemoryBackendSliceIterator<'a, M: GuestMemoryBackend + ?Sized> {
501 /// Underlying memory
502 mem: &'a M,
503 /// Next address in the guest memory area
504 addr: GuestAddress,
505 /// Remaining bytes in the guest memory area
506 count: usize,
507}
508
509impl<'a, M: GuestMemoryBackend + ?Sized> GuestMemoryBackendSliceIterator<'a, M> {
510 /// Helper function for [`<Self as Iterator>::next()`](GuestMemoryBackendSliceIterator::next).
511 ///
512 /// Get the next slice (i.e. the one starting from `self.addr` with a length up to
513 /// `self.count`) and update the internal state.
514 ///
515 /// # Safety
516 ///
    /// This function does not reset `self.count` to 0 in case of an error, i.e. it will not stop
    /// iterating. Actual behavior after an error is ill-defined, so the caller must check the
    /// return value, and in case of an error, reset `self.count` to 0.
520 ///
521 /// (This is why this function exists, so this resetting can be done in a single central
522 /// location.)
523 unsafe fn do_next(&mut self) -> Option<Result<VolatileSlice<'a, MS<'a, M>>>> {
524 if self.count == 0 {
525 return None;
526 }
527
528 let Some((region, start)) = self.mem.to_region_addr(self.addr) else {
529 return Some(Err(Error::InvalidGuestAddress(self.addr)));
530 };
531
532 let cap = region.len() - start.raw_value();
533 let len = std::cmp::min(cap as usize, self.count);
534
535 self.count -= len;
536 self.addr = match self.addr.overflowing_add(len as GuestUsize) {
537 (x @ GuestAddress(0), _) | (x, false) => x,
538 (_, true) => return Some(Err(Error::GuestAddressOverflow)),
539 };
540
541 Some(region.get_slice(start, len).inspect(|s| {
542 assert_eq!(
543 s.len(),
544 len,
545 "get_slice() returned a slice with wrong length"
546 )
547 }))
548 }
549
    /// Adapts this [`GuestMemoryBackendSliceIterator`] to return `None` (i.e. gracefully terminate)
    /// when it encounters an error after successfully producing at least one slice.
    /// Returns an error if requesting the first slice returns an error.
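    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A sketch: the requested range extends past the end of guest memory, so iteration simply
    /// ends after the slices that could be produced:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryBackend, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// // Request more bytes than the region holds: the first slice is still produced,
    /// // and iteration then terminates instead of yielding an error item.
    /// let covered: usize = gm
    ///     .get_slices(GuestAddress(0x1000), 0x800)
    ///     .stop_on_error()
    ///     .expect("first slice is valid")
    ///     .map(|slice| slice.len())
    ///     .sum();
    /// assert_eq!(covered, 0x400);
    /// # }
    /// ```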
553 pub fn stop_on_error(self) -> Result<impl Iterator<Item = VolatileSlice<'a, MS<'a, M>>>> {
554 <Self as GuestMemorySliceIterator<'a, MS<'a, M>>>::stop_on_error(self)
555 }
556}
557
558impl<'a, M: GuestMemoryBackend + ?Sized> Iterator for GuestMemoryBackendSliceIterator<'a, M> {
559 type Item = Result<VolatileSlice<'a, MS<'a, M>>>;
560
561 fn next(&mut self) -> Option<Self::Item> {
562 // SAFETY:
563 // We reset `self.count` to 0 on error
564 match unsafe { self.do_next() } {
565 Some(Ok(slice)) => Some(Ok(slice)),
566 other => {
567 // On error (or end), reset to 0 so iteration remains stopped
568 self.count = 0;
569 other
570 }
571 }
572 }
573}
574
575impl<'a, M: GuestMemoryBackend + ?Sized> GuestMemorySliceIterator<'a, MS<'a, M>>
576 for GuestMemoryBackendSliceIterator<'a, M>
577{
578}
579
580/// This iterator continues to return `None` when exhausted.
581///
582/// [`<Self as Iterator>::next()`](GuestMemoryBackendSliceIterator::next) sets `self.count` to 0 when
583/// returning `None`, ensuring that it will only return `None` from that point on.
584impl<M: GuestMemoryBackend + ?Sized> FusedIterator for GuestMemoryBackendSliceIterator<'_, M> {}
585
586/// Allow accessing [`GuestMemory`] (and [`GuestMemoryBackend`]) objects via [`Bytes`].
587///
588/// Thanks to the [blanket implementation of `GuestMemory` for all `GuestMemoryBackend`
589/// types](../guest_memory/trait.GuestMemory.html#impl-GuestMemory-for-M), this blanket implementation
590/// extends to all [`GuestMemoryBackend`] types.
591impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
592 type E = Error;
593
594 fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
595 self.get_slices(addr, buf.len(), Permissions::Write)?
596 .stop_on_error()?
597 .try_fold(0, |acc, slice| Ok(acc + slice.write(&buf[acc..], 0)?))
598 }
599
600 fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
601 self.get_slices(addr, buf.len(), Permissions::Read)?
602 .stop_on_error()?
603 .try_fold(0, |acc, slice| Ok(acc + slice.read(&mut buf[acc..], 0)?))
604 }
605
606 /// # Examples
607 ///
    /// * Write a slice at guest address 0x1000. (uses the `backend-mmap` feature)
609 ///
610 /// ```
611 /// # #[cfg(feature = "backend-mmap")]
612 /// # {
613 /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
614 /// #
615 /// # let start_addr = GuestAddress(0x1000);
616 /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
617 /// # .expect("Could not create guest memory");
618 /// #
619 /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr)
620 /// .expect("Could not write slice to guest memory");
621 /// # }
622 /// ```
623 fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
624 let res = self.write(buf, addr)?;
625 if res != buf.len() {
626 return Err(Error::PartialBuffer {
627 expected: buf.len(),
628 completed: res,
629 });
630 }
631 Ok(())
632 }
633
634 /// # Examples
635 ///
    /// * Read a slice of length 16 at guest address 0x1000. (uses the `backend-mmap` feature)
637 ///
638 /// ```
639 /// # #[cfg(feature = "backend-mmap")]
640 /// # {
641 /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
642 /// #
643 /// let start_addr = GuestAddress(0x1000);
644 /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
645 /// .expect("Could not create guest memory");
646 /// let buf = &mut [0u8; 16];
647 ///
648 /// gm.read_slice(buf, start_addr)
649 /// .expect("Could not read slice from guest memory");
650 /// # }
651 /// ```
652 fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
653 let res = self.read(buf, addr)?;
654 if res != buf.len() {
655 return Err(Error::PartialBuffer {
656 expected: buf.len(),
657 completed: res,
658 });
659 }
660 Ok(())
661 }
662
663 fn read_volatile_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
664 where
665 F: ReadVolatile,
666 {
667 self.get_slices(addr, count, Permissions::Write)?
668 .stop_on_error()?
669 .try_fold(0, |acc, slice| {
670 Ok(acc + slice.read_volatile_from(0, src, slice.len())?)
671 })
672 }
673
674 fn read_exact_volatile_from<F>(
675 &self,
676 addr: GuestAddress,
677 src: &mut F,
678 count: usize,
679 ) -> Result<()>
680 where
681 F: ReadVolatile,
682 {
683 let res = self.read_volatile_from(addr, src, count)?;
684 if res != count {
685 return Err(Error::PartialBuffer {
686 expected: count,
687 completed: res,
688 });
689 }
690 Ok(())
691 }
692
693 fn write_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
694 where
695 F: WriteVolatile,
696 {
697 self.get_slices(addr, count, Permissions::Read)?
698 .stop_on_error()?
699 .try_fold(0, |acc, slice| {
700 // For a non-RAM region, reading could have side effects, so we
701 // must use write_all().
702 slice.write_all_volatile_to(0, dst, slice.len())?;
703 Ok(acc + slice.len())
704 })
705 }
706
707 fn write_all_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
708 where
709 F: WriteVolatile,
710 {
711 let res = self.write_volatile_to(addr, dst, count)?;
712 if res != count {
713 return Err(Error::PartialBuffer {
714 expected: count,
715 completed: res,
716 });
717 }
718 Ok(())
719 }
720
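    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A short sketch of an atomic store followed by an atomic load:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// gm.store(0x1234_5678u32, GuestAddress(0x1000), Ordering::Relaxed)
    ///     .expect("Could not store value");
    /// let val: u32 = gm
    ///     .load(GuestAddress(0x1000), Ordering::Relaxed)
    ///     .expect("Could not load value");
    /// assert_eq!(val, 0x1234_5678);
    /// # }
    /// ```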
721 fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
722 // No need to check past the first iterator item: It either has the size of `O`, then there
723 // can be no further items; or it does not, and then `VolatileSlice::store()` will fail.
724 self.get_slices(addr, size_of::<O>(), Permissions::Write)?
725 .next()
726 .unwrap()? // count > 0 never produces an empty iterator
727 .store(val, 0, order)
728 .map_err(Into::into)
729 }
730
731 fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
        // No need to check past the first iterator item: It either has the size of `O`, then there
        // can be no further items; or it does not, and then `VolatileSlice::load()` will fail.
734 self.get_slices(addr, size_of::<O>(), Permissions::Read)?
735 .next()
736 .unwrap()? // count > 0 never produces an empty iterator
737 .load(0, order)
738 .map_err(Into::into)
739 }
740}
741
742/// Permissions for accessing virtual memory.
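///
/// # Examples
///
/// A quick sketch of how permissions combine and are checked (the
/// `vm_memory::guest_memory::Permissions` path assumes this module stays publicly exported):
///
/// ```
/// # use vm_memory::guest_memory::Permissions;
/// // `ReadWrite` allows both plain reads and plain writes, but not vice versa.
/// assert!(Permissions::ReadWrite.allow(Permissions::Read));
/// assert!(!Permissions::Read.allow(Permissions::ReadWrite));
/// // Permissions can be combined and intersected.
/// assert_eq!(Permissions::Read | Permissions::Write, Permissions::ReadWrite);
/// assert_eq!(Permissions::ReadWrite & Permissions::Read, Permissions::Read);
/// ```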
743#[derive(Clone, Copy, Debug, Eq, PartialEq)]
744#[repr(u8)]
745pub enum Permissions {
746 /// No permissions
747 No = 0b00,
748 /// Read-only
749 Read = 0b01,
750 /// Write-only
751 Write = 0b10,
752 /// Allow both reading and writing
753 ReadWrite = 0b11,
754}
755
756impl Permissions {
757 /// Convert the numerical representation into the enum.
758 ///
759 /// # Panics
760 ///
761 /// Panics if `raw` is not a valid representation of any `Permissions` variant.
762 fn from_repr(raw: u8) -> Self {
763 use Permissions::*;
764
765 match raw {
766 value if value == No as u8 => No,
767 value if value == Read as u8 => Read,
768 value if value == Write as u8 => Write,
769 value if value == ReadWrite as u8 => ReadWrite,
770 _ => panic!("{raw:x} is not a valid raw Permissions value"),
771 }
772 }
773
774 /// Check whether the permissions `self` allow the given `access`.
775 pub fn allow(&self, access: Self) -> bool {
776 *self & access == access
777 }
778
779 /// Check whether the permissions `self` include write access.
780 pub fn has_write(&self) -> bool {
781 *self & Permissions::Write == Permissions::Write
782 }
783}
784
impl BitOr for Permissions {
786 type Output = Permissions;
787
788 /// Return the union of `self` and `rhs`.
789 fn bitor(self, rhs: Permissions) -> Self::Output {
790 Self::from_repr(self as u8 | rhs as u8)
791 }
792}
793
impl BitAnd for Permissions {
795 type Output = Permissions;
796
797 /// Return the intersection of `self` and `rhs`.
798 fn bitand(self, rhs: Permissions) -> Self::Output {
799 Self::from_repr(self as u8 & rhs as u8)
800 }
801}
802
803/// Represents virtual I/O memory.
804///
/// `GuestMemory` is generally backed by some “physical” `GuestMemoryBackend`, which in turn
/// consists of `GuestMemoryRegion` objects. However, the mapping from I/O virtual addresses
/// (IOVAs) to physical addresses may be arbitrarily fragmented. Translation is done via an IOMMU.
808///
/// In contrast to `GuestMemoryBackend`:
810/// - Any IOVA range may consist of arbitrarily many underlying ranges in physical memory.
811/// - Accessing an IOVA requires passing the intended access mode, and the IOMMU will check whether
812/// the given access mode is permitted for the given IOVA.
813/// - The translation result for a given IOVA may change over time (i.e. the physical address
814/// associated with an IOVA may change).
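///
/// # Examples (uses the `backend-mmap` feature)
///
/// Thanks to the blanket implementation further below, a plain `GuestMemoryMmap` can already be
/// accessed through this trait; a minimal sketch (the `vm_memory::guest_memory::` paths assume
/// this module stays publicly exported under that name):
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # {
/// # use vm_memory::guest_memory::{GuestMemory, Permissions};
/// # use vm_memory::{GuestAddress, GuestMemoryMmap};
/// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
///     .expect("Could not create guest memory");
///
/// // Without an IOMMU in between, the `Permissions` argument has no additional effect.
/// assert!(gm.check_range(GuestAddress(0x1000), 0x400, Permissions::ReadWrite));
/// // The underlying physical memory is directly reachable.
/// assert!(gm.physical_memory().is_some());
/// # }
/// ```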
815pub trait GuestMemory {
816 /// Underlying `GuestMemoryBackend` type.
817 type PhysicalMemory: GuestMemoryBackend + ?Sized;
818 /// Dirty bitmap type for tracking writes to the IOVA address space.
819 type Bitmap: Bitmap;
820
821 /// Return `true` if `addr..(addr + count)` is accessible with `access`.
822 fn check_range(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool;
823
    /// Returns an iterator over [`VolatileSlice`](struct.VolatileSlice.html)s that together cover
    /// `count` bytes starting at `addr`.
    ///
    /// Note that because of the fragmented nature of virtual memory, it can easily happen that the
    /// range `[addr, addr + count)` is not backed by a contiguous region in our own virtual
    /// memory, which makes returning a single slice impossible; hence the iterator.
830 ///
831 /// The iterator’s items are wrapped in [`Result`], i.e. there may be errors reported on
832 /// individual items. If there is no such error, the cumulative length of all items will be
833 /// equal to `count`. Any error will end iteration immediately, i.e. there are no items past
834 /// the first error.
835 ///
836 /// If `count` is 0, an empty iterator will be returned.
837 fn get_slices<'a>(
838 &'a self,
839 addr: GuestAddress,
840 count: usize,
841 access: Permissions,
842 ) -> Result<impl GuestMemorySliceIterator<'a, BS<'a, Self::Bitmap>>>;
843
844 /// If this virtual memory is just a plain `GuestMemoryBackend` object underneath without an IOMMU
845 /// translation layer in between, return that `GuestMemoryBackend` object.
846 fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
847 None
848 }
849}
850
851/// Iterates over [`VolatileSlice`]s that together form an I/O memory area.
852///
853/// Returned by [`GuestMemory::get_slices()`].
854pub trait GuestMemorySliceIterator<'a, B: BitmapSlice>:
855 Iterator<Item = Result<VolatileSlice<'a, B>>> + FusedIterator + Sized
856{
    /// Adapts this [`GuestMemorySliceIterator`] to return `None` (i.e. gracefully terminate) when it
    /// encounters an error after successfully producing at least one slice.
    /// Returns an error if requesting the first slice returns an error.
860 fn stop_on_error(self) -> Result<impl Iterator<Item = VolatileSlice<'a, B>>> {
861 let mut peek = self.peekable();
862 if let Some(err) = peek.next_if(Result::is_err) {
863 return Err(err.unwrap_err());
864 }
865 Ok(peek.filter_map(Result::ok))
866 }
867}
868
869/// Allow accessing every [`GuestMemoryBackend`] via [`GuestMemory`].
870///
/// [`GuestMemory`] is a generalization of [`GuestMemoryBackend`]: every object implementing the
/// former is built on top of an object implementing the latter (there is always an underlying
/// [`GuestMemoryBackend`]),
873/// with an opaque internal mapping on top, e.g. provided by an IOMMU.
874///
/// Every [`GuestMemoryBackend`] is therefore trivially also a [`GuestMemory`], assuming a complete identity
876/// mapping (which we must assume, so that accessing such objects via either trait will yield the
877/// same result): Basically, all [`GuestMemory`] methods are implemented as trivial wrappers around
878/// the same [`GuestMemoryBackend`] methods (if available), discarding the `access` parameter.
879impl<M: GuestMemoryBackend + ?Sized> GuestMemory for M {
880 type PhysicalMemory = M;
881 type Bitmap = <M::R as GuestMemoryRegion>::B;
882
883 fn check_range(&self, addr: GuestAddress, count: usize, _access: Permissions) -> bool {
884 <M as GuestMemoryBackend>::check_range(self, addr, count)
885 }
886
887 fn get_slices<'a>(
888 &'a self,
889 addr: GuestAddress,
890 count: usize,
891 _access: Permissions,
892 ) -> Result<impl GuestMemorySliceIterator<'a, BS<'a, Self::Bitmap>>> {
893 Ok(<M as GuestMemoryBackend>::get_slices(self, addr, count))
894 }
895
896 fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
897 Some(self)
898 }
899}
900
901#[cfg(test)]
902mod tests {
903 #![allow(clippy::undocumented_unsafe_blocks)]
904
905 // Note that `GuestMemory` is tested primarily in src/iommu.rs via `IommuMemory`.
906
907 use super::*;
908 #[cfg(feature = "backend-mmap")]
909 use crate::bytes::ByteValued;
910 #[cfg(feature = "backend-mmap")]
911 use crate::GuestAddress;
912 #[cfg(feature = "backend-mmap")]
913 use std::time::{Duration, Instant};
914
915 use vmm_sys_util::tempfile::TempFile;
916
917 #[cfg(feature = "backend-mmap")]
918 type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
919
920 #[cfg(feature = "backend-mmap")]
921 fn make_image(size: u8) -> Vec<u8> {
922 let mut image: Vec<u8> = Vec::with_capacity(size as usize);
923 for i in 0..size {
924 image.push(i);
925 }
926 image
927 }
928
929 #[test]
930 fn test_file_offset() {
931 let file = TempFile::new().unwrap().into_file();
932 let start = 1234;
933 let file_offset = FileOffset::new(file, start);
934 assert_eq!(file_offset.start(), start);
935 assert_eq!(
936 file_offset.file() as *const File,
937 file_offset.arc().as_ref() as *const File
938 );
939 }
940
941 #[cfg(feature = "backend-mmap")]
942 #[test]
943 fn checked_read_from() {
944 let start_addr1 = GuestAddress(0x0);
945 let start_addr2 = GuestAddress(0x40);
946 let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
947 let image = make_image(0x80);
948 let offset = GuestAddress(0x30);
949 let count: usize = 0x20;
950 assert_eq!(
951 0x20_usize,
952 mem.read_volatile_from(offset, &mut image.as_slice(), count)
953 .unwrap()
954 );
955 }
956
    // Runs the provided closure in a loop, until at least `duration` has elapsed.
958 #[cfg(feature = "backend-mmap")]
959 fn loop_timed<F>(duration: Duration, mut f: F)
960 where
961 F: FnMut(),
962 {
963 // We check the time every `CHECK_PERIOD` iterations.
964 const CHECK_PERIOD: u64 = 1_000_000;
965 let start_time = Instant::now();
966
967 loop {
968 for _ in 0..CHECK_PERIOD {
969 f();
970 }
971 if start_time.elapsed() >= duration {
972 break;
973 }
974 }
975 }
976
977 // Helper method for the following test. It spawns a writer and a reader thread, which
978 // simultaneously try to access an object that is placed at the junction of two memory regions.
979 // The part of the object that's continuously accessed is a member of type T. The writer
980 // flips all the bits of the member with every write, while the reader checks that every byte
981 // has the same value (and thus it did not do a non-atomic access). The test succeeds if
982 // no mismatch is detected after performing accesses for a pre-determined amount of time.
983 #[cfg(feature = "backend-mmap")]
984 #[cfg(not(miri))] // This test simulates a race condition between guest and vmm
985 fn non_atomic_access_helper<T>()
986 where
987 T: ByteValued
988 + std::fmt::Debug
989 + From<u8>
990 + Into<u128>
991 + std::ops::Not<Output = T>
992 + PartialEq,
993 {
994 use std::mem;
995 use std::thread;
996
997 // A dummy type that's always going to have the same alignment as the first member,
998 // and then adds some bytes at the end.
999 #[derive(Clone, Copy, Debug, Default, PartialEq)]
1000 struct Data<T> {
1001 val: T,
1002 some_bytes: [u8; 8],
1003 }
1004
1005 // Some sanity checks.
1006 assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
1007 assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
1008
1009 // There must be no padding bytes, as otherwise implementing ByteValued is UB
1010 assert_eq!(mem::size_of::<Data<T>>(), mem::size_of::<T>() + 8);
1011
1012 unsafe impl<T: ByteValued> ByteValued for Data<T> {}
1013
1014 // Start of first guest memory region.
1015 let start = GuestAddress(0);
1016 let region_len = 1 << 12;
1017
1018 // The address where we start writing/reading a Data<T> value.
1019 let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
1020
1021 let mem = GuestMemoryMmap::from_ranges(&[
1022 (start, region_len),
1023 (start.unchecked_add(region_len as u64), region_len),
1024 ])
1025 .unwrap();
1026
1027 // Need to clone this and move it into the new thread we create.
1028 let mem2 = mem.clone();
1029 // Just some bytes.
1030 let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255];
1031
1032 let mut data = Data {
1033 val: T::from(0u8),
1034 some_bytes,
1035 };
1036
1037 // Simple check that cross-region write/read is ok.
1038 mem.write_obj(data, data_start).unwrap();
1039 let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
1040 assert_eq!(read_data, data);
1041
1042 let t = thread::spawn(move || {
1043 let mut count: u64 = 0;
1044
1045 loop_timed(Duration::from_secs(3), || {
1046 let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
1047
1048 // Every time data is written to memory by the other thread, the value of
1049 // data.val alternates between 0 and T::MAX, so the inner bytes should always
1050 // have the same value. If they don't match, it means we read a partial value,
1051 // so the access was not atomic.
1052 let bytes = data.val.into().to_le_bytes();
1053 for i in 1..mem::size_of::<T>() {
1054 if bytes[0] != bytes[i] {
1055 panic!(
1056 "val bytes don't match {:?} after {} iterations",
1057 &bytes[..mem::size_of::<T>()],
1058 count
1059 );
1060 }
1061 }
1062 count += 1;
1063 });
1064 });
1065
1066 // Write the object while flipping the bits of data.val over and over again.
1067 loop_timed(Duration::from_secs(3), || {
1068 mem.write_obj(data, data_start).unwrap();
1069 data.val = !data.val;
1070 });
1071
1072 t.join().unwrap()
1073 }
1074
1075 #[cfg(feature = "backend-mmap")]
1076 #[test]
1077 #[cfg(not(miri))]
1078 fn test_non_atomic_access() {
1079 non_atomic_access_helper::<u16>()
1080 }
1081
1082 #[cfg(feature = "backend-mmap")]
1083 #[test]
1084 fn test_zero_length_accesses() {
1085 #[derive(Default, Clone, Copy)]
1086 #[repr(C)]
1087 struct ZeroSizedStruct {
1088 dummy: [u32; 0],
1089 }
1090
1091 unsafe impl ByteValued for ZeroSizedStruct {}
1092
1093 let addr = GuestAddress(0x1000);
1094 let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1095 let obj = ZeroSizedStruct::default();
1096 let mut image = make_image(0x80);
1097
1098 assert_eq!(mem.write(&[], addr).unwrap(), 0);
1099 assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
1100
1101 assert!(mem.write_slice(&[], addr).is_ok());
1102 assert!(mem.read_slice(&mut [], addr).is_ok());
1103
1104 assert!(mem.write_obj(obj, addr).is_ok());
1105 assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
1106
1107 assert_eq!(
1108 mem.read_volatile_from(addr, &mut image.as_slice(), 0)
1109 .unwrap(),
1110 0
1111 );
1112
1113 assert!(mem
1114 .read_exact_volatile_from(addr, &mut image.as_slice(), 0)
1115 .is_ok());
1116
1117 assert_eq!(
1118 mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0)
1119 .unwrap(),
1120 0
1121 );
1122
1123 assert!(mem
1124 .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0)
1125 .is_ok());
1126 }
1127
1128 #[cfg(feature = "backend-mmap")]
1129 #[test]
1130 fn test_atomic_accesses() {
1131 let addr = GuestAddress(0x1000);
1132 let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1133 let bad_addr = addr.unchecked_add(0x1000);
1134
1135 crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
1136 }
1137
1138 #[cfg(feature = "backend-mmap")]
1139 #[cfg(target_os = "linux")]
1140 #[test]
1141 fn test_guest_memory_mmap_is_hugetlbfs() {
1142 let addr = GuestAddress(0x1000);
1143 let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1144 let r = mem.find_region(addr).unwrap();
1145 assert_eq!(r.is_hugetlbfs(), None);
1146 }
1147
1148 /// Test `Permissions & Permissions`.
1149 #[test]
1150 fn test_perm_and() {
1151 use Permissions::*;
1152
1153 for p in [No, Read, Write, ReadWrite] {
1154 assert_eq!(p & p, p);
1155 }
1156 for p1 in [No, Read, Write, ReadWrite] {
1157 for p2 in [No, Read, Write, ReadWrite] {
1158 assert_eq!(p1 & p2, p2 & p1);
1159 }
1160 }
1161 for p in [No, Read, Write, ReadWrite] {
1162 assert_eq!(No & p, No);
1163 }
1164 for p in [No, Read, Write, ReadWrite] {
1165 assert_eq!(ReadWrite & p, p);
1166 }
1167 assert_eq!(Read & Write, No);
1168 }
1169
1170 /// Test `Permissions | Permissions`.
1171 #[test]
1172 fn test_perm_or() {
1173 use Permissions::*;
1174
1175 for p in [No, Read, Write, ReadWrite] {
1176 assert_eq!(p | p, p);
1177 }
1178 for p1 in [No, Read, Write, ReadWrite] {
1179 for p2 in [No, Read, Write, ReadWrite] {
1180 assert_eq!(p1 | p2, p2 | p1);
1181 }
1182 }
1183 for p in [No, Read, Write, ReadWrite] {
1184 assert_eq!(No | p, p);
1185 }
1186 for p in [No, Read, Write, ReadWrite] {
1187 assert_eq!(ReadWrite | p, ReadWrite);
1188 }
1189 assert_eq!(Read | Write, ReadWrite);
1190 }
1191
1192 /// Test `Permissions::has_write()`.
1193 #[test]
1194 fn test_perm_has_write() {
1195 assert!(!Permissions::No.has_write());
1196 assert!(!Permissions::Read.has_write());
1197 assert!(Permissions::Write.has_write());
1198 assert!(Permissions::ReadWrite.has_write());
1199 }
1200}