vm_memory/volatile_memory.rs

1// Portions Copyright 2019 Red Hat, Inc.
2//
3// Copyright 2017 The Chromium OS Authors. All rights reserved.
4// Use of this source code is governed by a BSD-style license that can be
5// found in the THIRD-PARTY file.
6//
7// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
8
9//! Types for volatile access to memory.
10//!
11//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
12//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
13//! `VolatileMemory`, allow us to sidestep those rules by wrapping pointers that absolutely have to be
14//! accessed with volatile semantics. Some systems really do need to operate on shared memory and can't have the
15//! compiler reordering or eliding access because it has no visibility into what other systems are
16//! doing with that hunk of memory.
17//!
18//! For the purposes of maintaining safety, volatile memory has some rules of its own:
19//!
20//! 1. No references or slices to volatile memory (`&` or `&mut`).
21//!
22//! 2. Access should always be done with a volatile read or write.
23//!
24//! The first rule is because having references of any kind to memory considered volatile would
25//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined if
26//! done concurrently without synchronization. With volatile accesses we know that the compiler has
27//! not reordered or elided the access.
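//!
//! A minimal usage sketch (illustrative only; it wraps a plain buffer, but the same calls apply to
//! any type implementing `VolatileMemory`):
//!
//! ```
//! use vm_memory::{VolatileMemory, VolatileSlice};
//!
//! let mut buf = [0u8; 8];
//! let vslice = VolatileSlice::from(buf.as_mut_slice());
//! // All access goes through volatile reads and writes instead of `&`/`&mut` references.
//! vslice.get_ref::<u32>(0).unwrap().store(0x1234_5678);
//! assert_eq!(vslice.get_ref::<u32>(0).unwrap().load(), 0x1234_5678);
//! ```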
28
29use std::cmp::min;
30use std::io;
31use std::marker::PhantomData;
32use std::mem::{align_of, size_of};
33use std::ptr::copy;
34use std::ptr::{read_volatile, write_volatile};
35use std::result;
36use std::sync::atomic::Ordering;
37
38use crate::atomic_integer::AtomicInteger;
39use crate::bitmap::{Bitmap, BitmapSlice, BS};
40use crate::{AtomicAccess, ByteValued, Bytes};
41
42#[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))]
43use crate::mmap::xen::{MmapXen as MmapInfo, MmapXenSlice};
44
45#[cfg(not(feature = "xen"))]
46type MmapInfo = std::marker::PhantomData<()>;
47
48use crate::io::{retry_eintr, ReadVolatile, WriteVolatile};
49use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
50
51/// `VolatileMemory` related errors.
52#[allow(missing_docs)]
53#[derive(Debug, thiserror::Error)]
54pub enum Error {
55    /// `addr` is out of bounds of the volatile memory slice.
56    #[error("address 0x{addr:x} is out of bounds")]
57    OutOfBounds { addr: usize },
58    /// Taking a slice at `base` with `offset` would overflow `usize`.
59    #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
60    Overflow { base: usize, offset: usize },
61    /// Taking a slice whose size overflows `usize`.
62    #[error("{nelements:?} elements of size {size:?} would overflow a usize")]
63    TooBig { nelements: usize, size: usize },
64    /// Trying to obtain a misaligned reference.
65    #[error("address 0x{addr:x} is not aligned to {alignment:?}")]
66    Misaligned { addr: usize, alignment: usize },
67    /// Writing to memory failed.
68    #[error("{0}")]
69    IOError(io::Error),
70    /// Incomplete read or write.
71    #[error("only used {completed} bytes in {expected} long buffer")]
72    PartialBuffer { expected: usize, completed: usize },
73}
74
75/// Result of volatile memory operations.
76pub type Result<T> = result::Result<T, Error>;
77
78/// Convenience function for computing `base + offset`.
79///
80/// # Errors
81///
82/// Returns [`Err(Error::Overflow)`](enum.Error.html#variant.Overflow) in case `base + offset`
83/// exceeds `usize::MAX`.
84///
85/// # Examples
86///
87/// ```
88/// # use vm_memory::volatile_memory::compute_offset;
89/// #
90/// assert_eq!(108, compute_offset(100, 8).unwrap());
91/// assert!(compute_offset(usize::MAX, 6).is_err());
92/// ```
93pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
94    match base.checked_add(offset) {
95        None => Err(Error::Overflow { base, offset }),
96        Some(m) => Ok(m),
97    }
98}
99
100/// Types that support raw volatile access to their data.
101pub trait VolatileMemory {
102    /// Type used for dirty memory tracking.
103    type B: Bitmap;
104
105    /// Gets the size of this slice.
106    fn len(&self) -> usize;
107
108    /// Check whether the region is empty.
109    fn is_empty(&self) -> bool {
110        self.len() == 0
111    }
112
113    /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
114    /// `offset`.
115    ///
116    /// Note that the property `get_slice(offset, count).len() == count` MUST NOT be
117    /// relied on for the correctness of unsafe code. This is a safe function inside of a
118    /// safe trait, and implementors are under no obligation to follow its documentation.
119    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<'_, BS<'_, Self::B>>>;
120
121    /// Gets a slice of memory for the entire region that supports volatile access.
122    fn as_volatile_slice(&self) -> VolatileSlice<'_, BS<'_, Self::B>> {
123        self.get_slice(0, self.len()).unwrap()
124    }
125
126    /// Gets a `VolatileRef` at `offset`.
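    ///
    /// # Examples
    ///
    /// A minimal illustration (the offset and value are arbitrary):
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let v_ref = vslice.get_ref::<u16>(8).expect("offset is in bounds");
    /// v_ref.store(0xabcd);
    /// assert_eq!(v_ref.load(), 0xabcd);
    /// ```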
127    fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<'_, T, BS<'_, Self::B>>> {
128        let slice = self.get_slice(offset, size_of::<T>())?;
129
130        assert_eq!(
131            slice.len(),
132            size_of::<T>(),
133            "VolatileMemory::get_slice(offset, count) returned slice of length != count."
134        );
135
136        // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
137        // slice.addr is valid memory of size slice.len(). The assert above ensures that
138        // the length of the slice is exactly enough to hold one `T`. Lastly, the lifetime of the
139        // returned VolatileRef matches that of the VolatileSlice returned by get_slice, and thus the
140        // lifetime of `self`.
141        unsafe {
142            Ok(VolatileRef::with_bitmap(
143                slice.addr,
144                slice.bitmap,
145                slice.mmap,
146            ))
147        }
148    }
149
150    /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at
151    /// `offset`.
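    ///
    /// # Examples
    ///
    /// A minimal illustration (the element count and values are arbitrary):
    ///
    /// ```
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let arr = vslice.get_array_ref::<u32>(0, 4).expect("4 * 4 bytes fit in the slice");
    /// arr.store(2, 42);
    /// assert_eq!(arr.load(2), 42);
    /// ```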
152    fn get_array_ref<T: ByteValued>(
153        &self,
154        offset: usize,
155        n: usize,
156    ) -> Result<VolatileArrayRef<'_, T, BS<'_, Self::B>>> {
157        // Use isize to avoid problems with ptr::offset and ptr::add down the line.
158        let nbytes = isize::try_from(n)
159            .ok()
160            .and_then(|n| n.checked_mul(size_of::<T>() as isize))
161            .ok_or(Error::TooBig {
162                nelements: n,
163                size: size_of::<T>(),
164            })?;
165        let slice = self.get_slice(offset, nbytes as usize)?;
166
167        assert_eq!(
168            slice.len(),
169            nbytes as usize,
170            "VolatileMemory::get_slice(offset, count) returned slice of length != count."
171        );
172
173        // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
174        // slice.addr is valid memory of size slice.len(). The assert above ensures that
175        // the length of the slice is exactly enough to hold `n` instances of `T`. Lastly, the lifetime of the
176        // returned VolatileArrayRef matches that of the VolatileSlice returned by get_slice, and thus the
177        // lifetime of `self`.
178        unsafe {
179            Ok(VolatileArrayRef::with_bitmap(
180                slice.addr,
181                n,
182                slice.bitmap,
183                slice.mmap,
184            ))
185        }
186    }
187
188    /// Returns a reference to an instance of `T` at `offset`.
189    ///
190    /// # Safety
191    /// To use this safely, the caller must guarantee that there are no other
192    /// users of the given chunk of memory for the lifetime of the result.
193    ///
194    /// # Errors
195    ///
196    /// If the resulting pointer is not aligned, this method will return an
197    /// [`Error`](enum.Error.html).
198    unsafe fn aligned_as_ref<T: ByteValued>(&self, offset: usize) -> Result<&T> {
199        let slice = self.get_slice(offset, size_of::<T>())?;
200        slice.check_alignment(align_of::<T>())?;
201
202        assert_eq!(
203            slice.len(),
204            size_of::<T>(),
205            "VolatileMemory::get_slice(offset, count) returned slice of length != count."
206        );
207
208        // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
209        // slice.addr is valid memory of size slice.len(). The assert above ensures that
210        // the length of the slice is exactly enough to hold one `T`.
211        // Dereferencing the pointer is safe because we check the alignment above, and the invariants
212        // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the
213        // returned reference matches that of the VolatileSlice returned by get_slice, and thus the
214        // lifetime of `self`.
215        unsafe { Ok(&*(slice.addr as *const T)) }
216    }
217
218    /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed
219    /// using the resulting reference are not automatically accounted for by the dirty bitmap
220    /// tracking functionality.
221    ///
222    /// # Safety
223    ///
224    /// To use this safely, the caller must guarantee that there are no other
225    /// users of the given chunk of memory for the lifetime of the result.
226    ///
227    /// # Errors
228    ///
229    /// If the resulting pointer is not aligned, this method will return an
230    /// [`Error`](enum.Error.html).
231    // The function is unsafe, and the conversion is safe as long as the safety
232    // instructions above are followed.
233    #[allow(clippy::mut_from_ref)]
234    unsafe fn aligned_as_mut<T: ByteValued>(&self, offset: usize) -> Result<&mut T> {
235        let slice = self.get_slice(offset, size_of::<T>())?;
236        slice.check_alignment(align_of::<T>())?;
237
238        assert_eq!(
239            slice.len(),
240            size_of::<T>(),
241            "VolatileMemory::get_slice(offset, count) returned slice of length != count."
242        );
243
244        // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
245        // slice.addr is valid memory of size slice.len(). The assert above ensures that
246        // the length of the slice is exactly enough to hold one `T`.
247        // Dereferencing the pointer is safe because we check the alignment above, and the invariants
248        // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the
249        // returned reference matches that of the VolatileSlice returned by get_slice, and thus the
250        // lifetime of `self`.
251
252        unsafe { Ok(&mut *(slice.addr as *mut T)) }
253    }
254
255    /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed
256    /// using the resulting reference are not automatically accounted for by the dirty bitmap
257    /// tracking functionality.
258    ///
259    /// # Errors
260    ///
261    /// If the resulting pointer is not aligned, this method will return an
262    /// [`Error`](enum.Error.html).
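    ///
    /// # Examples
    ///
    /// A minimal illustration, assuming `AtomicU8` implements this crate's `AtomicInteger` trait
    /// (as the standard atomic integer types do); a one-byte access is also always aligned:
    ///
    /// ```
    /// # use std::sync::atomic::{AtomicU8, Ordering};
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let atomic = vslice.get_atomic_ref::<AtomicU8>(7).expect("in bounds and aligned");
    /// atomic.store(99, Ordering::Release);
    /// assert_eq!(atomic.load(Ordering::Acquire), 99);
    /// ```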
263    fn get_atomic_ref<T: AtomicInteger>(&self, offset: usize) -> Result<&T> {
264        let slice = self.get_slice(offset, size_of::<T>())?;
265        slice.check_alignment(align_of::<T>())?;
266
267        assert_eq!(
268            slice.len(),
269            size_of::<T>(),
270            "VolatileMemory::get_slice(offset, count) returned slice of length != count."
271        );
272
273        // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that
274        // slice.addr is valid memory of size slice.len(). The assert above ensures that
275        // the length of the slice is exactly enough to hold one `T`.
276        // Dereferencing the pointer is safe because we check the alignment above. Lastly, the lifetime of the
277        // returned reference matches that of the VolatileSlice returned by get_slice, and thus the
278        // lifetime of `self`.
279        unsafe { Ok(&*(slice.addr as *const T)) }
280    }
281
282    /// Returns the sum of `base` and `offset` if it is valid to access a range of `offset`
283    /// bytes starting at `base`.
284    ///
285    /// Specifically, allows accesses of length 0 at the end of a slice:
286    ///
287    /// ```rust
288    /// # use vm_memory::{VolatileMemory, VolatileSlice};
289    /// let mut arr = [1, 2, 3];
290    /// let slice = VolatileSlice::from(arr.as_mut_slice());
291    ///
292    /// assert_eq!(slice.compute_end_offset(3, 0).unwrap(), 3);
293    /// ```
294    fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
295        let mem_end = compute_offset(base, offset)?;
296        if mem_end > self.len() {
297            return Err(Error::OutOfBounds { addr: mem_end });
298        }
299        Ok(mem_end)
300    }
301}
302
303impl<'a> From<&'a mut [u8]> for VolatileSlice<'a, ()> {
304    fn from(value: &'a mut [u8]) -> Self {
305        // SAFETY: Since we construct the VolatileSlice from a rust slice, we know that
306        // the memory at addr `value as *mut u8` is valid for reads and writes (because mutable
307        // reference) of len `value.len()`. Since the `VolatileSlice` inherits the lifetime `'a`,
308        // it is not possible to access/mutate `value` while the VolatileSlice is alive.
309        //
310        // Note that it is possible for multiple aliasing subslices of this `VolatileSlice` to
311        // be created through `VolatileSlice::subslice`. This is OK, as pointers are allowed to
312        // alias, and it is impossible to get rust-style references from a `VolatileSlice`.
313        unsafe { VolatileSlice::new(value.as_mut_ptr(), value.len()) }
314    }
315}
316
317#[repr(C, packed)]
318struct Packed<T>(T);
319
320/// A guard that performs the mapping and protects the memory from being unmapped while it is in use.
321#[derive(Debug)]
322pub struct PtrGuard {
323    addr: *mut u8,
324    len: usize,
325
326    // This field is never read directly, but it keeps the mapping alive and so protects the slice
327    // from getting unmapped while in use. Once it goes out of scope, the memory is unmapped automatically.
328    #[cfg(all(feature = "xen", target_family = "unix"))]
329    _slice: MmapXenSlice,
330}
331
332#[allow(clippy::len_without_is_empty)]
333impl PtrGuard {
334    #[allow(unused_variables)]
335    fn new(mmap: Option<&MmapInfo>, addr: *mut u8, write: bool, len: usize) -> Self {
336        #[cfg(all(feature = "xen", target_family = "unix"))]
337        let (addr, _slice) = {
338            let prot = if write {
339                libc::PROT_WRITE
340            } else {
341                libc::PROT_READ
342            };
343            let slice = MmapInfo::mmap(mmap, addr, prot, len);
344            (slice.addr(), slice)
345        };
346
347        Self {
348            addr,
349            len,
350
351            #[cfg(all(feature = "xen", target_family = "unix"))]
352            _slice,
353        }
354    }
355
356    fn read(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self {
357        Self::new(mmap, addr, false, len)
358    }
359
360    /// Returns a non-mutable pointer to the beginning of the slice.
361    pub fn as_ptr(&self) -> *const u8 {
362        self.addr
363    }
364
365    /// Gets the length of the mapped region.
366    pub fn len(&self) -> usize {
367        self.len
368    }
369}
370
371/// A mutable guard that performs the mapping and protects the memory from being unmapped while it is in use.
372#[derive(Debug)]
373pub struct PtrGuardMut(PtrGuard);
374
375#[allow(clippy::len_without_is_empty)]
376impl PtrGuardMut {
377    fn write(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self {
378        Self(PtrGuard::new(mmap, addr, true, len))
379    }
380
381    /// Returns a mutable pointer to the beginning of the slice. Mutable accesses performed
382    /// using the resulting pointer are not automatically accounted for by the dirty bitmap
383    /// tracking functionality.
384    pub fn as_ptr(&self) -> *mut u8 {
385        self.0.addr
386    }
387
388    /// Gets the length of the mapped region.
389    pub fn len(&self) -> usize {
390        self.0.len
391    }
392}
393
394/// A slice of raw memory that supports volatile access.
395#[derive(Clone, Copy, Debug)]
396pub struct VolatileSlice<'a, B = ()> {
397    addr: *mut u8,
398    size: usize,
399    bitmap: B,
400    mmap: Option<&'a MmapInfo>,
401}
402
403impl<'a> VolatileSlice<'a, ()> {
404    /// Creates a slice of raw memory that must support volatile access.
405    ///
406    /// # Safety
407    ///
408    /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
409    /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
410    /// must also guarantee that all other users of the given chunk of memory are using volatile
411    /// accesses.
412    pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
413        Self::with_bitmap(addr, size, (), None)
414    }
415}
416
417impl<'a, B: BitmapSlice> VolatileSlice<'a, B> {
418    /// Creates a slice of raw memory that must support volatile access, and uses the provided
419    /// `bitmap` object for dirty page tracking.
420    ///
421    /// # Safety
422    ///
423    /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
424    /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
425    /// must also guarantee that all other users of the given chunk of memory are using volatile
426    /// accesses.
427    pub unsafe fn with_bitmap(
428        addr: *mut u8,
429        size: usize,
430        bitmap: B,
431        mmap: Option<&'a MmapInfo>,
432    ) -> VolatileSlice<'a, B> {
433        VolatileSlice {
434            addr,
435            size,
436            bitmap,
437            mmap,
438        }
439    }
440
441    /// Replaces the bitmap in `self` by `new_bitmap`.
442    #[cfg(feature = "iommu")]
443    pub(crate) fn replace_bitmap<NB: BitmapSlice>(self, new_bitmap: NB) -> VolatileSlice<'a, NB> {
444        VolatileSlice {
445            addr: self.addr,
446            size: self.size,
447            bitmap: new_bitmap,
448            mmap: self.mmap,
449        }
450    }
451
452    /// Returns a guard for the pointer to the underlying memory.
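    ///
    /// # Examples
    ///
    /// A minimal illustration of reading through the guarded pointer (the values are arbitrary):
    ///
    /// ```
    /// # use vm_memory::VolatileSlice;
    /// let mut mem = [7u8; 4];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let guard = vslice.ptr_guard();
    /// assert_eq!(guard.len(), 4);
    /// // SAFETY: the pointer is valid for `guard.len()` bytes for as long as the guard is alive.
    /// assert_eq!(unsafe { guard.as_ptr().read_volatile() }, 7);
    /// ```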
453    pub fn ptr_guard(&self) -> PtrGuard {
454        PtrGuard::read(self.mmap, self.addr, self.len())
455    }
456
457    /// Returns a mutable guard for the pointer to the underlying memory.
458    pub fn ptr_guard_mut(&self) -> PtrGuardMut {
459        PtrGuardMut::write(self.mmap, self.addr, self.len())
460    }
461
462    /// Gets the size of this slice.
463    pub fn len(&self) -> usize {
464        self.size
465    }
466
467    /// Checks if the slice is empty.
468    pub fn is_empty(&self) -> bool {
469        self.size == 0
470    }
471
472    /// Borrows the inner `BitmapSlice`.
473    pub fn bitmap(&self) -> &B {
474        &self.bitmap
475    }
476
477    /// Divides one slice into two at an index.
478    ///
479    /// # Example
480    ///
481    /// ```
482    /// # use vm_memory::{VolatileMemory, VolatileSlice};
483    /// #
484    /// # // Create a buffer
485    /// # let mut mem = [0u8; 32];
486    /// #
487    /// # // Get a `VolatileSlice` from the buffer
488    /// let vslice = VolatileSlice::from(&mut mem[..]);
489    ///
490    /// let (start, end) = vslice.split_at(8).expect("Could not split VolatileSlice");
491    /// assert_eq!(8, start.len());
492    /// assert_eq!(24, end.len());
493    /// ```
494    pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> {
495        let end = self.offset(mid)?;
496        let start =
497            // SAFETY: safe because self.offset() already checked the bounds
498            unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone(), self.mmap) };
499
500        Ok((start, end))
501    }
502
503    /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
504    /// `offset`, with length `count` bytes.
505    ///
506    /// The returned subslice is a copy of this slice with the address increased by `offset` bytes
507    /// and the size set to `count` bytes.
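    ///
    /// # Examples
    ///
    /// A minimal illustration (the offsets are arbitrary):
    ///
    /// ```
    /// # use vm_memory::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let sub = vslice.subslice(8, 4).expect("8 + 4 is within the 32-byte slice");
    /// assert_eq!(sub.len(), 4);
    /// assert!(vslice.subslice(30, 4).is_err());
    /// ```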
508    pub fn subslice(&self, offset: usize, count: usize) -> Result<Self> {
509        let _ = self.compute_end_offset(offset, count)?;
510
511        // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
512        // the lifetime is the same as the original slice.
513        unsafe {
514            Ok(VolatileSlice::with_bitmap(
515                self.addr.add(offset),
516                count,
517                self.bitmap.slice_at(offset),
518                self.mmap,
519            ))
520        }
521    }
522
523    /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
524    /// `offset`.
525    ///
526    /// The returned subslice is a copy of this slice with the address increased by `count` bytes
527    /// and the size reduced by `count` bytes.
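    ///
    /// # Examples
    ///
    /// A minimal illustration (the offsets are arbitrary):
    ///
    /// ```
    /// # use vm_memory::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let tail = vslice.offset(24).expect("24 is within the 32-byte slice");
    /// assert_eq!(tail.len(), 8);
    /// assert!(vslice.offset(33).is_err());
    /// ```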
528    pub fn offset(&self, count: usize) -> Result<VolatileSlice<'a, B>> {
529        let new_addr = (self.addr as usize)
530            .checked_add(count)
531            .ok_or(Error::Overflow {
532                base: self.addr as usize,
533                offset: count,
534            })?;
535        let new_size = self
536            .size
537            .checked_sub(count)
538            .ok_or(Error::OutOfBounds { addr: new_addr })?;
539        // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
540        // memory of the original slice.
541        unsafe {
542            Ok(VolatileSlice::with_bitmap(
543                self.addr.add(count),
544                new_size,
545                self.bitmap.slice_at(count),
546                self.mmap,
547            ))
548        }
549    }
550
551    /// Copies as many elements of type `T` as possible from this slice to `buf`.
552    ///
553    /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
554    /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
555    /// using volatile reads.
556    ///
557    /// # Examples
558    ///
559    /// ```
560    /// # use vm_memory::{VolatileMemory, VolatileSlice};
561    /// #
562    /// let mut mem = [0u8; 32];
563    /// let vslice = VolatileSlice::from(&mut mem[..]);
564    /// let mut buf = [5u8; 16];
565    /// let res = vslice.copy_to(&mut buf[..]);
566    ///
567    /// assert_eq!(16, res);
568    /// for &v in &buf[..] {
569    ///     assert_eq!(v, 0);
570    /// }
571    /// ```
572    pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
573    where
574        T: ByteValued,
575    {
576        // A fast path for u8/i8
577        if size_of::<T>() == 1 {
578            let total = buf.len().min(self.len());
579
580            // SAFETY:
581            // - dst is valid for writes of at least `total`, since total <= buf.len()
582            // - src is valid for reads of at least `total` as total <= self.len()
583            // - The regions are non-overlapping as `src` points to guest memory and `buf` is
584            //   a slice and thus has to live outside of guest memory (there can be more slices to
585            //   guest memory without violating rust's aliasing rules)
586            // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
587            unsafe { copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, self, total) }
588        } else {
589            let count = self.size / size_of::<T>();
590            let source = self.get_array_ref::<T>(0, count).unwrap();
591            source.copy_to(buf)
592        }
593    }
594
595    /// Copies as many bytes as possible from this slice to the provided `slice`.
596    ///
597    /// The copies happen in an undefined order.
598    ///
599    /// # Examples
600    ///
601    /// ```
602    /// # use vm_memory::{VolatileMemory, VolatileSlice};
603    /// #
604    /// # // Create a buffer
605    /// # let mut mem = [0u8; 32];
606    /// #
607    /// # // Get a `VolatileSlice` from the buffer
608    /// # let vslice = VolatileSlice::from(&mut mem[..]);
609    /// #
610    /// vslice.copy_to_volatile_slice(
611    ///     vslice
612    ///         .get_slice(16, 16)
613    ///         .expect("Could not get VolatileSlice"),
614    /// );
615    /// ```
616    pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
617        // SAFETY: Safe because the pointers are range-checked when the slices
618        // are created, and they never escape the VolatileSlices.
619        // FIXME: ... however, is it really okay to mix non-volatile
620        // operations such as copy with read_volatile and write_volatile?
621        unsafe {
622            let count = min(self.size, slice.size);
623            copy(self.addr, slice.addr, count);
624            slice.bitmap.mark_dirty(0, count);
625        }
626    }
627
628    /// Copies as many elements of type `T` as possible from `buf` to this slice.
629    ///
630    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
631    ///
632    /// # Examples
633    ///
634    /// ```
635    /// # use vm_memory::{VolatileMemory, VolatileSlice};
636    /// #
637    /// let mut mem = [0u8; 32];
638    /// let vslice = VolatileSlice::from(&mut mem[..]);
639    ///
640    /// let buf = [5u8; 64];
641    /// vslice.copy_from(&buf[..]);
642    ///
643    /// for i in 0..4 {
644    ///     let val = vslice
645    ///         .get_ref::<u32>(i * 4)
646    ///         .expect("Could not get value")
647    ///         .load();
648    ///     assert_eq!(val, 0x05050505);
649    /// }
650    /// ```
651    pub fn copy_from<T>(&self, buf: &[T])
652    where
653        T: ByteValued,
654    {
655        // A fast path for u8/i8
656        if size_of::<T>() == 1 {
657            let total = buf.len().min(self.len());
658            // SAFETY:
659            // - dst is valid for writes of at least `total`, since total <= self.len()
660            // - src is valid for reads of at least `total` as total <= buf.len()
661            // - The regions are non-overlapping as `dst` points to guest memory and `buf` is
662            //   a slice and thus has to live outside of guest memory (there can be more slices to
663            //   guest memory without violating rust's aliasing rules)
664            // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
665            unsafe { copy_to_volatile_slice(self, buf.as_ptr() as *const u8, total) };
666        } else {
667            let count = self.size / size_of::<T>();
668            // It's ok to use unwrap here because `count` was computed based on the current
669            // length of `self`.
670            let dest = self.get_array_ref::<T>(0, count).unwrap();
671
672            // No need to explicitly call `mark_dirty` after this call because
673            // `VolatileArrayRef::copy_from` already takes care of that.
674            dest.copy_from(buf);
675        };
676    }
677
678    /// Checks if the current slice is aligned at `alignment` bytes.
679    fn check_alignment(&self, alignment: usize) -> Result<()> {
680        // Check that the desired alignment is a power of two.
681        debug_assert!((alignment & (alignment - 1)) == 0);
682        if ((self.addr as usize) & (alignment - 1)) != 0 {
683            return Err(Error::Misaligned {
684                addr: self.addr as usize,
685                alignment,
686            });
687        }
688        Ok(())
689    }
690}
691
692impl<B: BitmapSlice> Bytes<usize> for VolatileSlice<'_, B> {
693    type E = Error;
694
695    /// # Examples
696    /// * Write a slice of size 5 at offset 1020 of a 1024-byte `VolatileSlice`.
697    ///
698    /// ```
699    /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
700    /// #
701    /// let mut mem = [0u8; 1024];
702    /// let vslice = VolatileSlice::from(&mut mem[..]);
703    /// let res = vslice.write(&[1, 2, 3, 4, 5], 1020);
704    ///
705    /// assert!(res.is_ok());
706    /// assert_eq!(res.unwrap(), 4);
707    /// ```
708    fn write(&self, mut buf: &[u8], addr: usize) -> Result<usize> {
709        if buf.is_empty() {
710            return Ok(0);
711        }
712
713        if addr >= self.size {
714            return Err(Error::OutOfBounds { addr });
715        }
716
717        // NOTE: the duality of read <-> write here is correct. This is because we translate a call
718        // "volatile_slice.write(buf)" (e.g. "write to volatile_slice from buf") into
719        // "buf.read_volatile(volatile_slice)" (e.g. read from buf into volatile_slice)
720        buf.read_volatile(&mut self.offset(addr)?)
721    }
722
723    /// # Examples
724    /// * Read a slice of size 16 at offset 1010 of a 1024-byte `VolatileSlice`.
725    ///
726    /// ```
727    /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
728    /// #
729    /// let mut mem = [0u8; 1024];
730    /// let vslice = VolatileSlice::from(&mut mem[..]);
731    /// let buf = &mut [0u8; 16];
732    /// let res = vslice.read(buf, 1010);
733    ///
734    /// assert!(res.is_ok());
735    /// assert_eq!(res.unwrap(), 14);
736    /// ```
737    fn read(&self, mut buf: &mut [u8], addr: usize) -> Result<usize> {
738        if buf.is_empty() {
739            return Ok(0);
740        }
741
742        if addr >= self.size {
743            return Err(Error::OutOfBounds { addr });
744        }
745
746        // NOTE: The duality of read <-> write here is correct. This is because we translate a call
747        // volatile_slice.read(buf) (e.g. read from volatile_slice into buf) into
748        // "buf.write_volatile(volatile_slice)" (e.g. write into buf from volatile_slice)
749        // Both express data transfer from volatile_slice to buf.
750        buf.write_volatile(&self.offset(addr)?)
751    }
752
753    /// # Examples
754    /// * Write a slice at offset 256.
755    ///
756    /// ```
757    /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
758    /// #
759    /// # // Create a buffer
760    /// # let mut mem = [0u8; 1024];
761    /// #
762    /// # // Get a `VolatileSlice` from the buffer
763    /// # let vslice = VolatileSlice::from(&mut mem[..]);
764    /// #
765    /// let res = vslice.write_slice(&[1, 2, 3, 4, 5], 256);
766    ///
767    /// assert!(res.is_ok());
768    /// assert_eq!(res.unwrap(), ());
769    /// ```
770    fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
771        // `mark_dirty` called within `self.write`.
772        let len = self.write(buf, addr)?;
773        if len != buf.len() {
774            return Err(Error::PartialBuffer {
775                expected: buf.len(),
776                completed: len,
777            });
778        }
779        Ok(())
780    }
781
782    /// # Examples
783    /// * Read a slice of size 16 at offset 256.
784    ///
785    /// ```
786    /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice};
787    /// #
788    /// # // Create a buffer
789    /// # let mut mem = [0u8; 1024];
790    /// #
791    /// # // Get a `VolatileSlice` from the buffer
792    /// # let vslice = VolatileSlice::from(&mut mem[..]);
793    /// #
794    /// let buf = &mut [0u8; 16];
795    /// let res = vslice.read_slice(buf, 256);
796    ///
797    /// assert!(res.is_ok());
798    /// ```
799    fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
800        let len = self.read(buf, addr)?;
801        if len != buf.len() {
802            return Err(Error::PartialBuffer {
803                expected: buf.len(),
804                completed: len,
805            });
806        }
807        Ok(())
808    }
809
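    /// # Examples
    ///
    /// A minimal illustration; it relies on the fact that `&[u8]` implements `ReadVolatile`, so a
    /// plain byte slice can act as the source:
    ///
    /// ```
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut mem = [0u8; 16];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let mut src = &[1u8, 2, 3, 4][..];
    /// assert_eq!(vslice.read_volatile_from(8, &mut src, 4).unwrap(), 4);
    ///
    /// let mut out = [0u8; 4];
    /// vslice.read_slice(&mut out, 8).unwrap();
    /// assert_eq!(out, [1, 2, 3, 4]);
    /// ```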
810    fn read_volatile_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
811    where
812        F: ReadVolatile,
813    {
814        let slice = self.offset(addr)?;
815        /* Unwrap safe here because (0, min(len, count)) is definitely a valid subslice */
816        let mut slice = slice.subslice(0, slice.len().min(count)).unwrap();
817        retry_eintr!(src.read_volatile(&mut slice))
818    }
819
820    fn read_exact_volatile_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
821    where
822        F: ReadVolatile,
823    {
824        src.read_exact_volatile(&mut self.get_slice(addr, count)?)
825    }
826
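    /// # Examples
    ///
    /// A minimal illustration; it relies on the fact that `&mut [u8]` implements `WriteVolatile`,
    /// so a plain byte slice can act as the destination:
    ///
    /// ```
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut mem = [0x55u8; 16];
    /// let vslice = VolatileSlice::from(&mut mem[..]);
    /// let mut out = [0u8; 4];
    /// let mut dst = &mut out[..];
    /// assert_eq!(vslice.write_volatile_to(0, &mut dst, 4).unwrap(), 4);
    /// assert_eq!(out, [0x55; 4]);
    /// ```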
827    fn write_volatile_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
828    where
829        F: WriteVolatile,
830    {
831        let slice = self.offset(addr)?;
832        /* Unwrap safe here because (0, min(len, count)) is definitely a valid subslice */
833        let slice = slice.subslice(0, slice.len().min(count)).unwrap();
834        retry_eintr!(dst.write_volatile(&slice))
835    }
836
837    fn write_all_volatile_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
838    where
839        F: WriteVolatile,
840    {
841        dst.write_all_volatile(&self.get_slice(addr, count)?)
842    }
843
844    fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
845        self.get_atomic_ref::<T::A>(addr).map(|r| {
846            r.store(val.into(), order);
847            self.bitmap.mark_dirty(addr, size_of::<T>())
848        })
849    }
850
851    fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
852        self.get_atomic_ref::<T::A>(addr)
853            .map(|r| r.load(order).into())
854    }
855}
856
857impl<B: BitmapSlice> VolatileMemory for VolatileSlice<'_, B> {
858    type B = B;
859
860    fn len(&self) -> usize {
861        self.size
862    }
863
864    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<'_, B>> {
865        self.subslice(offset, count)
866    }
867}
868
869/// A memory location that supports volatile access to an instance of `T`.
870///
871/// # Examples
872///
873/// ```
874/// # use vm_memory::VolatileRef;
875/// #
876/// let mut v = 5u32;
877/// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32 as *mut u8) };
878///
879/// assert_eq!(v, 5);
880/// assert_eq!(v_ref.load(), 5);
881/// v_ref.store(500);
882/// assert_eq!(v, 500);
883/// ```
884#[derive(Clone, Copy, Debug)]
885pub struct VolatileRef<'a, T, B = ()> {
886    addr: *mut Packed<T>,
887    bitmap: B,
888    mmap: Option<&'a MmapInfo>,
889}
890
891impl<T> VolatileRef<'_, T, ()>
892where
893    T: ByteValued,
894{
895    /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`.
896    ///
897    /// # Safety
898    ///
899    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
900    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
901    /// must also guarantee that all other users of the given chunk of memory are using volatile
902    /// accesses.
903    pub unsafe fn new(addr: *mut u8) -> Self {
904        Self::with_bitmap(addr, (), None)
905    }
906}
907
908#[allow(clippy::len_without_is_empty)]
909impl<'a, T, B> VolatileRef<'a, T, B>
910where
911    T: ByteValued,
912    B: BitmapSlice,
913{
914    /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the
915    /// provided `bitmap` object for dirty page tracking.
916    ///
917    /// # Safety
918    ///
919    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
920    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
921    /// must also guarantee that all other users of the given chunk of memory are using volatile
922    /// accesses.
923    pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B, mmap: Option<&'a MmapInfo>) -> Self {
924        VolatileRef {
925            addr: addr as *mut Packed<T>,
926            bitmap,
927            mmap,
928        }
929    }
930
931    /// Returns a guard for the pointer to the underlying memory.
932    pub fn ptr_guard(&self) -> PtrGuard {
933        PtrGuard::read(self.mmap, self.addr as *mut u8, self.len())
934    }
935
936    /// Returns a mutable guard for the pointer to the underlying memory.
937    pub fn ptr_guard_mut(&self) -> PtrGuardMut {
938        PtrGuardMut::write(self.mmap, self.addr as *mut u8, self.len())
939    }
940
941    /// Gets the size of the referenced type `T`.
942    ///
943    /// # Examples
944    ///
945    /// ```
946    /// # use std::mem::size_of;
947    /// # use vm_memory::VolatileRef;
948    /// #
949    /// let v_ref = unsafe { VolatileRef::<u32>::new(0 as *mut _) };
950    /// assert_eq!(v_ref.len(), size_of::<u32>() as usize);
951    /// ```
952    pub fn len(&self) -> usize {
953        size_of::<T>()
954    }
955
956    /// Borrows the inner `BitmapSlice`.
957    pub fn bitmap(&self) -> &B {
958        &self.bitmap
959    }
960
961    /// Does a volatile write of the value `v` to the address of this ref.
962    #[inline(always)]
963    pub fn store(&self, v: T) {
964        let guard = self.ptr_guard_mut();
965
966        // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
967        unsafe { write_volatile(guard.as_ptr() as *mut Packed<T>, Packed::<T>(v)) };
968        self.bitmap.mark_dirty(0, self.len())
969    }
970
971    /// Does a volatile read of the value at the address of this ref.
972    #[inline(always)]
973    pub fn load(&self) -> T {
974        let guard = self.ptr_guard();
975
976        // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
977        // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
978        // in this function with the commented code below and running `cargo test --release`.
979        // unsafe { *(self.addr as *const T) }
980        unsafe { read_volatile(guard.as_ptr() as *const Packed<T>).0 }
981    }
982
983    /// Converts this to a [`VolatileSlice`](struct.VolatileSlice.html) with the same size and
984    /// address.
985    pub fn to_slice(&self) -> VolatileSlice<'a, B> {
986        // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
987        unsafe {
988            VolatileSlice::with_bitmap(
989                self.addr as *mut u8,
990                size_of::<T>(),
991                self.bitmap.clone(),
992                self.mmap,
993            )
994        }
995    }
996}
997
998/// A memory location that supports volatile access to an array of elements of type `T`.
999///
1000/// # Examples
1001///
1002/// ```
1003/// # use vm_memory::VolatileArrayRef;
1004/// #
1005/// let mut v = [5u32; 1];
1006/// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u32 as *mut u8, v.len()) };
1007///
1008/// assert_eq!(v[0], 5);
1009/// assert_eq!(v_ref.load(0), 5);
1010/// v_ref.store(0, 500);
1011/// assert_eq!(v[0], 500);
1012/// ```
1013#[derive(Clone, Copy, Debug)]
1014pub struct VolatileArrayRef<'a, T, B = ()> {
1015    addr: *mut u8,
1016    nelem: usize,
1017    bitmap: B,
1018    phantom: PhantomData<&'a T>,
1019    mmap: Option<&'a MmapInfo>,
1020}
1021
1022impl<T> VolatileArrayRef<'_, T>
1023where
1024    T: ByteValued,
1025{
1026    /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
1027    /// type `T`.
1028    ///
1029    /// # Safety
1030    ///
1031    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
1032    /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
1033    /// `VolatileArrayRef`. The caller must also guarantee that all other users of the given chunk of
1034    /// memory are using volatile accesses.
1035    pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self {
1036        Self::with_bitmap(addr, nelem, (), None)
1037    }
1038}
1039
1040impl<'a, T, B> VolatileArrayRef<'a, T, B>
1041where
1042    T: ByteValued,
1043    B: BitmapSlice,
1044{
1045    /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
1046    /// type `T`, using the provided `bitmap` object for dirty page tracking.
1047    ///
1048    /// # Safety
1049    ///
1050    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
1051    /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
1052    /// `VolatileArrayRef`. The caller must also guarantee that all other users of the given chunk of
1053    /// memory are using volatile accesses.
1054    pub unsafe fn with_bitmap(
1055        addr: *mut u8,
1056        nelem: usize,
1057        bitmap: B,
1058        mmap: Option<&'a MmapInfo>,
1059    ) -> Self {
1060        VolatileArrayRef {
1061            addr,
1062            nelem,
1063            bitmap,
1064            phantom: PhantomData,
1065            mmap,
1066        }
1067    }
1068
1069    /// Returns `true` if this array is empty.
1070    ///
1071    /// # Examples
1072    ///
1073    /// ```
1074    /// # use vm_memory::VolatileArrayRef;
1075    /// #
1076    /// let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
1077    /// assert!(v_array.is_empty());
1078    /// ```
1079    pub fn is_empty(&self) -> bool {
1080        self.nelem == 0
1081    }
1082
1083    /// Returns the number of elements in the array.
1084    ///
1085    /// # Examples
1086    ///
1087    /// ```
1088    /// # use vm_memory::VolatileArrayRef;
1089    /// #
1090    /// # let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 1) };
1091    /// assert_eq!(v_array.len(), 1);
1092    /// ```
1093    pub fn len(&self) -> usize {
1094        self.nelem
1095    }
1096
1097    /// Returns the size of `T`.
1098    ///
1099    /// # Examples
1100    ///
1101    /// ```
1102    /// # use std::mem::size_of;
1103    /// # use vm_memory::VolatileArrayRef;
1104    /// #
1105    /// let v_ref = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
1106    /// assert_eq!(v_ref.element_size(), size_of::<u32>() as usize);
1107    /// ```
1108    pub fn element_size(&self) -> usize {
1109        size_of::<T>()
1110    }
1111
1112    /// Returns a guard for the pointer to the underlying memory.
1113    pub fn ptr_guard(&self) -> PtrGuard {
1114        PtrGuard::read(self.mmap, self.addr, self.len())
1115    }
1116
1117    /// Returns a mutable guard for the pointer to the underlying memory.
1118    pub fn ptr_guard_mut(&self) -> PtrGuardMut {
1119        PtrGuardMut::write(self.mmap, self.addr, self.len())
1120    }
1121
1122    /// Borrows the inner `BitmapSlice`.
1123    pub fn bitmap(&self) -> &B {
1124        &self.bitmap
1125    }
1126
1127    /// Converts this to a `VolatileSlice` with the same size and address.
1128    pub fn to_slice(&self) -> VolatileSlice<'a, B> {
1129        // SAFETY: Safe as long as the caller validated addr when creating this object.
1130        unsafe {
1131            VolatileSlice::with_bitmap(
1132                self.addr,
1133                self.nelem * self.element_size(),
1134                self.bitmap.clone(),
1135                self.mmap,
1136            )
1137        }
1138    }
1139
1140    /// Returns a [`VolatileRef`](struct.VolatileRef.html) to the element at `index`.
1141    ///
1142    /// # Panics
1143    ///
1144    /// Panics if `index` is not less than the number of elements of the array to which `&self` points.
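    ///
    /// # Examples
    ///
    /// A minimal illustration (the element type and values are arbitrary):
    ///
    /// ```
    /// # use vm_memory::VolatileArrayRef;
    /// let mut v = [1u16, 2, 3];
    /// // SAFETY (for the example): `v` provides 3 * 2 valid bytes that outlive `arr`.
    /// let arr = unsafe { VolatileArrayRef::<u16>::new(v.as_mut_ptr() as *mut u8, v.len()) };
    /// arr.ref_at(2).store(30);
    /// assert_eq!(arr.load(2), 30);
    /// ```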
1145    pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> {
1146        assert!(index < self.nelem);
1147        // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
1148        // memory of the VolatileArrayRef.
1149        unsafe {
1150            // byteofs must fit in an isize as it was checked in get_array_ref.
1151            let byteofs = (self.element_size() * index) as isize;
1152            let ptr = self.addr.offset(byteofs);
1153            VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize), self.mmap)
1154        }
1155    }
1156
1157    /// Does a volatile read of the element at `index`.
1158    pub fn load(&self, index: usize) -> T {
1159        self.ref_at(index).load()
1160    }
1161
1162    /// Does a volatile write of the element at `index`.
1163    pub fn store(&self, index: usize, value: T) {
1164        // The `VolatileRef::store` call below implements the required dirty bitmap tracking logic,
1165        // so no need to do that in this method as well.
1166        self.ref_at(index).store(value)
1167    }
1168
1169    /// Copies as many elements of type `T` as possible from this array to `buf`.
1170    ///
1171    /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
1172    /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
1173    /// using volatile reads.
1174    ///
1175    /// # Examples
1176    ///
1177    /// ```
1178    /// # use vm_memory::VolatileArrayRef;
1179    /// #
1180    /// let mut v = [0u8; 32];
1181    /// let v_ref = unsafe { VolatileArrayRef::new(v.as_mut_ptr(), v.len()) };
1182    ///
1183    /// let mut buf = [5u8; 16];
1184    /// v_ref.copy_to(&mut buf[..]);
1185    /// for &v in &buf[..] {
1186    ///     assert_eq!(v, 0);
1187    /// }
1188    /// ```
1189    pub fn copy_to(&self, buf: &mut [T]) -> usize {
1190        // A fast path for u8/i8
1191        if size_of::<T>() == 1 {
1192            let source = self.to_slice();
1193            let total = buf.len().min(source.len());
1194
1195            // SAFETY:
1196            // - dst is valid for writes of at least `total`, since total <= buf.len()
1197            // - src is valid for reads of at least `total` as total <= source.len()
1198            // - The regions are non-overlapping as `src` points to guest memory and `buf` is
1199            //   a slice and thus has to live outside of guest memory (there can be more slices to
1200            //   guest memory without violating rust's aliasing rules)
1201            // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine
1202            return unsafe {
1203                copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, &source, total)
1204            };
1205        }
1206
1207        let guard = self.ptr_guard();
1208        let mut ptr = guard.as_ptr() as *const Packed<T>;
1209        let start = ptr;
1210
1211        for v in buf.iter_mut().take(self.len()) {
1212            // SAFETY: read_volatile is safe because the pointers are range-checked when
1213            // the slices are created, and they never escape the VolatileSlices.
1214            // ptr::add is safe because get_array_ref() validated that
1215            // size_of::<T>() * self.len() fits in an isize.
1216            unsafe {
1217                *v = read_volatile(ptr).0;
1218                ptr = ptr.add(1);
1219            }
1220        }
1221
1222        // SAFETY: It is guaranteed that start and ptr point to the regions of the same slice.
1223        unsafe { ptr.offset_from(start) as usize }
1224    }
1225
1226    /// Copies as many bytes as possible from this slice to the provided `slice`.
1227    ///
1228    /// The copies happen in an undefined order.
1229    ///
1230    /// # Examples
1231    ///
1232    /// ```
1233    /// # use vm_memory::VolatileArrayRef;
1234    /// #
1235    /// let mut v = [0u8; 32];
1236    /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
1237    /// let mut buf = [5u8; 16];
1238    /// let v_ref2 = unsafe { VolatileArrayRef::<u8>::new(buf.as_mut_ptr(), buf.len()) };
1239    ///
1240    /// v_ref.copy_to_volatile_slice(v_ref2.to_slice());
1241    /// for &v in &buf[..] {
1242    ///     assert_eq!(v, 0);
1243    /// }
1244    /// ```
1245    pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
1246        // SAFETY: Safe because the pointers are range-checked when the slices
1247        // are created, and they never escape the VolatileSlices.
1248        // FIXME: ... however, is it really okay to mix non-volatile
1249        // operations such as copy with read_volatile and write_volatile?
1250        unsafe {
1251            let count = min(self.len() * self.element_size(), slice.size);
1252            copy(self.addr, slice.addr, count);
1253            slice.bitmap.mark_dirty(0, count);
1254        }
1255    }
1256
1257    /// Copies as many elements of type `T` as possible from `buf` to this slice.
1258    ///
1259    /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
1260    /// to this slice's memory. The copy happens from smallest to largest address in
1261    /// `T` sized chunks using volatile writes.
1262    ///
1263    /// # Examples
1264    ///
1265    /// ```
1266    /// # use vm_memory::VolatileArrayRef;
1267    /// #
1268    /// let mut v = [0u8; 32];
1269    /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(v.as_mut_ptr(), v.len()) };
1270    ///
1271    /// let buf = [5u8; 64];
1272    /// v_ref.copy_from(&buf[..]);
1273    /// for &val in &v[..] {
1274    ///     assert_eq!(5u8, val);
1275    /// }
1276    /// ```
1277    pub fn copy_from(&self, buf: &[T]) {
1278        // A fast path for u8/i8
1279        if size_of::<T>() == 1 {
1280            let destination = self.to_slice();
1281            let total = buf.len().min(destination.len());
1282
1283            // absurd formatting brought to you by clippy
1284            // SAFETY:
1285            // - dst is valid for writes of at least `total`, since total <= destination.len()
1286            // - src is valid for reads of at least `total` as total <= buf.len()
1287            // - The regions are non-overlapping as `dst` points to guest memory and `buf` is
1288            //   a slice and thus has to live outside of guest memory (there can be more slices to
1289            //   guest memory without violating rust's aliasing rules)
1290            // - size is always a multiple of alignment, so treating *const T as *const u8 is fine
1291            unsafe { copy_to_volatile_slice(&destination, buf.as_ptr() as *const u8, total) };
1292        } else {
1293            let guard = self.ptr_guard_mut();
1294            let start = guard.as_ptr();
1295            let mut ptr = start as *mut Packed<T>;
1296
1297            for &v in buf.iter().take(self.len()) {
1298                // SAFETY: write_volatile is safe because the pointers are range-checked when
1299                // the slices are created, and they never escape the VolatileSlices.
1300                // ptr::add is safe because get_array_ref() validated that
1301                // size_of::<T>() * self.len() fits in an isize.
1302                unsafe {
1303                    write_volatile(ptr, Packed::<T>(v));
1304                    ptr = ptr.add(1);
1305                }
1306            }
1307
1308            self.bitmap.mark_dirty(0, ptr as usize - start as usize);
1309        }
1310    }
1311}
1312
1313impl<'a, B: BitmapSlice> From<VolatileSlice<'a, B>> for VolatileArrayRef<'a, u8, B> {
1314    fn from(slice: VolatileSlice<'a, B>) -> Self {
1315        // SAFETY: Safe because the result has the same lifetime and points to the same
1316        // memory as the incoming VolatileSlice.
1317        unsafe { VolatileArrayRef::with_bitmap(slice.addr, slice.len(), slice.bitmap, slice.mmap) }
1318    }
1319}
1320
1321// Return the largest power of two that `addr` is aligned to, e.g. alignment(12) == 4. Forcing this
1322// function to return 1 will cause test_non_atomic_access to fail.
1323fn alignment(addr: usize) -> usize {
1324    // Rust's unsigned types have no unary `-`, so `addr & -addr` is written via two's complement as `addr & (!addr + 1)`.
1325    addr & (!addr + 1)
1326}
1327
1328pub(crate) mod copy_slice_impl {
1329    use super::*;
1330
1331    // SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
1332    // - `src_addr` and `dst_addr` must be valid for reads/writes.
1333    // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
1334    // - `src_addr` must point to a properly initialized value, which is true here because
1335    //   we're only using integer primitives.
1336    unsafe fn copy_single(align: usize, src_addr: *const u8, dst_addr: *mut u8) {
1337        match align {
1338            8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
1339            4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
1340            2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
1341            1 => write_volatile(dst_addr, read_volatile(src_addr)),
1342            _ => unreachable!(),
1343        }
1344    }
1345
1346    /// Copies `total` bytes from `src` to `dst` using a loop of volatile reads and writes
1347    ///
1348    /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
1349    /// length `total`. The regions must not overlap.
1350    unsafe fn copy_slice_volatile(mut dst: *mut u8, mut src: *const u8, total: usize) -> usize {
1351        let mut left = total;
1352
1353        let align = min(alignment(src as usize), alignment(dst as usize));
1354
1355        let mut copy_aligned_slice = |min_align| {
1356            if align < min_align {
1357                return;
1358            }
1359
1360            while left >= min_align {
1361                // SAFETY: Safe because we check alignment beforehand, the memory areas are valid
1362                // for reads/writes, and the source always contains a valid value.
1363                unsafe { copy_single(min_align, src, dst) };
1364
1365                left -= min_align;
1366
1367                if left == 0 {
1368                    break;
1369                }
1370
1371                // SAFETY: We only explain the invariants for `src`, the argument for `dst` is
1372                // analogous.
1373                // - `src` and `src + min_align` are within (or one byte past) the same allocated object
1374                //   This is given by the invariant on this function ensuring that [src, src + total)
1375                //   are part of the same allocated object, and the condition on the while loop
1376                //   ensures that we do not go outside this object
1377                // - The computed offset in bytes cannot overflow isize, because `min_align` is at
1378                //   most 8 when the closure is called (see below)
1379                // - The sum `src as usize + min_align` can only wrap around if src as usize + min_align - 1 == usize::MAX,
1380                //   however in this case, left == 0, and we'll have exited the loop above.
1381                unsafe {
1382                    src = src.add(min_align);
1383                    dst = dst.add(min_align);
1384                }
1385            }
1386        };
1387
1388        if size_of::<usize>() > 4 {
1389            copy_aligned_slice(8);
1390        }
1391        copy_aligned_slice(4);
1392        copy_aligned_slice(2);
1393        copy_aligned_slice(1);
1394
1395        total
1396    }
1397
1398    /// Copies `total` bytes from `src` to `dst`
1399    ///
1400    /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
1401    /// length `total`. The regions must not overlap.
1402    unsafe fn copy_slice(dst: *mut u8, src: *const u8, total: usize) -> usize {
1403        if total <= size_of::<usize>() {
1404            // SAFETY: Invariants of copy_slice_volatile are the same as invariants of copy_slice
1405            unsafe {
1406                copy_slice_volatile(dst, src, total);
1407            };
1408        } else {
1409            // SAFETY:
1410            // - Both src and dst are allocated for reads/writes of length `total` by function
1411            //   invariant
1412            // - src and dst are properly aligned, as any alignment is valid for u8
1413            // - The regions are not overlapping by function invariant
1414            unsafe {
1415                std::ptr::copy_nonoverlapping(src, dst, total);
1416            }
1417        }
1418
1419        total
1420    }
1421
1422    /// Copies `total` bytes from `slice` to `dst`
1423    ///
1424    /// SAFETY: `slice` and `dst` must point to a contiguously allocated memory region of at
1425    /// least length `total`. The regions must not overlap.
1426    pub(crate) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
1427        dst: *mut u8,
1428        slice: &VolatileSlice<'_, B>,
1429        total: usize,
1430    ) -> usize {
1431        let guard = slice.ptr_guard();
1432
1433        // SAFETY: guaranteed by function invariants.
1434        copy_slice(dst, guard.as_ptr(), total)
1435    }
1436
1437    /// Copies `total` bytes from `src` to `slice`.
1438    ///
1439    /// SAFETY: `slice` and `src` must each point to a contiguously allocated memory region of at
1440    /// least length `total`. The regions must not overlap.
1441    pub(crate) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
1442        slice: &VolatileSlice<'_, B>,
1443        src: *const u8,
1444        total: usize,
1445    ) -> usize {
1446        let guard = slice.ptr_guard_mut();
1447
1448        // SAFETY: guaranteed by function invariants.
1449        let count = copy_slice(guard.as_ptr(), src, total);
1450        slice.bitmap.mark_dirty(0, count);
1451        count
1452    }
1453}
1454
1455#[cfg(test)]
1456mod tests {
1457    #![allow(clippy::undocumented_unsafe_blocks)]
1458
1459    use super::*;
1460    use std::alloc::Layout;
1461
1462    #[cfg(feature = "rawfd")]
1463    use std::fs::File;
1464    #[cfg(feature = "backend-bitmap")]
1465    use std::mem::size_of_val;
1466    #[cfg(feature = "rawfd")]
1467    use std::path::Path;
1468    use std::sync::atomic::{AtomicUsize, Ordering};
1469    use std::sync::{Arc, Barrier};
1470    use std::thread::spawn;
1471
1472    use matches::assert_matches;
1473    #[cfg(feature = "backend-bitmap")]
1474    use std::num::NonZeroUsize;
1475    #[cfg(feature = "rawfd")]
1476    use vmm_sys_util::tempfile::TempFile;
1477
1478    #[cfg(feature = "backend-bitmap")]
1479    use crate::bitmap::tests::{
1480        check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory,
1481    };
1482    #[cfg(feature = "backend-bitmap")]
1483    use crate::bitmap::{AtomicBitmap, RefSlice};
1484
1485    #[cfg(feature = "backend-bitmap")]
1486    const DEFAULT_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(0x1000).unwrap();
1487
1488    #[test]
1489    fn test_compute_end_offset() {
1490        let mut array = [1, 2, 3, 4, 5];
1491        let slice = VolatileSlice::from(array.as_mut_slice());
1492
1493        // Iterate over all valid ranges, assert that they pass validation.
1494        // This includes edge cases such as len = 0 and base = 5!
1495        for len in 0..slice.len() {
1496            for base in 0..=slice.len() - len {
1497                assert_eq!(
1498                    slice.compute_end_offset(base, len).unwrap(),
1499                    len + base,
1500                    "compute_end_offset rejected valid base/offset pair {base} + {len}"
1501                );
1502            }
1503        }
1504
1505        // Check invalid configurations
1506        slice.compute_end_offset(5, 1).unwrap_err();
1507        slice.compute_end_offset(6, 0).unwrap_err();
1508    }
1509
1510    #[test]
1511    fn misaligned_ref() {
1512        let mut a = [0u8; 3];
1513        let a_ref = VolatileSlice::from(&mut a[..]);
1514        unsafe {
1515            assert!(
1516                a_ref.aligned_as_ref::<u16>(0).is_err() ^ a_ref.aligned_as_ref::<u16>(1).is_err()
1517            );
1518            assert!(
1519                a_ref.aligned_as_mut::<u16>(0).is_err() ^ a_ref.aligned_as_mut::<u16>(1).is_err()
1520            );
1521        }
1522    }
1523
1524    #[test]
1525    fn atomic_store() {
1526        let mut a = [0usize; 1];
1527        {
1528            let a_ref = unsafe {
1529                VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
1530            };
1531            let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
1532            atomic.store(2usize, Ordering::Relaxed)
1533        }
1534        assert_eq!(a[0], 2);
1535    }
1536
1537    #[test]
1538    fn atomic_load() {
1539        let mut a = [5usize; 1];
1540        {
1541            let a_ref = unsafe {
1542                VolatileSlice::new(&mut a[0] as *mut usize as *mut u8,
1543                                   size_of::<usize>())
1544            };
1545            let atomic = {
1546                let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
1547                assert_eq!(atomic.load(Ordering::Relaxed), 5usize);
1548                atomic
1549            };
1550            // To make sure we can take the atomic out of the scope we made it in:
1551            atomic.load(Ordering::Relaxed);
1552            // but not too far:
1553            // atomicu8
1554        } //.load(std::sync::atomic::Ordering::Relaxed)
1555        ;
1556    }
1557
1558    #[test]
1559    fn misaligned_atomic() {
1560        let mut a = [5usize, 5usize];
1561        let a_ref =
1562            unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>()) };
1563        assert!(a_ref.get_atomic_ref::<AtomicUsize>(0).is_ok());
1564        assert!(a_ref.get_atomic_ref::<AtomicUsize>(1).is_err());
1565    }
1566
1567    #[test]
1568    fn ref_store() {
1569        let mut a = [0u8; 1];
1570        {
1571            let a_ref = VolatileSlice::from(&mut a[..]);
1572            let v_ref = a_ref.get_ref(0).unwrap();
1573            v_ref.store(2u8);
1574        }
1575        assert_eq!(a[0], 2);
1576    }
1577
1578    #[test]
1579    fn ref_load() {
1580        let mut a = [5u8; 1];
1581        {
1582            let a_ref = VolatileSlice::from(&mut a[..]);
1583            let c = {
1584                let v_ref = a_ref.get_ref::<u8>(0).unwrap();
1585                assert_eq!(v_ref.load(), 5u8);
1586                v_ref
1587            };
1588            // To make sure we can take a v_ref out of the scope we made it in:
1589            c.load();
1590            // but not too far:
1591            // c
1592        } //.load()
1593        ;
1594    }
1595
1596    #[test]
1597    fn ref_to_slice() {
1598        let mut a = [1u8; 5];
1599        let a_ref = VolatileSlice::from(&mut a[..]);
1600        let v_ref = a_ref.get_ref(1).unwrap();
1601        v_ref.store(0x1234_5678u32);
1602        let ref_slice = v_ref.to_slice();
1603        assert_eq!(v_ref.addr as usize, ref_slice.addr as usize);
1604        assert_eq!(v_ref.len(), ref_slice.len());
1605        assert!(!ref_slice.is_empty());
1606    }
1607
1608    #[test]
1609    fn observe_mutate() {
1610        struct RawMemory(*mut u8);
1611
1612        // SAFETY: we use proper synchronization below
1613        unsafe impl Send for RawMemory {}
1614        unsafe impl Sync for RawMemory {}
1615
1616        let mem = Arc::new(RawMemory(unsafe {
1617            std::alloc::alloc(Layout::from_size_align(1, 1).unwrap())
1618        }));
1619
1620        let outside_slice = unsafe { VolatileSlice::new(Arc::clone(&mem).0, 1) };
1621        let inside_arc = Arc::clone(&mem);
1622
1623        let v_ref = outside_slice.get_ref::<u8>(0).unwrap();
1624        let barrier = Arc::new(Barrier::new(2));
1625        let barrier1 = barrier.clone();
1626
1627        v_ref.store(99);
1628        spawn(move || {
1629            barrier1.wait();
1630            let inside_slice = unsafe { VolatileSlice::new(inside_arc.0, 1) };
1631            let clone_v_ref = inside_slice.get_ref::<u8>(0).unwrap();
1632            clone_v_ref.store(0);
1633            barrier1.wait();
1634        });
1635
1636        assert_eq!(v_ref.load(), 99);
1637        barrier.wait();
1638        barrier.wait();
1639        assert_eq!(v_ref.load(), 0);
1640
1641        unsafe { std::alloc::dealloc(mem.0, Layout::from_size_align(1, 1).unwrap()) }
1642    }
1643
1644    #[test]
1645    fn mem_is_empty() {
1646        let mut backing = vec![0u8; 100];
1647        let a = VolatileSlice::from(backing.as_mut_slice());
1648        assert!(!a.is_empty());
1649
1650        let mut backing = vec![];
1651        let a = VolatileSlice::from(backing.as_mut_slice());
1652        assert!(a.is_empty());
1653    }
1654
1655    #[test]
1656    fn slice_len() {
1657        let mut backing = vec![0u8; 100];
1658        let mem = VolatileSlice::from(backing.as_mut_slice());
1659        let slice = mem.get_slice(0, 27).unwrap();
1660        assert_eq!(slice.len(), 27);
1661        assert!(!slice.is_empty());
1662
1663        let slice = mem.get_slice(34, 27).unwrap();
1664        assert_eq!(slice.len(), 27);
1665        assert!(!slice.is_empty());
1666
1667        let slice = slice.get_slice(20, 5).unwrap();
1668        assert_eq!(slice.len(), 5);
1669        assert!(!slice.is_empty());
1670
1671        let slice = mem.get_slice(34, 0).unwrap();
1672        assert!(slice.is_empty());
1673    }
1674
1675    #[test]
1676    fn slice_subslice() {
1677        let mut backing = vec![0u8; 100];
1678        let mem = VolatileSlice::from(backing.as_mut_slice());
1679        let slice = mem.get_slice(0, 100).unwrap();
1680        assert!(slice.write(&[1; 80], 10).is_ok());
1681
1682        assert!(slice.subslice(0, 0).is_ok());
1683        assert!(slice.subslice(0, 101).is_err());
1684
1685        assert!(slice.subslice(99, 0).is_ok());
1686        assert!(slice.subslice(99, 1).is_ok());
1687        assert!(slice.subslice(99, 2).is_err());
1688
1689        assert!(slice.subslice(100, 0).is_ok());
1690        assert!(slice.subslice(100, 1).is_err());
1691
1692        assert!(slice.subslice(101, 0).is_err());
1693        assert!(slice.subslice(101, 1).is_err());
1694
1695        assert!(slice.subslice(usize::MAX, 2).is_err());
1696        assert!(slice.subslice(2, usize::MAX).is_err());
1697
1698        let maybe_offset_slice = slice.subslice(10, 80);
1699        assert!(maybe_offset_slice.is_ok());
1700        let offset_slice = maybe_offset_slice.unwrap();
1701        assert_eq!(offset_slice.len(), 80);
1702
1703        let mut buf = [0; 80];
1704        assert!(offset_slice.read(&mut buf, 0).is_ok());
1705        assert_eq!(&buf[0..80], &[1; 80][0..80]);
1706    }
1707
1708    #[test]
1709    fn slice_offset() {
1710        let mut backing = vec![0u8; 100];
1711        let mem = VolatileSlice::from(backing.as_mut_slice());
1712        let slice = mem.get_slice(0, 100).unwrap();
1713        assert!(slice.write(&[1; 80], 10).is_ok());
1714
1715        assert!(slice.offset(101).is_err());
1716
1717        let maybe_offset_slice = slice.offset(10);
1718        assert!(maybe_offset_slice.is_ok());
1719        let offset_slice = maybe_offset_slice.unwrap();
1720        assert_eq!(offset_slice.len(), 90);
1721        let mut buf = [0; 90];
1722        assert!(offset_slice.read(&mut buf, 0).is_ok());
1723        assert_eq!(&buf[0..80], &[1; 80][0..80]);
1724        assert_eq!(&buf[80..90], &[0; 10][0..10]);
1725    }
1726
1727    #[test]
1728    fn slice_copy_to_u8() {
1729        let mut a = [2u8, 4, 6, 8, 10];
1730        let mut b = [0u8; 4];
1731        let mut c = [0u8; 6];
1732        let a_ref = VolatileSlice::from(&mut a[..]);
1733        let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap();
1734        v_ref.copy_to(&mut b[..]);
1735        v_ref.copy_to(&mut c[..]);
1736        assert_eq!(b[0..4], a[0..4]);
1737        assert_eq!(c[0..5], a[0..5]);
1738    }
1739
1740    #[test]
1741    fn slice_copy_to_u16() {
1742        let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5];
1743        let mut b = [0u16; 4];
1744        let mut c = [0u16; 6];
1745        let a_ref = &mut a[..];
1746        let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut u8, 9) };
1747
1748        v_ref.copy_to(&mut b[..]);
1749        v_ref.copy_to(&mut c[..]);
1750        assert_eq!(b[0..4], a_ref[0..4]);
1751        assert_eq!(c[0..4], a_ref[0..4]);
1752        assert_eq!(c[4], 0);
1753    }
1754
1755    #[test]
1756    fn slice_copy_from_u8() {
1757        let a = [2u8, 4, 6, 8, 10];
1758        let mut b = [0u8; 4];
1759        let mut c = [0u8; 6];
1760        let b_ref = VolatileSlice::from(&mut b[..]);
1761        let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap();
1762        v_ref.copy_from(&a[..]);
1763        assert_eq!(b[0..4], a[0..4]);
1764
1765        let c_ref = VolatileSlice::from(&mut c[..]);
1766        let v_ref = c_ref.get_slice(0, c_ref.len()).unwrap();
1767        v_ref.copy_from(&a[..]);
1768        assert_eq!(c[0..5], a[0..5]);
1769    }
1770
1771    #[test]
1772    fn slice_copy_from_u16() {
1773        let a = [2u16, 4, 6, 8, 10];
1774        let mut b = [0u16; 4];
1775        let mut c = [0u16; 6];
1776        let b_ref = &mut b[..];
1777        let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) };
1778        v_ref.copy_from(&a[..]);
1779        assert_eq!(b_ref[0..4], a[0..4]);
1780
1781        let c_ref = &mut c[..];
1782        let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) };
1783        v_ref.copy_from(&a[..]);
1784        assert_eq!(c_ref[0..4], a[0..4]);
1785        assert_eq!(c_ref[4], 0);
1786    }
1787
1788    #[test]
1789    fn slice_copy_to_volatile_slice() {
1790        let mut a = [2u8, 4, 6, 8, 10];
1791        let a_ref = VolatileSlice::from(&mut a[..]);
1792        let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
1793
1794        let mut b = [0u8; 4];
1795        let b_ref = VolatileSlice::from(&mut b[..]);
1796        let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap();
1797
1798        a_slice.copy_to_volatile_slice(b_slice);
1799        assert_eq!(b, [2, 4, 6, 8]);
1800    }
1801
1802    #[test]
1803    fn slice_overflow_error() {
1804        let mut backing = vec![0u8];
1805        let a = VolatileSlice::from(backing.as_mut_slice());
1806        let res = a.get_slice(usize::MAX, 1).unwrap_err();
1807        assert_matches!(
1808            res,
1809            Error::Overflow {
1810                base: usize::MAX,
1811                offset: 1,
1812            }
1813        );
1814    }
1815
1816    #[test]
1817    fn slice_oob_error() {
1818        let mut backing = vec![0u8; 100];
1819        let a = VolatileSlice::from(backing.as_mut_slice());
1820        a.get_slice(50, 50).unwrap();
1821        let res = a.get_slice(55, 50).unwrap_err();
1822        assert_matches!(res, Error::OutOfBounds { addr: 105 });
1823    }
1824
1825    #[test]
1826    fn ref_overflow_error() {
1827        let mut backing = vec![0u8];
1828        let a = VolatileSlice::from(backing.as_mut_slice());
1829        let res = a.get_ref::<u8>(usize::MAX).unwrap_err();
1830        assert_matches!(
1831            res,
1832            Error::Overflow {
1833                base: usize::MAX,
1834                offset: 1,
1835            }
1836        );
1837    }
1838
1839    #[test]
1840    fn ref_oob_error() {
1841        let mut backing = vec![0u8; 100];
1842        let a = VolatileSlice::from(backing.as_mut_slice());
1843        a.get_ref::<u8>(99).unwrap();
1844        let res = a.get_ref::<u16>(99).unwrap_err();
1845        assert_matches!(res, Error::OutOfBounds { addr: 101 });
1846    }
1847
1848    #[test]
1849    fn ref_oob_too_large() {
1850        let mut backing = vec![0u8; 3];
1851        let a = VolatileSlice::from(backing.as_mut_slice());
1852        let res = a.get_ref::<u32>(0).unwrap_err();
1853        assert_matches!(res, Error::OutOfBounds { addr: 4 });
1854    }
1855
1856    #[test]
1857    fn slice_store() {
1858        let mut backing = vec![0u8; 5];
1859        let a = VolatileSlice::from(backing.as_mut_slice());
1860        let s = a.as_volatile_slice();
1861        let r = a.get_ref(2).unwrap();
1862        r.store(9u16);
1863        assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
1864    }
1865
1866    #[test]
1867    fn test_write_past_end() {
1868        let mut backing = vec![0u8; 5];
1869        let a = VolatileSlice::from(backing.as_mut_slice());
1870        let s = a.as_volatile_slice();
1871        let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
1872        assert!(res.is_ok());
1873        assert_eq!(res.unwrap(), 5);
1874    }
1875
1876    #[test]
1877    fn slice_read_and_write() {
1878        let mut backing = vec![0u8; 5];
1879        let a = VolatileSlice::from(backing.as_mut_slice());
1880        let s = a.as_volatile_slice();
1881        let sample_buf = [1, 2, 3];
1882        assert!(s.write(&sample_buf, 5).is_err());
1883        assert!(s.write(&sample_buf, 2).is_ok());
1884        let mut buf = [0u8; 3];
1885        assert!(s.read(&mut buf, 5).is_err());
1886        assert!(s.read_slice(&mut buf, 2).is_ok());
1887        assert_eq!(buf, sample_buf);
1888
1889        // Writing an empty buffer works even past the end of the volatile slice.
1890        assert_eq!(s.write(&[], 100).unwrap(), 0);
1891        let buf: &mut [u8] = &mut [];
1892        assert_eq!(s.read(buf, 4).unwrap(), 0);
1893
1894        // Check that reading and writing an empty buffer does not yield an error.
1895        let mut backing = Vec::new();
1896        let empty_mem = VolatileSlice::from(backing.as_mut_slice());
1897        let empty = empty_mem.as_volatile_slice();
1898        assert_eq!(empty.write(&[], 1).unwrap(), 0);
1899        assert_eq!(empty.read(buf, 1).unwrap(), 0);
1900    }
1901
1902    #[test]
1903    fn obj_read_and_write() {
1904        let mut backing = vec![0u8; 5];
1905        let a = VolatileSlice::from(backing.as_mut_slice());
1906        let s = a.as_volatile_slice();
1907        assert!(s.write_obj(55u16, 4).is_err());
1908        assert!(s.write_obj(55u16, usize::MAX).is_err());
1909        assert!(s.write_obj(55u16, 2).is_ok());
1910        assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
1911        assert!(s.read_obj::<u16>(4).is_err());
1912        assert!(s.read_obj::<u16>(usize::MAX).is_err());
1913    }
1914
1915    #[test]
1916    #[cfg(feature = "rawfd")]
1917    fn mem_read_and_write() {
1918        let mut backing = vec![0u8; 5];
1919        let a = VolatileSlice::from(backing.as_mut_slice());
1920        let s = a.as_volatile_slice();
1921        assert!(s.write_obj(!0u32, 1).is_ok());
1922        let mut file = if cfg!(target_family = "unix") {
1923            File::open(Path::new("/dev/zero")).unwrap()
1924        } else {
1925            File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
1926        };
1927
1928        assert!(file
1929            .read_exact_volatile(&mut s.get_slice(1, size_of::<u32>()).unwrap())
1930            .is_ok());
1931
1932        let mut f = TempFile::new().unwrap().into_file();
1933        assert!(f
1934            .read_exact_volatile(&mut s.get_slice(1, size_of::<u32>()).unwrap())
1935            .is_err());
1936
1937        let value = s.read_obj::<u32>(1).unwrap();
1938        if cfg!(target_family = "unix") {
1939            assert_eq!(value, 0);
1940        } else {
1941            assert_eq!(value, 0x0090_5a4d);
1942        }
1943
1944        let mut sink = vec![0; size_of::<u32>()];
1945        assert!(sink
1946            .as_mut_slice()
1947            .write_all_volatile(&s.get_slice(1, size_of::<u32>()).unwrap())
1948            .is_ok());
1949
1950        if cfg!(target_family = "unix") {
1951            assert_eq!(sink, vec![0; size_of::<u32>()]);
1952        } else {
1953            assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
1954        };
1955    }
1956
1957    #[test]
1958    fn unaligned_read_and_write() {
1959        let mut backing = vec![0u8; 7];
1960        let a = VolatileSlice::from(backing.as_mut_slice());
1961        let s = a.as_volatile_slice();
1962        let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
1963        assert!(s.write_slice(&sample_buf, 0).is_ok());
1964        let r = a.get_ref::<u32>(2).unwrap();
1965        assert_eq!(r.load(), 0xAAAA_AAAA);
1966
1967        r.store(0x5555_5555);
1968        let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
1969        let mut buf: [u8; 7] = Default::default();
1970        assert!(s.read_slice(&mut buf, 0).is_ok());
1971        assert_eq!(buf, sample_buf);
1972    }
1973
1974    #[test]
1975    fn test_read_from_exceeds_size() {
1976        #[derive(Debug, Default, Copy, Clone)]
1977        struct BytesToRead {
1978            _val1: u128, // 16 bytes
1979            _val2: u128, // 16 bytes
1980        }
1981        unsafe impl ByteValued for BytesToRead {}
1982        let cursor_size = 20;
1983        let image = vec![1u8; cursor_size];
1984
1985        // Trying to read more bytes than are available in `image`
1986        // makes `read_volatile` return the source vec's size (i.e. 20).
1987        let mut bytes_to_read = BytesToRead::default();
1988        assert_eq!(
1989            image
1990                .as_slice()
1991                .read_volatile(&mut bytes_to_read.as_bytes())
1992                .unwrap(),
1993            cursor_size
1994        );
1995    }
1996
1997    #[test]
1998    fn ref_array_from_slice() {
1999        let mut a = [2, 4, 6, 8, 10];
2000        let a_vec = a.to_vec();
2001        let a_ref = VolatileSlice::from(&mut a[..]);
2002        let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
2003        let a_array_ref: VolatileArrayRef<u8, ()> = a_slice.into();
2004        for (i, entry) in a_vec.iter().enumerate() {
2005            assert_eq!(&a_array_ref.load(i), entry);
2006        }
2007    }
2008
2009    #[test]
2010    fn ref_array_store() {
2011        let mut a = [0u8; 5];
2012        {
2013            let a_ref = VolatileSlice::from(&mut a[..]);
2014            let v_ref = a_ref.get_array_ref(1, 4).unwrap();
2015            v_ref.store(1, 2u8);
2016            v_ref.store(2, 4u8);
2017            v_ref.store(3, 6u8);
2018        }
2019        let expected = [2u8, 4u8, 6u8];
2020        assert_eq!(a[2..=4], expected);
2021    }
2022
2023    #[test]
2024    fn ref_array_load() {
2025        let mut a = [0, 0, 2, 3, 10];
2026        {
2027            let a_ref = VolatileSlice::from(&mut a[..]);
2028            let c = {
2029                let v_ref = a_ref.get_array_ref::<u8>(1, 4).unwrap();
2030                assert_eq!(v_ref.load(1), 2u8);
2031                assert_eq!(v_ref.load(2), 3u8);
2032                assert_eq!(v_ref.load(3), 10u8);
2033                v_ref
2034            };
2035            // To make sure we can take a v_ref out of the scope we made it in:
2036            c.load(0);
2037            // but not too far:
2038            // c
2039        } //.load()
2040        ;
2041    }
2042
2043    #[test]
2044    fn ref_array_overflow() {
2045        let mut a = [0, 0, 2, 3, 10];
2046        let a_ref = VolatileSlice::from(&mut a[..]);
2047        let res = a_ref.get_array_ref::<u32>(4, usize::MAX).unwrap_err();
2048        assert_matches!(
2049            res,
2050            Error::TooBig {
2051                nelements: usize::MAX,
2052                size: 4,
2053            }
2054        );
2055    }
2056
2057    #[test]
2058    fn alignment() {
2059        let a = [0u8; 64];
2060        let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize;
2061        assert!(super::alignment(a) >= 32);
2062        assert_eq!(super::alignment(a + 9), 1);
2063        assert_eq!(super::alignment(a + 30), 2);
2064        assert_eq!(super::alignment(a + 12), 4);
2065        assert_eq!(super::alignment(a + 8), 8);
2066    }
2067
2068    #[test]
2069    fn test_atomic_accesses() {
2070        let len = 0x1000;
2071        let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
2072        let a = unsafe { VolatileSlice::new(buf, len) };
2073
2074        crate::bytes::tests::check_atomic_accesses(a, 0, 0x1000);
2075        unsafe {
2076            std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
2077        }
2078    }
2079
2080    #[test]
2081    fn split_at() {
2082        let mut mem = [0u8; 32];
2083        let mem_ref = VolatileSlice::from(&mut mem[..]);
2084        let vslice = mem_ref.get_slice(0, 32).unwrap();
2085        let (start, end) = vslice.split_at(8).unwrap();
2086        assert_eq!(start.len(), 8);
2087        assert_eq!(end.len(), 24);
2088        let (start, end) = vslice.split_at(0).unwrap();
2089        assert_eq!(start.len(), 0);
2090        assert_eq!(end.len(), 32);
2091        let (start, end) = vslice.split_at(31).unwrap();
2092        assert_eq!(start.len(), 31);
2093        assert_eq!(end.len(), 1);
2094        let (start, end) = vslice.split_at(32).unwrap();
2095        assert_eq!(start.len(), 32);
2096        assert_eq!(end.len(), 0);
2097        let err = vslice.split_at(33).unwrap_err();
2098        assert_matches!(err, Error::OutOfBounds { addr: _ })
2099    }
2100
2101    #[test]
2102    #[cfg(feature = "backend-bitmap")]
2103    fn test_volatile_slice_dirty_tracking() {
2104        let val = 123u64;
2105        let dirty_offset = 0x1000;
2106        let dirty_len = size_of_val(&val);
2107
2108        let len = 0x10000;
2109        let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
2110
2111        // Invoke the `Bytes` test helper function.
2112        {
2113            let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
2114            let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
2115
2116            test_bytes(
2117                &slice,
2118                |s: &VolatileSlice<RefSlice<AtomicBitmap>>,
2119                 start: usize,
2120                 len: usize,
2121                 clean: bool| { check_range(s.bitmap(), start, len, clean) },
2122                |offset| offset,
2123                0x1000,
2124            );
2125        }
2126
2127        // Invoke the `VolatileMemory` test helper function.
2128        {
2129            let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
2130            let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
2131            test_volatile_memory(&slice);
2132        }
2133
2134        let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
2135        let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
2136
2137        let bitmap2 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
2138        let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) };
2139
2140        let bitmap3 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
2141        let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) };
2142
2143        assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
2144        assert!(range_is_clean(slice2.bitmap(), 0, slice2.len()));
2145
2146        slice.write_obj(val, dirty_offset).unwrap();
2147        assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
2148
2149        slice.copy_to_volatile_slice(slice2);
2150        assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len()));
2151
2152        {
2153            let (s1, s2) = slice.split_at(dirty_offset).unwrap();
2154            assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
2155            assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
2156        }
2157
2158        {
2159            let s = slice.subslice(dirty_offset, dirty_len).unwrap();
2160            assert!(range_is_dirty(s.bitmap(), 0, s.len()));
2161        }
2162
2163        {
2164            let s = slice.offset(dirty_offset).unwrap();
2165            assert!(range_is_dirty(s.bitmap(), 0, dirty_len));
2166        }
2167
2168        // Test `copy_from` for size_of::<T> == 1.
2169        {
2170            let buf = vec![1u8; dirty_offset];
2171
2172            assert!(range_is_clean(slice.bitmap(), 0, dirty_offset));
2173            slice.copy_from(&buf);
2174            assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset));
2175        }
2176
2177        // Test `copy_from` for size_of::<T> > 1.
2178        {
2179            let val = 1u32;
2180            let buf = vec![val; dirty_offset / size_of_val(&val)];
2181
2182            assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset));
2183            slice3.copy_from(&buf);
2184            assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset));
2185        }
2186
2187        unsafe {
2188            std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
2189        }
2190    }
2191
2192    #[test]
2193    #[cfg(feature = "backend-bitmap")]
2194    fn test_volatile_ref_dirty_tracking() {
2195        let val = 123u64;
2196        let mut buf = vec![val];
2197
2198        let bitmap = AtomicBitmap::new(size_of_val(&val), DEFAULT_PAGE_SIZE);
2199        let vref = unsafe {
2200            VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None)
2201        };
2202
2203        assert!(range_is_clean(vref.bitmap(), 0, vref.len()));
2204        vref.store(val);
2205        assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
2206    }
2207
2208    #[cfg(feature = "backend-bitmap")]
2209    fn test_volatile_array_ref_copy_from_tracking<T>(
2210        buf: &mut [T],
2211        index: usize,
2212        page_size: NonZeroUsize,
2213    ) where
2214        T: ByteValued + From<u8>,
2215    {
2216        let bitmap = AtomicBitmap::new(size_of_val(buf), page_size);
2217        let arr = unsafe {
2218            VolatileArrayRef::with_bitmap(
2219                buf.as_mut_ptr() as *mut u8,
2220                index + 1,
2221                bitmap.slice_at(0),
2222                None,
2223            )
2224        };
2225
2226        let val = T::from(123);
2227        let copy_buf = vec![val; index + 1];
2228
2229        assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::<T>()));
2230        arr.copy_from(copy_buf.as_slice());
2231        assert!(range_is_dirty(arr.bitmap(), 0, size_of_val(buf)));
2232    }
2233
2234    #[test]
2235    #[cfg(feature = "backend-bitmap")]
2236    fn test_volatile_array_ref_dirty_tracking() {
2237        let val = 123u64;
2238        let dirty_len = size_of_val(&val);
2239        let index = 0x1000;
2240        let dirty_offset = dirty_len * index;
2241
2242        let mut buf = vec![0u64; index + 1];
2243        let mut byte_buf = vec![0u8; index + 1];
2244
2245        // Test `ref_at`.
2246        {
2247            let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
2248            let arr = unsafe {
2249                VolatileArrayRef::with_bitmap(
2250                    buf.as_mut_ptr() as *mut u8,
2251                    index + 1,
2252                    bitmap.slice_at(0),
2253                    None,
2254                )
2255            };
2256
2257            assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len));
2258            arr.ref_at(index).store(val);
2259            assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len));
2260        }
2261
2262        // Test `store`.
2263        {
2264            let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
2265            let arr = unsafe {
2266                VolatileArrayRef::with_bitmap(
2267                    buf.as_mut_ptr() as *mut u8,
2268                    index + 1,
2269                    bitmap.slice_at(0),
2270                    None,
2271                )
2272            };
2273
2274            let slice = arr.to_slice();
2275            assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
2276            arr.store(index, val);
2277            assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
2278        }
2279
2280        // Test `copy_from` when size_of::<T>() == 1.
2281        test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, DEFAULT_PAGE_SIZE);
2282        // Test `copy_from` when size_of::<T>() > 1.
2283        test_volatile_array_ref_copy_from_tracking(&mut buf, index, DEFAULT_PAGE_SIZE);
2284    }
2285}