vm_memory/bytes.rs
// Portions Copyright 2019 Red Hat, Inc.
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Defines the `ByteValued` trait, which marks types that can be safely instantiated from raw
//! bytes, plus the `Bytes` trait for containers that support byte-level reads and writes.

use std::io::{Read, Write};
use std::mem::{size_of, MaybeUninit};
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;

use crate::atomic_integer::AtomicInteger;
use crate::volatile_memory::VolatileSlice;

/// Types for which it is safe to initialize from raw data.
///
/// # Safety
///
/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
/// byte array. This is generally true for all plain-old-data structs. It is notably not true for
/// any type that includes a reference. It is generally also not safe for non-packed structs, as
/// compiler-inserted padding is considered uninitialized memory, and thus reading or writing it
/// causes undefined behavior.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
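///
/// # Example
///
/// A minimal sketch of a plain-old-data type for which the invariants above hold (the `Header`
/// type here is purely illustrative):
///
/// ```
/// use vm_memory::ByteValued;
///
/// #[repr(C)]
/// #[derive(Copy, Clone)]
/// struct Header {
///     magic: u32,
///     len: u32,
/// }
///
/// // SAFETY: `Header` is `#[repr(C)]` with two `u32` fields, so it has no padding and any
/// // bit pattern is a valid value.
/// unsafe impl ByteValued for Header {}
///
/// let h = Header { magic: 0x1234_5678, len: 4 };
/// assert_eq!(h.as_slice().len(), 8);
/// ```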
pub unsafe trait ByteValued: Copy + Send + Sync {
    /// Converts a slice of raw data into a reference of `Self`.
    ///
    /// The value of `data` is not copied. Instead a reference is made from the given slice. The
    /// value of `Self` will depend on the representation of the type in memory, and may change in
    /// an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
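    ///
    /// # Example
    ///
    /// A short sketch using `u32` (which implements `ByteValued` below); borrowing the source
    /// bytes from an existing `u32` guarantees the alignment requirement is met:
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let raw: u32 = 0x0403_0201;
    /// // `raw.as_slice()` is properly aligned for `u32`, so the conversion succeeds.
    /// assert_eq!(u32::from_slice(raw.as_slice()), Some(&0x0403_0201));
    /// // A slice of the wrong length is rejected.
    /// assert_eq!(u32::from_slice(&[0u8; 3]), None);
    /// ```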
    fn from_slice(data: &[u8]) -> Option<&Self> {
        // Early out to avoid an unneeded `align_to` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
        // ensures that we don't have any unaligned references. This aliases a pointer, but because
        // the pointer is from a const slice reference, there are no mutable aliases. Finally, the
        // reference returned can not outlive data because they have equal implicit lifetime
        // constraints.
        match unsafe { data.align_to::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a mutable slice of raw data into a mutable reference of `Self`.
    ///
    /// Because `Self` is made from a reference to the mutable slice, mutations to the returned
    /// reference are immediately reflected in `data`. The value of the returned `Self` will depend
    /// on the representation of the type in memory, and may change in an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
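    ///
    /// # Example
    ///
    /// A short sketch: mutations through the returned reference are visible in the original
    /// value backing the slice (alignment is guaranteed by borrowing from a `u32`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut raw: u32 = 0;
    /// if let Some(v) = u32::from_mut_slice(raw.as_mut_slice()) {
    ///     *v = 0x0403_0201;
    /// }
    /// assert_eq!(raw, 0x0403_0201);
    /// ```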
    fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
        // Early out to avoid an unneeded `align_to_mut` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to_mut`
        // method ensures that we don't have any unaligned references. This aliases a pointer, but
        // because the pointer is from a mut slice reference, we borrow the passed in mutable
        // reference. Finally, the reference returned can not outlive data because they have equal
        // implicit lifetime constraints.
        match unsafe { data.align_to_mut::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a reference to `self` into a slice of bytes.
    ///
    /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
    /// The value of bytes in the returned slice will depend on the representation of the type in
    /// memory, and may change in an unstable fashion.
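    ///
    /// # Example
    ///
    /// A brief sketch; the exact byte order in the result depends on host endianness:
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let val: u16 = 0x0102;
    /// assert_eq!(val.as_slice(), &val.to_ne_bytes()[..]);
    /// ```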
    fn as_slice(&self) -> &[u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The lifetime of the returned slice is the same as the passed reference,
        // so that no dangling pointers will result from this pointer alias.
        unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a mutable slice of bytes.
    ///
    /// Because the slice is made from a reference to `self`, mutations to the returned slice are
    /// immediately reflected in `self`. The value of bytes in the returned slice will depend on
    /// the representation of the type in memory, and may change in an unstable fashion.
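    ///
    /// # Example
    ///
    /// A brief sketch: writing through the byte view updates the value itself (the bytes are
    /// given in native endianness via `to_ne_bytes`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut val: u16 = 0;
    /// val.as_mut_slice().copy_from_slice(&0x0102u16.to_ne_bytes());
    /// assert_eq!(val, 0x0102);
    /// ```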
    fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The trait also guarantees that any combination of bytes is valid for this
        // type, so modifying them in the form of a byte slice is valid. The lifetime of the
        // returned slice is the same as the passed reference, so that no dangling pointers will
        // result from this pointer alias. Although this does alias a mutable pointer, we do so by
        // exclusively borrowing the given mutable reference.
        unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a `VolatileSlice`. This is
    /// useful because `VolatileSlice` provides a `Bytes<usize>` implementation.
    ///
    /// # Safety
    ///
    /// Unlike most `VolatileMemory` implementations, this method requires an exclusive
    /// reference to `self`; this trivially fulfills `VolatileSlice::new`'s requirement
    /// that all accesses to `self` use volatile accesses (because there can
    /// be no other accesses).
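    ///
    /// # Example
    ///
    /// A short sketch mirroring the unit test at the bottom of this file: copying bytes into
    /// the `VolatileSlice` mutates the underlying value:
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut val: u32 = 0;
    /// val.as_bytes().copy_from(&[1u8, 1, 1, 1]);
    /// assert_eq!(val, 0x0101_0101);
    /// ```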
    fn as_bytes(&mut self) -> VolatileSlice {
        // SAFETY: This is safe because the lifetime is the same as self
        unsafe { VolatileSlice::new(self as *mut Self as *mut _, size_of::<Self>()) }
    }
}

macro_rules! byte_valued_array {
    ($T:ty, $($N:expr)+) => {
        $(
            // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued.
            // They are just numbers.
            unsafe impl ByteValued for [$T; $N] {}
        )+
    }
}

macro_rules! byte_valued_type {
    ($T:ty) => {
        // SAFETY: Safe as long as `$T` is POD (plain old data).
        // We are using this macro to generate the implementation for the integer types below.
        unsafe impl ByteValued for $T {}
        byte_valued_array! {
            $T,
            0 1 2 3 4 5 6 7 8 9
            10 11 12 13 14 15 16 17 18 19
            20 21 22 23 24 25 26 27 28 29
            30 31 32
        }
    };
}

byte_valued_type!(u8);
byte_valued_type!(u16);
byte_valued_type!(u32);
byte_valued_type!(u64);
byte_valued_type!(u128);
byte_valued_type!(usize);
byte_valued_type!(i8);
byte_valued_type!(i16);
byte_valued_type!(i32);
byte_valued_type!(i64);
byte_valued_type!(i128);
byte_valued_type!(isize);

/// A trait used to identify types which can be accessed atomically by proxy.
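///
/// # Example
///
/// A brief sketch: for each implementor, the associated type `A` names the standard atomic
/// that services `load`/`store` accesses by proxy:
///
/// ```
/// use std::sync::atomic::Ordering;
/// use vm_memory::AtomicAccess;
///
/// // `u32` accesses are backed by `std::sync::atomic::AtomicU32`.
/// type A = <u32 as AtomicAccess>::A;
///
/// let v = A::new(0);
/// v.store(42, Ordering::Relaxed);
/// assert_eq!(v.load(Ordering::Relaxed), 42);
/// ```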
pub trait AtomicAccess:
    ByteValued
    // Could not find a more succinct way of stating that `Self` can be converted
    // into `Self::A::V`, and the other way around.
    + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
    + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
{
    /// The `AtomicInteger` that atomic operations on `Self` are based on.
    type A: AtomicInteger;
}

macro_rules! impl_atomic_access {
    ($T:ty, $A:path) => {
        impl AtomicAccess for $T {
            type A = $A;
        }
    };
}

impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x",
    target_arch = "riscv64"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);

impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x",
    target_arch = "riscv64"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);

impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);

/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
/// - anonymous memory areas
/// - mmapped memory areas
/// - data files
/// - a proxy for accessing memory on a remote host
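///
/// # Example
///
/// A hypothetical sketch (marked `ignore`): with the `backend-mmap` feature, `GuestMemoryMmap`
/// implements `Bytes<GuestAddress>`, so objects and slices can be copied in and out of guest
/// memory:
///
/// ```ignore
/// use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
///
/// let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
/// mem.write_obj(0xdead_beef_u32, GuestAddress(0x100)).unwrap();
/// assert_eq!(mem.read_obj::<u32>(GuestAddress(0x100)).unwrap(), 0xdead_beef);
/// ```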
pub trait Bytes<A> {
    /// Associated error codes
    type E;

    /// Writes a slice into the container at `addr`.
    ///
    /// Returns the number of bytes written. The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// container.
    fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;

    /// Reads data from the container at `addr` into a slice.
    ///
    /// Returns the number of bytes read. The number of bytes read can be less than the length
    /// of the slice if there isn't enough data within the container.
    fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;

    /// Writes the entire content of a slice into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough space within the container to write the entire
    /// slice. Part of the data may have been copied nevertheless.
    fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;

    /// Reads data from the container at `addr` to fill an entire slice.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough data within the container to fill the entire slice.
    /// Part of the data may have been copied nevertheless.
    fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;

    /// Writes an object into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if the object doesn't fit inside the container.
    fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
        self.write_slice(val.as_slice(), addr)
    }

    /// Reads an object from the container at `addr`.
    ///
    /// Reading from a volatile area isn't strictly safe as it could change mid-read.
    /// However, as long as the type `T` is plain old data and can handle random initialization,
    /// everything will be OK.
    ///
    /// # Errors
    ///
    /// Returns an error if there's not enough data inside the container.
    fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
        // SAFETY: ByteValued objects must be assignable from an arbitrary byte sequence and
        // must not contain compiler-inserted padding.
        // Hence, zeroed memory is a fine initialization.
        let mut result: T = unsafe { MaybeUninit::<T>::zeroed().assume_init() };
        self.read_slice(result.as_mut_slice(), addr).map(|_| result)
    }

    /// Reads up to `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// Returns the number of bytes written into the container.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy `count` bytes from `src` into the container.
    #[deprecated(
        note = "Use `.read_volatile_from` or the functions of the `ReadVolatile` trait instead"
    )]
    fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Read;
    /// Reads exactly `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes could not be copied from `src` to the container.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy exactly `count` bytes from `src` into the container.
    #[deprecated(
        note = "Use `.read_exact_volatile_from` or the functions of the `ReadVolatile` trait instead"
    )]
    fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Read;

    /// Reads up to `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// Returns the number of bytes written into the object.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy `count` bytes from the container to `dst`.
    #[deprecated(
        note = "Use `.write_volatile_to` or the functions of the `WriteVolatile` trait instead"
    )]
    fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Write;

    /// Reads exactly `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes could not be copied from the container to `dst`.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy exactly `count` bytes from the container to `dst`.
    #[deprecated(
        note = "Use `.write_all_volatile_to` or the functions of the `WriteVolatile` trait instead"
    )]
    fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Write;

    /// Atomically store a value at the specified address.
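    ///
    /// # Example
    ///
    /// A hypothetical sketch (marked `ignore`), assuming a `Bytes` implementation such as
    /// `GuestMemoryMmap`:
    ///
    /// ```ignore
    /// use std::sync::atomic::Ordering;
    /// use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    ///
    /// let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    /// mem.store(7u32, GuestAddress(0x10), Ordering::Relaxed).unwrap();
    /// assert_eq!(mem.load::<u32>(GuestAddress(0x10), Ordering::Relaxed).unwrap(), 7);
    /// ```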
    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;

    /// Atomically load a value from the specified address.
    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
}

#[cfg(test)]
pub(crate) mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use super::*;

    use std::cell::RefCell;
    use std::fmt::Debug;
    use std::mem::align_of;

    // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be
    // zero-initialized.
    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
    where
        A: Copy,
        B: Bytes<A>,
        B::E: Debug,
    {
        let val = 100u32;

        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
        b.store(val, addr, Ordering::Relaxed).unwrap();
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);

        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
    }

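    // Exercises the `ByteValued` conversions for `T`: round-trips through `from_slice`,
    // `from_mut_slice`, `as_slice`, and `as_mut_slice` at an aligned offset, rejection of
    // misaligned slices, and the length-mismatch early out.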
    fn check_byte_valued_type<T>()
    where
        T: ByteValued + PartialEq + Debug + Default,
    {
        let mut data = [0u8; 48];
        let pre_len = {
            let (pre, _, _) = unsafe { data.align_to::<T>() };
            pre.len()
        };
        {
            let aligned_data = &mut data[pre_len..pre_len + size_of::<T>()];
            {
                let mut val: T = Default::default();
                assert_eq!(T::from_slice(aligned_data), Some(&val));
                assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val));
                assert_eq!(val.as_slice(), aligned_data);
                assert_eq!(val.as_mut_slice(), aligned_data);
            }
        }
        for i in 1..size_of::<T>().min(align_of::<T>()) {
            let begin = pre_len + i;
            let end = begin + size_of::<T>();
            let unaligned_data = &mut data[begin..end];
            {
                if align_of::<T>() != 1 {
                    assert_eq!(T::from_slice(unaligned_data), None);
                    assert_eq!(T::from_mut_slice(unaligned_data), None);
                }
            }
        }
        // Check the early out condition
        {
            assert!(T::from_slice(&data).is_none());
            assert!(T::from_mut_slice(&mut data).is_none());
        }
    }

    #[test]
    fn test_byte_valued() {
        check_byte_valued_type::<u8>();
        check_byte_valued_type::<u16>();
        check_byte_valued_type::<u32>();
        check_byte_valued_type::<u64>();
        check_byte_valued_type::<u128>();
        check_byte_valued_type::<usize>();
        check_byte_valued_type::<i8>();
        check_byte_valued_type::<i16>();
        check_byte_valued_type::<i32>();
        check_byte_valued_type::<i64>();
        check_byte_valued_type::<i128>();
        check_byte_valued_type::<isize>();
    }

    pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10;

    pub struct MockBytesContainer {
        container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>,
    }

    impl MockBytesContainer {
        pub fn new() -> Self {
            MockBytesContainer {
                container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]),
            }
        }

        pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> {
            // Reject accesses that would run past the end of the container. Checking
            // `buf.len()` first also guards against underflow in the subtraction.
            if buf.len() > MOCK_BYTES_CONTAINER_SIZE || addr > MOCK_BYTES_CONTAINER_SIZE - buf.len()
            {
                return Err(());
            }

            Ok(())
        }
    }

    impl Bytes<usize> for MockBytesContainer {
        type E = ();

        fn write(&self, _: &[u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn read(&self, _: &mut [u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            let mut container = self.container.borrow_mut();
            container[addr..addr + buf.len()].copy_from_slice(buf);

            Ok(())
        }

        fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            let container = self.container.borrow();
            buf.copy_from_slice(&container[addr..addr + buf.len()]);

            Ok(())
        }

        fn read_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
        where
            F: Read,
        {
            unimplemented!()
        }

        fn read_exact_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
        where
            F: Read,
        {
            unimplemented!()
        }

        fn write_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
        where
            F: Write,
        {
            unimplemented!()
        }

        fn write_all_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
        where
            F: Write,
        {
            unimplemented!()
        }

        fn store<T: AtomicAccess>(
            &self,
            _val: T,
            _addr: usize,
            _order: Ordering,
        ) -> Result<(), Self::E> {
            unimplemented!()
        }

        fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
            unimplemented!()
        }
    }

    #[test]
    fn test_bytes() {
        let bytes = MockBytesContainer::new();

        assert!(bytes.write_obj(u64::MAX, 0).is_ok());
        assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);

        assert!(bytes
            .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
            .is_err());
        assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
    }

    #[repr(C)]
    #[derive(Copy, Clone, Default)]
    struct S {
        a: u32,
        b: u32,
    }

    unsafe impl ByteValued for S {}

    #[test]
    fn byte_valued_slice() {
        let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];
        let mut s: S = Default::default();
        s.as_bytes().copy_from(&a);
        assert_eq!(s.a, 0);
        assert_eq!(s.b, 0x0101_0101);
    }
}