vm_memory/bytes.rs
// Portions Copyright 2019 Red Hat, Inc.
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Defines the `ByteValued` trait, which marks types that are safe to
//! instantiate from raw, random data.

use std::io::{Read, Write};
use std::mem::{size_of, MaybeUninit};
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;

use crate::atomic_integer::AtomicInteger;
use crate::volatile_memory::VolatileSlice;
use crate::{ReadVolatile, WriteVolatile};

/// Types for which it is safe to initialize from raw data.
///
/// # Safety
///
/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
/// byte array. This is generally true for all plain-old-data structs. It is notably not true for
/// any type that includes a reference. It is generally also not safe for non-packed structs, as
/// compiler-inserted padding is considered uninitialized memory, and thus reading/writing it will
/// cause undefined behavior.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
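///
/// # Example
///
/// A minimal sketch of a typical implementor: a hypothetical `#[repr(C)]` struct of plain
/// integers (`Pair` below is illustrative only; it has no padding, since both fields are `u32`):
///
/// ```
/// # use vm_memory::ByteValued;
/// #[repr(C)]
/// #[derive(Copy, Clone, Debug)]
/// struct Pair {
///     a: u32,
///     b: u32,
/// }
///
/// // SAFETY: `Pair` is plain old data with no padding and no references.
/// unsafe impl ByteValued for Pair {}
///
/// let p = Pair { a: 1, b: 2 };
/// assert_eq!(p.as_slice().len(), 8);
/// ```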
pub unsafe trait ByteValued: Copy + Send + Sync {
    /// Converts a slice of raw data into a reference of `Self`.
    ///
    /// The value of `data` is not copied. Instead a reference is made from the given slice. The
    /// value of `Self` will depend on the representation of the type in memory, and may change in
    /// an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
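    ///
    /// # Example
    ///
    /// A short sketch using the `u8` impl provided below (alignment 1, so any one-byte slice
    /// converts):
    ///
    /// ```
    /// # use vm_memory::ByteValued;
    /// assert_eq!(u8::from_slice(&[5]), Some(&5));
    /// // A length mismatch yields `None`.
    /// assert!(u8::from_slice(&[5, 6]).is_none());
    /// ```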
    fn from_slice(data: &[u8]) -> Option<&Self> {
        // Early out to avoid an unneeded `align_to` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
        // ensures that we don't have any unaligned references. This aliases a pointer, but because
        // the pointer is from a const slice reference, there are no mutable aliases. Finally, the
        // reference returned can not outlive data because they have equal implicit lifetime
        // constraints.
        match unsafe { data.align_to::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a mutable slice of raw data into a mutable reference of `Self`.
    ///
    /// Because `Self` is made from a reference to the mutable slice, mutations to the returned
    /// reference are immediately reflected in `data`. The value of the returned `Self` will depend
    /// on the representation of the type in memory, and may change in an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
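    ///
    /// # Example
    ///
    /// A short sketch showing that mutations through the returned reference are visible in the
    /// original slice:
    ///
    /// ```
    /// # use vm_memory::ByteValued;
    /// let mut data = [0u8; 1];
    /// *u8::from_mut_slice(&mut data).unwrap() = 7;
    /// assert_eq!(data[0], 7);
    /// ```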
    fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
        // Early out to avoid an unneeded `align_to_mut` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to_mut`
        // method ensures that we don't have any unaligned references. This aliases a pointer, but
        // because the pointer is from a mut slice reference, we borrow the passed in mutable
        // reference. Finally, the reference returned can not outlive data because they have equal
        // implicit lifetime constraints.
        match unsafe { data.align_to_mut::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a reference to `self` into a slice of bytes.
    ///
    /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
    /// The value of bytes in the returned slice will depend on the representation of the type in
    /// memory, and may change in an unstable fashion.
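    ///
    /// # Example
    ///
    /// ```
    /// # use vm_memory::ByteValued;
    /// // A zero `u16` is two zero bytes on every platform.
    /// assert_eq!(0u16.as_slice(), &[0, 0]);
    /// ```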
    fn as_slice(&self) -> &[u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The lifetime of the returned slice is the same as the passed reference,
        // so that no dangling pointers will result from this pointer alias.
        unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a mutable slice of bytes.
    ///
    /// Because the slice is made from a reference to `self`, mutations to the returned slice are
    /// immediately reflected in `self`. The value of bytes in the returned slice will depend on
    /// the representation of the type in memory, and may change in an unstable fashion.
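    ///
    /// # Example
    ///
    /// ```
    /// # use vm_memory::ByteValued;
    /// let mut val = 0u16;
    /// val.as_mut_slice().fill(0xff);
    /// assert_eq!(val, 0xffff);
    /// ```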
    fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The trait also guarantees that any combination of bytes is valid for this
        // type, so modifying them in the form of a byte slice is valid. The lifetime of the
        // returned slice is the same as the passed reference, so that no dangling pointers will
        // result from this pointer alias. Although this does alias a mutable pointer, we do so by
        // exclusively borrowing the given mutable reference.
        unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a `VolatileSlice`. This is
    /// useful because `VolatileSlice` provides a `Bytes<usize>` implementation.
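    ///
    /// # Example
    ///
    /// A short sketch using the `Bytes` impl of the returned slice (note that offset 0 is the
    /// first byte of the native in-memory representation):
    ///
    /// ```
    /// # use vm_memory::{ByteValued, Bytes};
    /// let mut val = 0u32;
    /// val.as_bytes().write_obj(0xffu8, 0).unwrap();
    /// assert_eq!(val.as_slice()[0], 0xff);
    /// ```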
    fn as_bytes(&mut self) -> VolatileSlice {
        VolatileSlice::from(self.as_mut_slice())
    }

    /// Constructs a `Self` whose binary representation is set to all zeroes.
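    ///
    /// # Example
    ///
    /// ```
    /// # use vm_memory::ByteValued;
    /// assert_eq!(u32::zeroed(), 0);
    /// ```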
    fn zeroed() -> Self {
        // SAFETY: ByteValued objects must be assignable from arbitrary byte
        // sequences and are mandated to be packed.
        // Hence, zeroed memory is a fine initialization.
        unsafe { MaybeUninit::<Self>::zeroed().assume_init() }
    }

    /// Writes this [`ByteValued`]'s byte representation to the given [`Write`] impl.
    fn write_all_to<W: Write>(&self, mut writer: W) -> Result<(), std::io::Error> {
        writer.write_all(self.as_slice())
    }

    /// Constructs an instance of this [`ByteValued`] by reading from the given [`Read`] impl.
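    ///
    /// # Example
    ///
    /// A sketch of a round trip through an in-memory buffer:
    ///
    /// ```
    /// # use vm_memory::ByteValued;
    /// let mut buf = Vec::new();
    /// 0x1234_5678u32.write_all_to(&mut buf).unwrap();
    /// assert_eq!(u32::read_exact_from(buf.as_slice()).unwrap(), 0x1234_5678);
    /// ```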
    fn read_exact_from<R: Read>(mut reader: R) -> Result<Self, std::io::Error> {
        let mut result = Self::zeroed();
        reader.read_exact(result.as_mut_slice()).map(|_| result)
    }
}

macro_rules! byte_valued_array {
    ($T:ty, $($N:expr)+) => {
        $(
            // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued.
            // They are just numbers.
            unsafe impl ByteValued for [$T; $N] {}
        )+
    }
}

macro_rules! byte_valued_type {
    ($T:ty) => {
        // SAFETY: Safe as long as T is POD.
        // We are using this macro to generate the implementation for the integer types below.
        unsafe impl ByteValued for $T {}
        byte_valued_array! {
            $T,
            0 1 2 3 4 5 6 7 8 9
            10 11 12 13 14 15 16 17 18 19
            20 21 22 23 24 25 26 27 28 29
            30 31 32
        }
    };
}

byte_valued_type!(u8);
byte_valued_type!(u16);
byte_valued_type!(u32);
byte_valued_type!(u64);
byte_valued_type!(u128);
byte_valued_type!(usize);
byte_valued_type!(i8);
byte_valued_type!(i16);
byte_valued_type!(i32);
byte_valued_type!(i64);
byte_valued_type!(i128);
byte_valued_type!(isize);

/// A trait used to identify types which can be accessed atomically by proxy.
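///
/// # Example
///
/// A short sketch: `u32` is accessed through [`std::sync::atomic::AtomicU32`] (see the
/// `impl_atomic_access!` invocations below), so it satisfies this bound.
///
/// ```
/// # use vm_memory::AtomicAccess;
/// fn requires_atomic_access<T: AtomicAccess>() {}
/// requires_atomic_access::<u32>();
/// ```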
pub trait AtomicAccess:
    ByteValued
    // Could not find a more succinct way of stating that `Self` can be converted
    // into `Self::A::V`, and the other way around.
    + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
    + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
{
    /// The `AtomicInteger` that atomic operations on `Self` are based on.
    type A: AtomicInteger;
}

macro_rules! impl_atomic_access {
    ($T:ty, $A:path) => {
        impl AtomicAccess for $T {
            type A = $A;
        }
    };
}

impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x",
    target_arch = "riscv64"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);

impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x",
    target_arch = "riscv64"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);

impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);

/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
/// - anonymous memory areas
/// - mmapped memory areas
/// - data files
/// - a proxy to access memory on a remote machine
pub trait Bytes<A> {
    /// Associated error codes
    type E;

    /// Writes a slice into the container at `addr`.
    ///
    /// Returns the number of bytes written. The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// container.
    ///
    /// If the given slice is empty (i.e. has length 0), always returns `Ok(0)`, even if `addr`
    /// is otherwise out of bounds. However, if the container is empty, it will
    /// return an error (unless the slice is also empty, in which case the above takes precedence).
    ///
    /// ```rust
    /// # use vm_memory::{Bytes, VolatileMemoryError, VolatileSlice};
    /// # use matches::assert_matches;
    /// let mut arr = [1, 2, 3, 4, 5];
    /// let slice = VolatileSlice::from(arr.as_mut_slice());
    ///
    /// assert_eq!(slice.write(&[1, 2, 3], 0).unwrap(), 3);
    /// assert_eq!(slice.write(&[1, 2, 3], 3).unwrap(), 2);
    /// assert_matches!(
    ///     slice.write(&[1, 2, 3], 5).unwrap_err(),
    ///     VolatileMemoryError::OutOfBounds { addr: 5 }
    /// );
    /// assert_eq!(slice.write(&[], 5).unwrap(), 0);
    /// ```
    fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;

    /// Reads data from the container at `addr` into a slice.
    ///
    /// Returns the number of bytes read. The number of bytes read can be less than the length
    /// of the slice if there isn't enough data within the container.
    ///
    /// If the given slice is empty (i.e. has length 0), always returns `Ok(0)`, even if `addr`
    /// is otherwise out of bounds. However, if the container is empty, it will
    /// return an error (unless the slice is also empty, in which case the above takes precedence).
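    ///
    /// A sketch mirroring the `write` example above:
    ///
    /// ```rust
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut arr = [1, 2, 3, 4, 5];
    /// let slice = VolatileSlice::from(arr.as_mut_slice());
    ///
    /// let mut buf = [0u8; 16];
    /// // Only two bytes are available past address 3.
    /// assert_eq!(slice.read(&mut buf, 3).unwrap(), 2);
    /// ```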
    fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;

    /// Writes the entire content of a slice into the container at `addr`.
    ///
    /// If the given slice is empty (i.e. has length 0), always returns `Ok(())`, even if `addr`
    /// is otherwise out of bounds.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough space within the container to write the entire
    /// slice. Part of the data may have been copied nevertheless.
    fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;

    /// Reads data from the container at `addr` to fill an entire slice.
    ///
    /// If the given slice is empty (i.e. has length 0), always returns `Ok(())`, even if `addr`
    /// is otherwise out of bounds.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough data within the container to fill the entire slice.
    /// Part of the data may have been copied nevertheless.
    fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;

    /// Writes an object into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if the object doesn't fit inside the container.
    fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
        self.write_slice(val.as_slice(), addr)
    }

    /// Reads an object from the container at `addr`.
    ///
    /// Reading from a volatile area isn't strictly safe as it could change mid-read.
    /// However, as long as the type T is plain old data and can handle random initialization,
    /// everything will be OK.
    ///
    /// # Errors
    ///
    /// Returns an error if there's not enough data inside the container.
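    ///
    /// # Example
    ///
    /// A sketch of a round trip through a `VolatileSlice`:
    ///
    /// ```rust
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut arr = [0u8; 8];
    /// let slice = VolatileSlice::from(arr.as_mut_slice());
    ///
    /// slice.write_obj(0x11u8, 3).unwrap();
    /// assert_eq!(slice.read_obj::<u8>(3).unwrap(), 0x11);
    /// ```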
    fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
        let mut result = T::zeroed();
        self.read_slice(result.as_mut_slice(), addr).map(|_| result)
    }

    /// Reads up to `count` bytes from `src` and writes them into the container at `addr`.
    /// Unlike `ReadVolatile::read_volatile`, this function retries on `EINTR` being returned from
    /// the underlying I/O `read` operation.
    ///
    /// Returns the number of bytes written into the container.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy `count` bytes from `src` into the container.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
    ///
    /// ```
    /// # #[cfg(all(feature = "backend-mmap", feature = "rawfd"))]
    /// # {
    /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// #
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// # let addr = GuestAddress(0x1010);
    /// # let mut file = if cfg!(target_family = "unix") {
    /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
    /// # file
    /// # } else {
    /// #     File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
    /// #         .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
    /// # };
    ///
    /// gm.read_volatile_from(addr, &mut file, 128)
    ///     .expect("Could not read from /dev/urandom into guest memory");
    ///
    /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
    /// let rand_val: u32 = gm
    ///     .read_obj(read_addr)
    ///     .expect("Could not read u32 val from /dev/urandom");
    /// # }
    /// ```
    fn read_volatile_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: ReadVolatile;

    /// Reads exactly `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes couldn't be copied from `src` to the container.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy exactly `count` bytes from `src` into the container.
    fn read_exact_volatile_from<F>(
        &self,
        addr: A,
        src: &mut F,
        count: usize,
    ) -> Result<(), Self::E>
    where
        F: ReadVolatile;

    /// Reads up to `count` bytes from the container at `addr` and writes them into `dst`.
    /// Unlike `WriteVolatile::write_volatile`, this function retries on `EINTR` being returned by
    /// the underlying I/O `write` operation.
    ///
    /// Returns the number of bytes written into the object.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy `count` bytes from the container to `dst`.
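    ///
    /// # Example
    ///
    /// A sketch draining container bytes into a plain byte slice (this assumes the
    /// `WriteVolatile` impl for `&mut [u8]` provided by the crate):
    ///
    /// ```rust
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut arr = [1u8, 2, 3, 4, 5];
    /// let slice = VolatileSlice::from(arr.as_mut_slice());
    ///
    /// let mut out = [0u8; 4];
    /// let mut dst = out.as_mut_slice();
    /// assert_eq!(slice.write_volatile_to(0, &mut dst, 4).unwrap(), 4);
    /// assert_eq!(out, [1, 2, 3, 4]);
    /// ```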
    fn write_volatile_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: WriteVolatile;

    /// Reads exactly `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes couldn't be copied from the container to `dst`.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy exactly `count` bytes from the container to `dst`.
    fn write_all_volatile_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: WriteVolatile;

    /// Atomically store a value at the specified address.
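    ///
    /// # Example
    ///
    /// A short sketch using a `VolatileSlice` (which implements `Bytes<usize>`):
    ///
    /// ```rust
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut arr = [0u8; 4];
    /// let slice = VolatileSlice::from(arr.as_mut_slice());
    ///
    /// slice.store(0xffu8, 2, Ordering::Relaxed).unwrap();
    /// assert_eq!(slice.load::<u8>(2, Ordering::Relaxed).unwrap(), 0xff);
    /// ```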
    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;

    /// Atomically load a value from the specified address.
    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
}

#[cfg(test)]
pub(crate) mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use super::*;

    use std::cell::RefCell;
    use std::fmt::Debug;
    use std::io::ErrorKind;
    use std::mem::align_of;

    // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be
    // zero-initialized.
    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
    where
        A: Copy,
        B: Bytes<A>,
        B::E: Debug,
    {
        let val = 100u32;

        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
        b.store(val, addr, Ordering::Relaxed).unwrap();
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);

        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
    }

    fn check_byte_valued_type<T>()
    where
        T: ByteValued + PartialEq + Debug + Default,
    {
        let mut data = [0u8; 48];
        let pre_len = {
            let (pre, _, _) = unsafe { data.align_to::<T>() };
            pre.len()
        };
        {
            let aligned_data = &mut data[pre_len..pre_len + size_of::<T>()];
            {
                let mut val: T = Default::default();
                assert_eq!(T::from_slice(aligned_data), Some(&val));
                assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val));
                assert_eq!(val.as_slice(), aligned_data);
                assert_eq!(val.as_mut_slice(), aligned_data);
            }
        }
        for i in 1..size_of::<T>().min(align_of::<T>()) {
            let begin = pre_len + i;
            let end = begin + size_of::<T>();
            let unaligned_data = &mut data[begin..end];
            {
                if align_of::<T>() != 1 {
                    assert_eq!(T::from_slice(unaligned_data), None);
                    assert_eq!(T::from_mut_slice(unaligned_data), None);
                }
            }
        }
        // Check the early out condition
        {
            assert!(T::from_slice(&data).is_none());
            assert!(T::from_mut_slice(&mut data).is_none());
        }
    }

    #[test]
    fn test_byte_valued() {
        check_byte_valued_type::<u8>();
        check_byte_valued_type::<u16>();
        check_byte_valued_type::<u32>();
        check_byte_valued_type::<u64>();
        check_byte_valued_type::<u128>();
        check_byte_valued_type::<usize>();
        check_byte_valued_type::<i8>();
        check_byte_valued_type::<i16>();
        check_byte_valued_type::<i32>();
        check_byte_valued_type::<i64>();
        check_byte_valued_type::<i128>();
        check_byte_valued_type::<isize>();
    }

    pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10;

    pub struct MockBytesContainer {
        container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>,
    }

    impl MockBytesContainer {
        pub fn new() -> Self {
            MockBytesContainer {
                container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]),
            }
        }

        pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> {
            // Reject accesses that would run past the end of the container. The
            // explicit length check also guards the subtraction against underflow.
            if buf.len() > MOCK_BYTES_CONTAINER_SIZE || MOCK_BYTES_CONTAINER_SIZE - buf.len() < addr
            {
                return Err(());
            }

            Ok(())
        }
    }

    impl Bytes<usize> for MockBytesContainer {
        type E = ();

        fn write(&self, _: &[u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn read(&self, _: &mut [u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            let mut container = self.container.borrow_mut();
            container[addr..addr + buf.len()].copy_from_slice(buf);

            Ok(())
        }

        fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            let container = self.container.borrow();
            buf.copy_from_slice(&container[addr..addr + buf.len()]);

            Ok(())
        }

        fn read_volatile_from<F>(
            &self,
            _addr: usize,
            _src: &mut F,
            _count: usize,
        ) -> Result<usize, Self::E>
        where
            F: ReadVolatile,
        {
            unimplemented!()
        }

        fn read_exact_volatile_from<F>(
            &self,
            _addr: usize,
            _src: &mut F,
            _count: usize,
        ) -> Result<(), Self::E>
        where
            F: ReadVolatile,
        {
            unimplemented!()
        }

        fn write_volatile_to<F>(
            &self,
            _addr: usize,
            _dst: &mut F,
            _count: usize,
        ) -> Result<usize, Self::E>
        where
            F: WriteVolatile,
        {
            unimplemented!()
        }

        fn write_all_volatile_to<F>(
            &self,
            _addr: usize,
            _dst: &mut F,
            _count: usize,
        ) -> Result<(), Self::E>
        where
            F: WriteVolatile,
        {
            unimplemented!()
        }

        fn store<T: AtomicAccess>(
            &self,
            _val: T,
            _addr: usize,
            _order: Ordering,
        ) -> Result<(), Self::E> {
            unimplemented!()
        }

        fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
            unimplemented!()
        }
    }

    #[test]
    fn test_bytes() {
        let bytes = MockBytesContainer::new();

        assert!(bytes.write_obj(u64::MAX, 0).is_ok());
        assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);

        assert!(bytes
            .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
            .is_err());
        assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
    }

    #[repr(C)]
    #[derive(Copy, Clone, Default, Debug)]
    struct S {
        a: u32,
        b: u32,
    }

    unsafe impl ByteValued for S {}

    #[test]
    fn byte_valued_slice() {
        let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];
        let mut s: S = Default::default();
        s.as_bytes().copy_from(&a);
        assert_eq!(s.a, 0);
        assert_eq!(s.b, 0x0101_0101);
    }

    #[test]
    fn test_byte_valued_io() {
        let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];

        let result = S::read_exact_from(&a[1..]);
        assert_eq!(result.unwrap_err().kind(), ErrorKind::UnexpectedEof);

        let s = S::read_exact_from(&a[..]).unwrap();
        assert_eq!(s.a, 0);
        assert_eq!(s.b, 0x0101_0101);

        let mut b = Vec::new();
        s.write_all_to(&mut b).unwrap();
        assert_eq!(a.as_ref(), b.as_slice());

        let mut b = [0; 7];
        let result = s.write_all_to(b.as_mut_slice());
        assert_eq!(result.unwrap_err().kind(), ErrorKind::WriteZero);
    }

    #[test]
    fn test_byte_valued_zeroed() {
        let s = S::zeroed();

        assert!(s.as_slice().iter().all(|&b| b == 0x0));
    }
}