//! Traits for reinterpreting plain-data types as bytes (`ByteValued`) and
//! for reading and writing objects into containers of bytes (`Bytes`).

use std::io::{Read, Write};
use std::mem::{size_of, MaybeUninit};
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;

use crate::atomic_integer::AtomicInteger;
use crate::volatile_memory::VolatileSlice;
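
/// Types for which it is safe to initialize from raw data.
///
/// A type `T` implementing `ByteValued` certifies that any byte pattern of
/// the right size is a valid `T` and that `T` contains no padding, which is
/// what makes the reinterpreting conversions below sound.
///
/// # Safety
///
/// This trait is unsafe because types that do not uphold the guarantee above
/// (for example types with padding, `bool`, or most enums) cause undefined
/// behavior through the provided methods. Implementors must ensure that every
/// bit pattern is a valid value and that no padding bytes exist.
///
/// An illustrative sketch of the conversions (hedged: a plain byte array is
/// not guaranteed to be 4-byte aligned, hence the `if let`; the fence is
/// `ignore` since a doctest would depend on the enclosing crate's name):
///
/// ```ignore
/// let bytes = [0xde, 0xad, 0xbe, 0xef];
/// // Succeeds only when the slice is exactly size_of::<u32>() bytes long
/// // and happens to be correctly aligned for u32.
/// if let Some(v) = u32::from_slice(&bytes) {
///     assert_eq!(v.as_slice(), &bytes);
/// }
/// // A slice of the wrong length is always rejected.
/// assert!(u32::from_slice(&bytes[..3]).is_none());
/// ```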
pub unsafe trait ByteValued: Copy + Send + Sync {
    /// Converts a slice of raw data into a reference of `Self`.
    ///
    /// The value of `data` is not copied. Instead a reference is made from
    /// the given slice using the same memory.
    ///
    /// Returns `None` if the length of the slice is not the size of `Self`,
    /// or if the slice is not correctly aligned to hold a value of `Self`.
    fn from_slice(data: &[u8]) -> Option<&Self> {
        // Early out to avoid an unneeded `align_to` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: `Self: ByteValued` guarantees that any byte pattern is a
        // valid value, so reinterpreting correctly aligned bytes is sound.
        match unsafe { data.align_to::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }
    /// Converts a mutable slice of raw data into a mutable reference of `Self`.
    ///
    /// Because `Self` is made mutable through the same memory, any bytes later
    /// written to the slice must form a valid `Self`, which `ByteValued`
    /// guarantees.
    ///
    /// Returns `None` if the length of the slice is not the size of `Self`,
    /// or if the slice is not correctly aligned.
    fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
        // Early out to avoid an unneeded `align_to_mut` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: `Self: ByteValued` guarantees that any byte pattern is a
        // valid value, so reinterpreting correctly aligned bytes is sound.
        match unsafe { data.align_to_mut::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }
    /// Converts a reference to `self` into a slice of bytes.
    ///
    /// The value of `self` is not copied; the returned slice aliases its memory.
    fn as_slice(&self) -> &[u8] {
        // SAFETY: the pointer is valid for `size_of::<Self>()` bytes for the
        // lifetime of the borrow, and `ByteValued` guarantees `Self` has no
        // uninitialized padding bytes to expose.
        unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }
    /// Converts a mutable reference to `self` into a mutable slice of bytes.
    ///
    /// Writing arbitrary bytes through the slice is sound because `ByteValued`
    /// guarantees every bit pattern is a valid `Self`.
    fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: the pointer is valid for `size_of::<Self>()` bytes for the
        // lifetime of the borrow, and `Self` has no padding.
        unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }
    /// Converts a mutable reference to `self` into a `VolatileSlice`, which
    /// allows volatile access to the object's bytes.
    fn as_bytes(&mut self) -> VolatileSlice {
        // SAFETY: the pointer is valid for `size_of::<Self>()` bytes for the
        // lifetime of the borrow, and any bytes written through the slice
        // form a valid `Self`.
        unsafe { VolatileSlice::new(self as *mut Self as *mut _, size_of::<Self>()) }
    }
}
macro_rules! byte_valued_array {
    ($T:ty, $($N:expr)+) => {
        $(
            // SAFETY: an array of `ByteValued` elements has no padding
            // between elements and no invalid bit patterns.
            unsafe impl ByteValued for [$T; $N] {}
        )+
    }
}
macro_rules! byte_valued_type {
    ($T:ty) => {
        // SAFETY: primitive integer types have no padding and every bit
        // pattern is a valid value.
        unsafe impl ByteValued for $T {}
        byte_valued_array! {
            $T,
            0 1 2 3 4 5 6 7 8 9
            10 11 12 13 14 15 16 17 18 19
            20 21 22 23 24 25 26 27 28 29
            30 31 32
        }
    };
}
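
// For reference, `byte_valued_type!(u8)` expands to
// `unsafe impl ByteValued for u8 {}` together with
// `unsafe impl ByteValued for [u8; N] {}` for every `N` in `0..=32`.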
byte_valued_type!(u8);
byte_valued_type!(u16);
byte_valued_type!(u32);
byte_valued_type!(u64);
byte_valued_type!(u128);
byte_valued_type!(usize);
byte_valued_type!(i8);
byte_valued_type!(i16);
byte_valued_type!(i32);
byte_valued_type!(i64);
byte_valued_type!(i128);
byte_valued_type!(isize);
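
/// Objects that can be read from or written to a memory location atomically.
///
/// The associated type `A` names the `AtomicInteger` used to perform the
/// access (for example, `<u32 as AtomicAccess>::A` is `AtomicU32`), and the
/// `From`/`Into` bounds guarantee lossless conversion in both directions.
///
/// A minimal sketch of a helper bounded by this trait (hypothetical, not
/// part of this module):
///
/// ```ignore
/// fn relaxed_store<T: AtomicAccess>(slot: &T::A, val: T) {
///     slot.store(val.into(), std::sync::atomic::Ordering::Relaxed);
/// }
/// ```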
pub trait AtomicAccess:
ByteValued
+ From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
+ Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
{
    /// The `AtomicInteger` type that atomic accesses to `Self` go through.
    type A: AtomicInteger;
}
// Ties a primitive to the atomic type used to access it.
macro_rules! impl_atomic_access {
($T:ty, $A:path) => {
impl AtomicAccess for $T {
type A = $A;
}
};
}
impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
// 64-bit atomics are only available on targets with native 64-bit atomic
// support; the same cfg guards the `u64` impl below.
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x",
    target_arch = "riscv64"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);
impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
#[cfg(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
target_arch = "s390x",
target_arch = "riscv64"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);
impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);
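
/// A container that can read and write a range of bytes addressed by `A`.
///
/// Implementations might be backed by anonymous or mmapped memory, a file,
/// or a proxy to memory on another machine; the provided `*_obj` methods
/// build typed accesses on top of `write_slice`/`read_slice`.
///
/// A hedged usage sketch, assuming some value `mem` whose type implements
/// `Bytes<usize>` (the fence is `ignore` since a doctest would depend on the
/// enclosing crate's name):
///
/// ```ignore
/// // Write a u64 at offset 0, then read it back.
/// mem.write_obj(0xdead_beef_u64, 0)?;
/// let val: u64 = mem.read_obj(0)?;
/// assert_eq!(val, 0xdead_beef);
/// ```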
pub trait Bytes<A> {
    /// Associated error codes.
    type E;

    /// Writes a slice into the container at `addr`, returning how many bytes
    /// were written (which may be fewer than `buf.len()`).
    fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;

    /// Reads up to `buf.len()` bytes from the container at `addr`, returning
    /// how many bytes were read.
    fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;

    /// Writes the entire contents of `buf` at `addr`, failing if there is
    /// not enough room.
    fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;

    /// Fills `buf` completely from the container starting at `addr`, failing
    /// if there is not enough data.
    fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;
    /// Writes the object `val` at `addr`.
    fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
        self.write_slice(val.as_slice(), addr)
    }

    /// Reads an object of type `T` from `addr`.
    fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
        // SAFETY: `T: ByteValued` guarantees that the all-zero bit pattern
        // is a valid `T`, so the zeroed placeholder is fully initialized.
        let mut result: T = unsafe { MaybeUninit::<T>::zeroed().assume_init() };
        self.read_slice(result.as_mut_slice(), addr).map(|_| result)
    }
    /// Reads up to `count` bytes from `src` into the container at `addr`.
    #[deprecated(
        note = "Use `.read_volatile_from` or the functions of the `ReadVolatile` trait instead"
    )]
    fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Read;

    /// Reads exactly `count` bytes from `src` into the container at `addr`.
    #[deprecated(
        note = "Use `.read_exact_volatile_from` or the functions of the `ReadVolatile` trait instead"
    )]
    fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Read;

    /// Writes up to `count` bytes from the container at `addr` into `dst`.
    #[deprecated(
        note = "Use `.write_volatile_to` or the functions of the `WriteVolatile` trait instead"
    )]
    fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Write;

    /// Writes exactly `count` bytes from the container at `addr` into `dst`.
    #[deprecated(
        note = "Use `.write_all_volatile_to` or the functions of the `WriteVolatile` trait instead"
    )]
    fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Write;
    /// Atomically stores `val` at `addr` with the given memory ordering.
    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;

    /// Atomically loads a `T` from `addr` with the given memory ordering.
    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
}
#[cfg(test)]
pub(crate) mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use super::*;
use std::cell::RefCell;
use std::fmt::Debug;
use std::mem::align_of;
    /// Exercises the atomic `load`/`store` accessors of a `Bytes`
    /// implementation at a valid and an invalid address.
    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
    where
        A: Copy,
        B: Bytes<A>,
        B::E: Debug,
    {
        let val = 100u32;

        // The container is expected to start out zeroed.
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
        b.store(val, addr, Ordering::Relaxed).unwrap();
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);

        // Out-of-bounds accesses must fail.
        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
    }
    fn check_byte_valued_type<T>()
    where
        T: ByteValued + PartialEq + Debug + Default,
    {
        let mut data = [0u8; 48];
        let pre_len = {
            let (pre, _, _) = unsafe { data.align_to::<T>() };
            pre.len()
        };

        // A correctly aligned, correctly sized slice converts both ways.
        {
            let aligned_data = &mut data[pre_len..pre_len + size_of::<T>()];
            let mut val: T = Default::default();
            assert_eq!(T::from_slice(aligned_data), Some(&val));
            assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val));
            assert_eq!(val.as_slice(), aligned_data);
            assert_eq!(val.as_mut_slice(), aligned_data);
        }

        // Misaligned slices are rejected; types with an alignment of 1 can
        // never be misaligned, so they are skipped.
        for i in 1..size_of::<T>().min(align_of::<T>()) {
            let begin = pre_len + i;
            let end = begin + size_of::<T>();
            let unaligned_data = &mut data[begin..end];
            if align_of::<T>() != 1 {
                assert_eq!(T::from_slice(unaligned_data), None);
                assert_eq!(T::from_mut_slice(unaligned_data), None);
            }
        }

        // Slices of the wrong length are rejected.
        assert!(T::from_slice(&data).is_none());
        assert!(T::from_mut_slice(&mut data).is_none());
    }
#[test]
fn test_byte_valued() {
check_byte_valued_type::<u8>();
check_byte_valued_type::<u16>();
check_byte_valued_type::<u32>();
check_byte_valued_type::<u64>();
check_byte_valued_type::<u128>();
check_byte_valued_type::<usize>();
check_byte_valued_type::<i8>();
check_byte_valued_type::<i16>();
check_byte_valued_type::<i32>();
check_byte_valued_type::<i64>();
check_byte_valued_type::<i128>();
check_byte_valued_type::<isize>();
}
    /// Size of the mock byte container used by the `Bytes` tests.
    pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10;

    /// A fixed-size byte container implementing `Bytes<usize>` for testing
    /// the trait's provided methods.
    pub struct MockBytesContainer {
        container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>,
    }

    impl MockBytesContainer {
        pub fn new() -> Self {
            MockBytesContainer {
                container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]),
            }
        }

        /// Rejects accesses that would run past the end of the container.
        /// The length check comes first so the subtraction below cannot
        /// underflow, and an access ending exactly at the container boundary
        /// is accepted.
        pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> {
            if buf.len() > MOCK_BYTES_CONTAINER_SIZE || addr > MOCK_BYTES_CONTAINER_SIZE - buf.len()
            {
                return Err(());
            }
            Ok(())
        }
}
    // Only `write_slice` and `read_slice` are backed by real storage; the
    // remaining methods are not exercised by these tests.
    impl Bytes<usize> for MockBytesContainer {
        type E = ();
fn write(&self, _: &[u8], _: usize) -> Result<usize, Self::E> {
unimplemented!()
}
fn read(&self, _: &mut [u8], _: usize) -> Result<usize, Self::E> {
unimplemented!()
}
fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> {
self.validate_slice_op(buf, addr)?;
let mut container = self.container.borrow_mut();
container[addr..addr + buf.len()].copy_from_slice(buf);
Ok(())
}
fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> {
self.validate_slice_op(buf, addr)?;
let container = self.container.borrow();
buf.copy_from_slice(&container[addr..addr + buf.len()]);
Ok(())
}
fn read_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
where
F: Read,
{
unimplemented!()
}
fn read_exact_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
where
F: Read,
{
unimplemented!()
}
fn write_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
where
F: Write,
{
unimplemented!()
}
fn write_all_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
where
F: Write,
{
unimplemented!()
}
fn store<T: AtomicAccess>(
&self,
_val: T,
_addr: usize,
_order: Ordering,
) -> Result<(), Self::E> {
unimplemented!()
}
fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
unimplemented!()
}
}
    #[test]
    fn test_bytes() {
        let bytes = MockBytesContainer::new();

        // In-bounds object accesses round-trip.
        assert!(bytes.write_obj(u64::MAX, 0).is_ok());
        assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);

        // Accesses starting past the end of the container fail.
        assert!(bytes
            .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
            .is_err());
        assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
    }
    #[repr(C)]
    #[derive(Copy, Clone, Default)]
    struct S {
        a: u32,
        b: u32,
    }

    // SAFETY: `S` is `#[repr(C)]` and consists of two `u32` fields, so it
    // has no padding and every bit pattern is a valid value.
    unsafe impl ByteValued for S {}
#[test]
fn byte_valued_slice() {
let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];
let mut s: S = Default::default();
s.as_bytes().copy_from(&a);
assert_eq!(s.a, 0);
assert_eq!(s.b, 0x0101_0101);
}
}