//! Memory access guards.
//!
//! `bitvec` can produce multiple handles over one memory element that do not
//! alias each other's *bits*, but do alias the element itself. Writes to a
//! contended element must therefore go through shared-mutable types: atomics
//! when the `atomic` feature is enabled, `Cell` otherwise. This module
//! provides `BitAccess`, which performs such writes, and `BitSafe`, which
//! forbids writing through shared references in the public API.

use core::sync::atomic;

use radium::Radium;

use crate::{
	index::{
		BitIdx,
		BitMask,
	},
	mem::BitRegister,
	order::BitOrder,
};
/// Abstracts over the instruction set used to modify bits in shared memory.
///
/// This trait is implemented blanket-wise for every `Radium` implementor
/// whose `Item` is a register type, and is never implemented by hand. Its
/// methods translate the crate's mask and index types into single
/// read/modify/write instructions.
pub trait BitAccess: Radium
where <Self as Radium>::Item: BitRegister
{
	/// Clears all bits selected by `mask` to `0`.
	///
	/// Bits not selected by the mask retain their current value.
	fn clear_bits(&self, mask: BitMask<Self::Item>) {
		self.fetch_and(!mask.value(), atomic::Ordering::Relaxed);
	}
	/// Sets all bits selected by `mask` to `1`.
	///
	/// Bits not selected by the mask retain their current value.
	fn set_bits(&self, mask: BitMask<Self::Item>) {
		self.fetch_or(mask.value(), atomic::Ordering::Relaxed);
	}
	/// Inverts all bits selected by `mask`.
	///
	/// Bits not selected by the mask retain their current value.
	fn invert_bits(&self, mask: BitMask<Self::Item>) {
		self.fetch_xor(mask.value(), atomic::Ordering::Relaxed);
	}
	/// Writes the bit at `index` to `value`.
	///
	/// The index is translated through the `O` ordering into a one-bit
	/// selection mask, which is then either set or cleared.
	fn write_bit<O>(&self, index: BitIdx<Self::Item>, value: bool)
	where O: BitOrder {
		if value {
			self.fetch_or(
				index.select::<O>().value(),
				atomic::Ordering::Relaxed,
			);
		}
		else {
			self.fetch_and(
				!index.select::<O>().value(),
				atomic::Ordering::Relaxed,
			);
		}
	}
	/// Gets the writer function corresponding to `value`: `set_bits` when
	/// writing `1`s, `clear_bits` when writing `0`s.
	///
	/// Selecting the writer once, outside a loop, removes the branch on
	/// `value` from every iteration of a bulk write.
	fn get_writers(value: bool) -> for<'a> fn(&'a Self, BitMask<Self::Item>) {
		if value {
			Self::set_bits
		}
		else {
			Self::clear_bits
		}
	}
}
/// Every `Radium` implementor over a register type is a bit accessor.
impl<A> BitAccess for A
where
	A: Radium,
	A::Item: BitRegister,
{
}
/// Restricts memory modification to require an exclusive reference.
///
/// The shared-mutable types used internally permit writes through `&`
/// references. Wrappers implementing this trait re-impose the ordinary Rust
/// rule: `load` works through `&self`, but `store` demands `&mut self`, so a
/// shared handle is read-only.
pub trait BitSafe {
	/// The plain integer type being guarded.
	type Mem: BitRegister;
	/// The shared-mutable type used for the actual memory access.
	type Rad: Radium<Item = Self::Mem>;

	/// Reads the current value out of memory.
	fn load(&self) -> Self::Mem;

	/// Writes a value into memory. Requires exclusive access.
	fn store(&mut self, value: Self::Mem);
}
/// Constructs a read-guarding wrapper over an integer type.
///
/// Each `$t => $w => $r` triple emits a `#[repr(transparent)]` struct `$w`
/// whose interior is the atomic type `$r` when the `atomic` feature is
/// enabled, and `Cell<$t>` otherwise.
macro_rules! safe {
	($($t:ident => $w:ident => $r:path),+ $(,)?) => { $(
		#[derive(Debug)]
		#[repr(transparent)]
		pub struct $w {
			inner: <Self as BitSafe>::Rad,
		}

		impl BitSafe for $w {
			type Mem = $t;

			#[cfg(feature = "atomic")]
			type Rad = $r;

			#[cfg(not(feature = "atomic"))]
			type Rad = core::cell::Cell<$t>;

			fn load(&self) -> $t {
				radium::Radium::load(
					&self.inner,
					core::sync::atomic::Ordering::Relaxed,
				)
			}

			fn store(&mut self, value: $t) {
				radium::Radium::store(
					&self.inner,
					value,
					core::sync::atomic::Ordering::Relaxed,
				)
			}
		}
	)+ };
}
safe! {
	u8 => BitSafeU8 => radium::types::RadiumU8,
	u16 => BitSafeU16 => radium::types::RadiumU16,
	u32 => BitSafeU32 => radium::types::RadiumU32,
}

// `u64` is a bit register only on 64-bit targets.
#[cfg(target_pointer_width = "64")]
safe!(u64 => BitSafeU64 => radium::types::RadiumU64);

safe!(usize => BitSafeUsize => radium::types::RadiumUsize);
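
// Added layout check, not part of the original module: `#[repr(transparent)]`
// already guarantees this, but asserting it makes visible the invariant that
// reference-casts between the integer, the wrapper, and its shared-mutable
// interior rely on: all of these types are exactly the same width.
const _: [(); core::mem::size_of::<u8>()] =
	[(); core::mem::size_of::<BitSafeU8>()];
const _: [(); core::mem::size_of::<u16>()] =
	[(); core::mem::size_of::<BitSafeU16>()];
const _: [(); core::mem::size_of::<u32>()] =
	[(); core::mem::size_of::<BitSafeU32>()];
const _: [(); core::mem::size_of::<usize>()] =
	[(); core::mem::size_of::<BitSafeUsize>()];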
#[cfg(test)]
mod tests {
	use super::*;
	use crate::prelude::*;
	#[test]
	fn touch_memory() {
		let mut data = 0u8;
		let bits = data.view_bits_mut::<LocalBits>();
		// View the same element both as a raw accessor and as an aliased
		// store.
		let accessor = unsafe { &*(bits.as_bitspan().address().to_access()) };
		let aliased = unsafe {
			&*(bits.as_bitspan().address().to_const()
				as *const <u8 as BitStore>::Alias)
		};

		BitAccess::set_bits(accessor, BitMask::ALL);
		assert_eq!(accessor.get(), !0);

		BitAccess::clear_bits(accessor, BitMask::ALL);
		assert_eq!(accessor.get(), 0);

		BitAccess::invert_bits(accessor, BitMask::ALL);
		assert_eq!(accessor.get(), !0);

		// Reads through the aliased view observe writes made through the
		// accessor, and do not disturb the underlying memory.
		assert!(BitStore::get_bit::<Lsb0>(aliased, BitIdx::ZERO));
		assert_eq!(accessor.get(), !0);

		BitAccess::write_bit::<Lsb0>(accessor, BitIdx::new(1).unwrap(), false);
		assert_eq!(accessor.get(), !2);
	}
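
	// Added sketch, not in the original suite: exercises `write_bit` under
	// both canonical orderings. `Lsb0` places index 0 at `1 << 0`, while
	// `Msb0` places it at the high bit. Uses only `Cell<u8>`, which receives
	// `BitAccess` through the blanket impl.
	#[test]
	fn write_bit_orderings() {
		use core::cell::Cell;

		let elem = Cell::new(0u8);
		BitAccess::write_bit::<Lsb0>(&elem, BitIdx::ZERO, true);
		assert_eq!(elem.get(), 0b0000_0001);

		BitAccess::write_bit::<Msb0>(&elem, BitIdx::ZERO, true);
		assert_eq!(elem.get(), 0b1000_0001);

		BitAccess::write_bit::<Lsb0>(&elem, BitIdx::ZERO, false);
		assert_eq!(elem.get(), 0b1000_0000);
	}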
	#[test]
	#[cfg(not(miri))]
	fn sanity_check_prefetch() {
		use core::cell::Cell;
		// `get_writers` must return the trait's own methods, not wrappers.
		// Function-pointer comparisons are not meaningful under Miri, hence
		// the exclusion above.
		assert_eq!(
			<Cell<u8> as BitAccess>::get_writers(false) as *const (),
			<Cell<u8> as BitAccess>::clear_bits as *const ()
		);
		assert_eq!(
			<Cell<u8> as BitAccess>::get_writers(true) as *const (),
			<Cell<u8> as BitAccess>::set_bits as *const ()
		);
	}
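
	// Added sketch, not in the original suite: drives `get_writers` the way
	// a bulk write would, selecting the writer once and then applying it to
	// several masks, rather than comparing function pointers.
	#[test]
	fn writer_usage() {
		use core::cell::Cell;

		let elem = Cell::new(0u8);
		let set = <Cell<u8> as BitAccess>::get_writers(true);
		set(&elem, BitMask::new(0b0000_1111));
		set(&elem, BitMask::new(0b0011_0000));
		assert_eq!(elem.get(), 0b0011_1111);

		let clear = <Cell<u8> as BitAccess>::get_writers(false);
		clear(&elem, BitMask::new(0b0000_1100));
		assert_eq!(elem.get(), 0b0011_0011);
	}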
	#[test]
	fn safe_wrappers() {
		use super::BitSafe;

		let bits = bits![mut Msb0, u8; 0; 24];
		// Split so that the two live slices share their boundary element,
		// forcing both into the aliased storage type.
		let (l, c): (&mut BitSlice<Msb0, BitSafeU8>, _) = bits.split_at_mut(4);
		let (c, _): (&mut BitSlice<Msb0, BitSafeU8>, _) = c.split_at_mut(16);

		// The right edge of `l` and the left edge of `c` are the same memory
		// element, viewed through different guard types.
		let l_redge: &<BitSafeU8 as BitSafe>::Rad =
			l.domain_mut().region().unwrap().2.unwrap().0;
		let c_ledge: &BitSafeU8 = c.domain().region().unwrap().0.unwrap().1;
		assert_eq!(
			l_redge as *const _ as *const u8,
			c_ledge as *const _ as *const u8,
		);

		// Writes through the unguarded radium reference are visible to loads
		// through the guarded wrapper.
		assert_eq!(c_ledge.load(), 0);
		l_redge.set_bits(BitMask::new(6));
		assert_eq!(c_ledge.load(), 6);
	}
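
	// Added sketch, not in the original suite: round-trips a value through a
	// directly constructed wrapper to show the `BitSafe` contract: loads go
	// through `&self`, stores require `&mut self`. Constructing the private
	// field literally works only because this module is a child of the
	// defining module; the crate itself only produces these wrappers by
	// reference-casting.
	#[test]
	fn load_store_roundtrip() {
		use super::BitSafe;

		let mut safe = BitSafeU8 {
			inner: Radium::new(0u8),
		};
		assert_eq!(safe.load(), 0);

		safe.store(0b1010_0101);
		assert_eq!(safe.load(), 0b1010_0101);
	}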
}