use crate::{
access::BitAccess,
index::{
BitIdx,
BitMask,
BitRegister,
},
mem,
order::BitOrder,
};
use core::{
cell::Cell,
fmt::Debug,
};
use radium::Radium;
/// Common interface for memory regions that underlie bit-slice buffers.
///
/// This trait is implemented on the unsigned integer fundamentals, their
/// `Cell` wrappers, and (behind the `atomic` feature) their atomic variants.
/// It is sealed (`seal::Sealed`) and cannot be implemented outside this
/// crate.
pub trait BitStore: seal::Sealed + Sized + Debug {
	/// The plain register type that this store wraps.
	type Mem: BitRegister + Into<Self>;
	/// The access mechanism used to perform memory operations on the region.
	type Access: BitAccess<Item = Self::Mem>;
	/// The sibling store type used when the memory region is known to be
	/// aliased by multiple handles.
	type Alias: BitStore + Radium<Item = Self::Mem>;
	/// Marker type used to influence thread-safety of containing structures.
	/// NOTE(review): the `Cell` impl sets this to a raw pointer (`!Send +
	/// !Sync`), so this appears to gate `Send`/`Sync` — confirm at use sites.
	#[doc(hidden)]
	type Threadsafe;
	/// Compile-time guard asserting the implementor's alignment matches its
	/// size (evaluated through `mem::aligned_to_size` in the impls).
	#[doc(hidden)]
	const __ALIGNED_TO_SIZE: [(); 0];
	/// Compile-time guard asserting `Mem` and `Alias` share a memory layout
	/// (evaluated through `mem::cmp_layout` in the impls).
	#[doc(hidden)]
	const __ALIAS_WIDTH: [(); 0];
	/// Loads the current value out of the memory region.
	fn load_value(&self) -> Self::Mem;

	/// Reads a single bit, positioned by `index` under the ordering `O`.
	//  `#[inline]` added for consistency with `get_bits` below.
	#[inline]
	fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool
	where O: BitOrder {
		// SAFETY: `BitMask::new` is an unchecked constructor; the loaded
		// register value is used directly as the mask. The validity contract
		// is defined in `index::BitMask` — presumed to accept any register
		// value here.
		unsafe { BitMask::new(self.load_value()) }.test(index.select::<O>())
	}

	/// Reads all the bits selected by `mask`, zeroing the unselected bits.
	#[inline]
	fn get_bits(&self, mask: BitMask<Self::Mem>) -> Self::Mem {
		self.load_value() & mask.value()
	}
}
/// Generates `BitStore` (and `seal::Sealed`) implementations for an unsigned
/// integer fundamental (`$t`) and, behind the `atomic` feature, its atomic
/// wrapper type (`$a`).
macro_rules! store {
	($($t:ty => $a:ty),+ $(,)?) => { $(
		// The bare integer: direct, unsynchronized storage.
		impl BitStore for $t {
			type Access = Cell<Self>;
			// Aliased regions use the atomic wrapper when atomics are
			// available, and fall back to `Cell` (which is `!Sync`)
			// otherwise.
			#[cfg(feature = "atomic")]
			type Alias = $a;
			#[cfg(not(feature = "atomic"))]
			type Alias = Cell<Self>;
			type Mem = Self;
			#[doc(hidden)]
			type Threadsafe = Self;
			// Compile-time guard: alignment must equal size.
			#[doc(hidden)]
			const __ALIGNED_TO_SIZE: [(); 0] = [(); mem::aligned_to_size::<Self>()];
			// Compile-time guard: `Mem` and `Alias` must share a layout.
			#[doc(hidden)]
			const __ALIAS_WIDTH: [(); 0] = [(); mem::cmp_layout::<Self::Mem, Self::Alias>()];
			#[inline(always)]
			fn load_value(&self) -> Self::Mem {
				*self
			}
		}
		// The atomic wrapper: synchronized storage. Compiled only when the
		// `atomic` feature is enabled.
		#[cfg(feature = "atomic")]
		impl BitStore for $a {
			type Access = Self;
			type Alias = Self;
			type Mem = $t;
			#[doc(hidden)]
			type Threadsafe = Self;
			#[doc(hidden)]
			const __ALIGNED_TO_SIZE: [(); 0] = [(); mem::aligned_to_size::<Self>()];
			#[doc(hidden)]
			const __ALIAS_WIDTH: [(); 0] = [(); mem::cmp_layout::<Self::Mem, Self::Alias>()];
			#[inline(always)]
			fn load_value(&self) -> Self::Mem {
				// A bare value read with no ordering relationship to other
				// memory operations; `Relaxed` is the chosen ordering here.
				Self::load(self, core::sync::atomic::Ordering::Relaxed)
			}
		}
		// Seal both storage types against downstream implementations.
		impl seal::Sealed for $t {}
		#[cfg(feature = "atomic")]
		impl seal::Sealed for $a {}
	)+ };
}
// Implement `BitStore` for the fixed-width integers (and, with the `atomic`
// feature, their `radium` atomic wrappers).
store!(
	u8 => radium::types::RadiumU8,
	u16 => radium::types::RadiumU16,
	u32 => radium::types::RadiumU32,
);
// `u64` is a `BitStore` only on targets with 64-bit pointers.
#[cfg(target_pointer_width = "64")]
store!(u64 => radium::types::RadiumU64);
store!(usize => radium::types::RadiumUsize);
/// `Cell` wrappers act as stores with unsynchronized interior mutability.
/// A `Cell`-backed region is its own alias type, and is never threadsafe.
impl<R> BitStore for Cell<R>
where
	Self: Radium<Item = R>,
	R: BitRegister,
{
	type Mem = R;
	type Access = Self;
	type Alias = Self;
	// A raw pointer is `!Send + !Sync`, so `Cell` storage is marked as
	// unusable across threads.
	#[doc(hidden)]
	type Threadsafe = *const Self;
	// NOTE(review): unlike the macro-generated impls, these guards are left
	// empty — the `mem::` const checks cannot be evaluated with a generic
	// type parameter in an array length on stable Rust.
	#[doc(hidden)]
	const __ALIGNED_TO_SIZE: [(); 0] = [];
	#[doc(hidden)]
	const __ALIAS_WIDTH: [(); 0] = [];

	/// Reads the wrapped value out of the cell.
	#[inline(always)]
	fn load_value(&self) -> Self::Mem {
		Cell::get(self)
	}
}
// `Cell` wrappers over registers are also sealed as permitted stores.
impl<R> seal::Sealed for Cell<R> where R: BitRegister {}
// Halt compilation on targets whose pointer width this crate does not
// support. The original used `compile_fail!`, which is not a macro that
// exists — unsupported targets would fail with "cannot find macro
// `compile_fail`" instead of this diagnostic. `compile_error!` is the
// builtin that emits the message; it requires a plain string literal (it
// does not expand `concat!`/`env!` in its argument), so the repository URL
// cannot be spliced in here.
#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
compile_error!(
	"This architecture is currently not supported. File an issue at the \
	 repository listed in this crate's Cargo.toml metadata."
);
/// Private module hosting the sealing trait, preventing `BitStore` from
/// being implemented outside this crate.
mod seal {
	/// Marker trait; implemented only within this crate.
	#[doc(hidden)]
	pub trait Sealed {}
}
#[cfg(test)]
#[cfg(not(tarpaulin_include))]
mod tests {
	use crate::prelude::*;
	use core::cell::Cell;
	use static_assertions::*;

	// Statically verifies the `Send`/`Sync` behavior of `BitSlice` over each
	// storage type: bare integers are always threadsafe; aliased integers
	// are threadsafe only when the `atomic` feature is enabled; `Cell`
	// storage is never threadsafe.
	#[test]
	fn traits() {
		// Unaliased integer storage is freely shareable across threads.
		assert_impl_all!(BitSlice<LocalBits, u8>: Send, Sync);
		assert_impl_all!(BitSlice<LocalBits, u16>: Send, Sync);
		assert_impl_all!(BitSlice<LocalBits, u32>: Send, Sync);
		assert_impl_all!(BitSlice<LocalBits, usize>: Send, Sync);
		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<LocalBits, u64>: Send, Sync);
		// With atomics, the alias types are atomic wrappers and remain
		// threadsafe.
		#[cfg(feature = "atomic")]
		{
			assert_impl_all!(BitSlice<LocalBits, <u8 as BitStore>::Alias>: Send, Sync);
			assert_impl_all!(BitSlice<LocalBits, <u16 as BitStore>::Alias>: Send, Sync);
			assert_impl_all!(BitSlice<LocalBits, <u32 as BitStore>::Alias>: Send, Sync);
			assert_impl_all!(BitSlice<LocalBits, <usize as BitStore>::Alias>: Send, Sync);
			#[cfg(target_pointer_width = "64")]
			assert_impl_all!(BitSlice<LocalBits, <u64 as BitStore>::Alias>: Send, Sync);
		}
		// Without atomics, the alias types fall back to `Cell` and lose
		// thread safety.
		#[cfg(not(feature = "atomic"))]
		{
			assert_not_impl_any!(BitSlice<LocalBits, <u8 as BitStore>::Alias>: Send, Sync);
			assert_not_impl_any!(BitSlice<LocalBits, <u16 as BitStore>::Alias>: Send, Sync);
			assert_not_impl_any!(BitSlice<LocalBits, <u32 as BitStore>::Alias>: Send, Sync);
			assert_not_impl_any!(BitSlice<LocalBits, <usize as BitStore>::Alias>: Send, Sync);
			#[cfg(target_pointer_width = "64")]
			assert_not_impl_any!(BitSlice<LocalBits, <u64 as BitStore>::Alias>: Send, Sync);
		}
		// Explicit `Cell` storage is never `Send` or `Sync`.
		assert_not_impl_any!(BitSlice<LocalBits, Cell<u8>>: Send, Sync);
		assert_not_impl_any!(BitSlice<LocalBits, Cell<u16>>: Send, Sync);
		assert_not_impl_any!(BitSlice<LocalBits, Cell<u32>>: Send, Sync);
		assert_not_impl_any!(BitSlice<LocalBits, Cell<usize>>: Send, Sync);
		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<LocalBits, Cell<u64>>: Send, Sync);
	}
}