use core::{
cell::Cell,
fmt::Debug,
};
use tap::pipe::Pipe;
use crate::{
access::*,
index::{
BitIdx,
BitMask,
},
mem::{
self,
BitRegister,
},
order::BitOrder,
};
/// Common interface for memory elements viewed by the crate's bit-slice types.
///
/// Implemented (below, via macros) for bare unsigned integers, `Cell`
/// wrappers, the crate's `BitSafe` alias guards, and atomic integers. The
/// `seal::Sealed` supertrait closes the implementor set to this module.
pub trait BitStore: 'static + seal::Sealed + Debug {
    /// The plain integer type underlying this storage (`Self` for bare ints).
    type Mem: BitRegister + BitStore<Mem = Self::Mem>;
    /// The type used to perform actual memory access over this storage.
    type Access: BitAccess<Item = Self::Mem> + BitStore<Mem = Self::Mem>;
    /// The storage type this becomes when marked as aliased.
    type Alias: BitStore<Mem = Self::Mem>;
    /// The storage type this becomes when the alias marking is removed.
    type Unalias: BitStore<Mem = Self::Mem>;
    /// Reads the current value out of the underlying memory.
    fn load_value(&self) -> Self::Mem;
    /// Writes `value` into the underlying memory.
    fn store_value(&mut self, value: Self::Mem);
    /// Reads a single bit, positioned by `index` under the ordering `O`.
    fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool
    where O: BitOrder {
        // Load the whole element, wrap it as a mask, and test the one
        // selected bit position.
        self.load_value()
            .pipe(BitMask::new)
            .test(index.select::<O>())
    }
    /// Compile-time assertion that `Self`'s alignment equals its size;
    /// implementors instantiate it with a `const fn` layout check.
    #[doc(hidden)]
    const __ALIGNED_TO_SIZE: [(); 0];
    /// Compile-time assertion that `Self` and its alias/unalias partner
    /// share an identical memory layout.
    #[doc(hidden)]
    const __ALIAS_WIDTH: [(); 0];
}
/// Emits `BitStore` implementations for a bare unsigned integer, its
/// `BitSafe` alias-guard wrapper, and `Cell` of the integer.
macro_rules! store {
    ( $($base:ty => $safe:ty),+ $(,)? ) => { $(
        // Bare integer: unaliased storage with direct reads and writes.
        impl BitStore for $base {
            type Mem = Self;
            // `Cell<T>` permits mutation through shared references and has
            // the same in-memory representation as `T`.
            type Access = Cell<$base>;
            // Marking a bare integer as aliased swaps in its guard type.
            type Alias = $safe;
            type Unalias = Self;
            fn load_value(&self) -> Self::Mem {
                *self
            }
            fn store_value(&mut self, value: Self::Mem) {
                *self = value;
            }
            // Compile-time proof that alignment equals size.
            #[doc(hidden)]
            const __ALIGNED_TO_SIZE: [(); 0]
                = [(); mem::aligned_to_size::<Self>()];
            // Compile-time proof that the alias type has identical layout.
            #[doc(hidden)]
            const __ALIAS_WIDTH: [(); 0]
                = [(); mem::cmp_layout::<Self, Self::Alias>()];
        }
        // Alias guard: loads and stores route through the `BitSafe` API.
        impl BitStore for $safe {
            type Mem = $base;
            // Use the radium accessor chosen by the guard type itself.
            type Access = <Self as BitSafe>::Rad;
            type Alias = Self;
            // Removing the alias marker restores the bare integer.
            type Unalias = $base;
            #[inline(always)]
            fn load_value(&self) -> Self::Mem {
                self.load()
            }
            #[inline(always)]
            fn store_value(&mut self, value: Self::Mem) {
                self.store(value);
            }
            #[doc(hidden)]
            const __ALIGNED_TO_SIZE: [(); 0]
                = [(); mem::aligned_to_size::<Self>()];
            #[doc(hidden)]
            const __ALIAS_WIDTH: [(); 0]
                = [(); mem::cmp_layout::<Self, Self::Unalias>()];
        }
        // `Cell`: shared-mutable storage; it serves as its own accessor
        // and its own alias in both directions.
        impl BitStore for Cell<$base> {
            type Mem = $base;
            type Access = Self;
            type Alias = Self;
            type Unalias = Self;
            #[inline(always)]
            fn load_value(&self) -> Self::Mem {
                self.get()
            }
            #[inline(always)]
            fn store_value(&mut self, value: Self::Mem) {
                self.set(value);
            }
            #[doc(hidden)]
            const __ALIGNED_TO_SIZE: [(); 0]
                = [(); mem::aligned_to_size::<Self>()];
            // `Cell` aliases to itself, so no layout comparison is needed.
            #[doc(hidden)]
            const __ALIAS_WIDTH: [(); 0] = [];
        }
        impl seal::Sealed for $base {}
        impl seal::Sealed for $safe {}
        impl seal::Sealed for Cell<$base> {}
    )+ };
}
// Implement `BitStore` for each fundamental width and its alias guard.
store! {
    u8 => BitSafeU8,
    u16 => BitSafeU16,
    u32 => BitSafeU32,
}
// `u64` storage is only provided on 64-bit targets.
#[cfg(target_pointer_width = "64")]
store!(u64 => BitSafeU64);
store!(usize => BitSafeUsize);
/// Emits a `BitStore` implementation for an atomic integer, but only on
/// targets where radium reports that atomic width as actually available.
macro_rules! atomic_store {
    ($($w:tt , $base:ty => $atom:ident);+ $(;)?) => { $(
        radium::if_atomic!(if atomic($w) {
            use core::sync::atomic::$atom;
            // Atomic storage is its own accessor and its own alias.
            impl BitStore for $atom {
                type Mem = $base;
                type Access = Self;
                type Alias = Self;
                type Unalias = Self;
                fn load_value(&self) -> Self::Mem {
                    // NOTE(review): `Relaxed` ordering — presumably only the
                    // value itself matters here, with no cross-element
                    // ordering requirement; confirm against crate docs.
                    self.load(core::sync::atomic::Ordering::Relaxed)
                }
                fn store_value(&mut self, value: Self::Mem) {
                    self.store(value, core::sync::atomic::Ordering::Relaxed);
                }
                #[doc(hidden)]
                const __ALIGNED_TO_SIZE: [(); 0]
                    = [(); mem::aligned_to_size::<Self>()];
                // Atomics alias to themselves; no layout comparison needed.
                #[doc(hidden)]
                const __ALIAS_WIDTH: [(); 0] = [];
            }
            impl seal::Sealed for $atom {}
        });
    )+ };
}
// Implement `BitStore` for each atomic width the target may support;
// the macro itself skips widths the target lacks.
atomic_store! {
    8, u8 => AtomicU8;
    16, u16 => AtomicU16;
    32, u32 => AtomicU32;
}
// Gated to 64-bit targets, matching the `store!` gate for `u64` above.
#[cfg(target_pointer_width = "64")]
atomic_store!(64, u64 => AtomicU64);
atomic_store!(size, usize => AtomicUsize);
// Reject targets whose pointer width this crate does not support.
//
// BUGFIX: the original invoked `compile_fail!`, which is not a macro in the
// standard library (and is not defined in this file). On an unsupported
// target that line would abort compilation with a confusing
// "cannot find macro `compile_fail`" error instead of the intended message.
// The correct builtin for emitting a custom compile-time diagnostic is
// `compile_error!`, which accepts the `concat!` expansion directly.
#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
compile_error!(concat!(
    "This architecture is currently not supported. File an issue at ",
    env!("CARGO_PKG_REPOSITORY")
));
/// Private module implementing the sealed-trait pattern.
mod seal {
    /// Marker supertrait of `BitStore`. Because this trait is not exported
    /// from the crate, downstream code cannot implement `BitStore`.
    #[doc(hidden)]
    pub trait Sealed {}
}
#[cfg(test)]
mod tests {
    use core::cell::Cell;
    use static_assertions::*;
    use super::*;
    use crate::prelude::*;

    /// Round-trips a value through every storage flavor's load/store pair.
    #[test]
    fn load_store() {
        // Bare integer storage.
        let mut plain = 0usize;
        plain.store_value(39usize);
        assert_eq!(plain.load_value(), 39usize);

        // Reinterpret the same memory through the alias-guard wrapper.
        let guard: &mut BitSafeUsize =
            unsafe { &mut *(&mut plain as *mut _ as *mut _) };
        guard.store_value(57usize);
        assert_eq!(guard.load_value(), 57);

        // `Cell`-wrapped storage.
        let mut slot = Cell::new(0usize);
        slot.store_value(39);
        assert_eq!(slot.load_value(), 39);

        // Atomic storage, only where the target provides it.
        radium::if_atomic!(if atomic(size) {
            let mut shared = AtomicUsize::new(0);
            shared.store_value(39);
            assert_eq!(shared.load_value(), 39usize);
        });
    }

    /// Bit-slices over unaliased storage are fully thread-capable.
    #[test]
    fn unaliased_send_sync() {
        assert_impl_all!(BitSlice<LocalBits, u8>: Send, Sync);
        assert_impl_all!(BitSlice<LocalBits, u16>: Send, Sync);
        assert_impl_all!(BitSlice<LocalBits, u32>: Send, Sync);
        assert_impl_all!(BitSlice<LocalBits, usize>: Send, Sync);
        #[cfg(target_pointer_width = "64")]
        assert_impl_all!(BitSlice<LocalBits, u64>: Send, Sync);
    }

    /// Bit-slices over `Cell` storage must stay on one thread.
    #[test]
    fn cell_unsend_unsync() {
        assert_not_impl_any!(BitSlice<LocalBits, Cell<u8>>: Send, Sync);
        assert_not_impl_any!(BitSlice<LocalBits, Cell<u16>>: Send, Sync);
        assert_not_impl_any!(BitSlice<LocalBits, Cell<u32>>: Send, Sync);
        assert_not_impl_any!(BitSlice<LocalBits, Cell<usize>>: Send, Sync);
        #[cfg(target_pointer_width = "64")]
        assert_not_impl_any!(BitSlice<LocalBits, Cell<u64>>: Send, Sync);
    }

    /// Without the `atomic` feature, aliased storage is also thread-bound.
    #[test]
    #[cfg(not(feature = "atomic"))]
    fn aliased_nonatomic_unsend_unsync() {
        use crate::access::*;
        assert_not_impl_any!(BitSlice<LocalBits, BitSafeU8>: Send, Sync);
        assert_not_impl_any!(BitSlice<LocalBits, BitSafeU16>: Send, Sync);
        assert_not_impl_any!(BitSlice<LocalBits, BitSafeU32>: Send, Sync);
        assert_not_impl_any!(BitSlice<LocalBits, BitSafeUsize>: Send, Sync);
        #[cfg(target_pointer_width = "64")]
        assert_not_impl_any!(BitSlice<LocalBits, BitSafeU64>: Send, Sync);
    }
}