use core::{
any,
cmp,
convert::{
Infallible,
TryFrom,
TryInto,
},
fmt::{
self,
Debug,
Display,
Formatter,
Pointer,
},
hash::{
Hash,
Hasher,
},
marker::PhantomData,
ptr,
};
use funty::IsNumber;
use wyz::{
comu::Frozen,
fmt::FmtForward,
};
use super::{
Address,
AddressExt,
BitPtrRange,
BitRef,
BitSpan,
BitSpanError,
Const,
MisalignError,
Mut,
Mutability,
NullPtrError,
};
use crate::{
access::BitAccess,
devel as dvl,
index::{
BitIdx,
BitIdxError,
},
order::{
BitOrder,
Lsb0,
},
store::BitStore,
};
/// A pointer to a single bit: an element address paired with a bit index
/// inside that element.
///
/// `#[repr(C, packed)]` keeps the struct as small as possible; consequently
/// the `addr` field may be stored unaligned and must be read through
/// `get_addr`'s unaligned read rather than by reference.
#[repr(C, packed)]
pub struct BitPtr<M, O = Lsb0, T = usize>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// The address of the storage element containing the referent bit.
    addr: Address<M, T>,
    /// The index of the referent bit within `*addr`.
    head: BitIdx<T::Mem>,
    /// Records the bit-ordering type parameter without occupying space.
    _ord: PhantomData<O>,
}
impl<M, O, T> BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// The canonical dangling pointer: the dangling element address with a
    /// zero bit index.
    pub const DANGLING: Self = Self {
        addr: Address::DANGLING,
        head: BitIdx::ZERO,
        _ord: PhantomData,
    };

    /// Copies the address field out of the struct.
    ///
    /// Because the struct is `#[repr(C, packed)]`, `addr` may be unaligned;
    /// this uses an explicit unaligned read instead of a field reference.
    #[cfg_attr(not(tarpaulin_include), inline(always))]
    pub(crate) fn get_addr(&self) -> Address<M, T> {
        unsafe { ptr::addr_of!(self.addr).read_unaligned() }
    }

    /// Attempts to build a bit-pointer from an address-like value and a bit
    /// index, failing if the address is invalid (null or misaligned for `T`)
    /// or the index is out of range for `T::Mem`.
    #[inline]
    pub fn try_new<A>(addr: A, head: u8) -> Result<Self, BitPtrError<T>>
    where
        A: TryInto<Address<M, T>>,
        BitPtrError<T>: From<A::Error>,
    {
        Ok(Self::new(addr.try_into()?, BitIdx::new(head)?))
    }

    /// Builds a bit-pointer from parts already known to be valid.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub fn new(addr: Address<M, T>, head: BitIdx<T::Mem>) -> Self {
        Self {
            addr,
            head,
            _ord: PhantomData,
        }
    }

    /// Decomposes the bit-pointer into its element address and bit index.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub fn raw_parts(self) -> (Address<M, T>, BitIdx<T::Mem>) {
        (self.addr, self.head)
    }

    /// The bit index within the pointed-to element. Only compiled for the
    /// allocator-dependent code paths that use it.
    #[inline(always)]
    #[cfg(feature = "alloc")]
    pub(crate) fn head(self) -> BitIdx<T::Mem> {
        self.head
    }

    /// Widens the pointer into a span descriptor covering `bits` bits,
    /// failing if the described region is invalid.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub(crate) fn span(
        self,
        bits: usize,
    ) -> Result<BitSpan<M, O, T>, BitSpanError<T>> {
        BitSpan::new(self.addr, self.head, bits)
    }

    /// Widens the pointer into a span descriptor without validity checks.
    ///
    /// # Safety
    /// The caller must uphold `BitSpan::new_unchecked`'s requirements.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub(crate) unsafe fn span_unchecked(self, bits: usize) -> BitSpan<M, O, T> {
        BitSpan::new_unchecked(self.addr, self.head, bits)
    }

    /// Produces the half-open bit-pointer range `[self, self + count)`.
    ///
    /// # Safety
    /// `self + count` must stay within the same provenance region (see
    /// `add`/`offset`).
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn range(self, count: usize) -> BitPtrRange<M, O, T> {
        BitPtrRange {
            start: self,
            end: self.add(count),
        }
    }

    /// Converts the pointer into a proxy reference to its referent bit.
    ///
    /// # Safety
    /// The pointer must be dereferenceable for the chosen lifetime `'a`.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn into_bitref<'a>(self) -> BitRef<'a, M, O, T> {
        BitRef::from_bitptr(self)
    }

    /// Removes write permission from the pointer.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub fn immut(self) -> BitPtr<Const, O, T> {
        let Self { addr, head, .. } = self;
        BitPtr {
            addr: addr.immut(),
            head,
            ..BitPtr::DANGLING
        }
    }

    /// Restores write permission to the pointer.
    ///
    /// # Safety
    /// The pointer must have originally been derived from a `Mut` pointer.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn assert_mut(self) -> BitPtr<Mut, O, T> {
        let Self { addr, head, .. } = self;
        BitPtr {
            addr: addr.assert_mut(),
            head,
            ..BitPtr::DANGLING
        }
    }

    /// Freezes the pointer, retaining its original permission `M` as a type
    /// parameter while using the `Frozen` access wrapper.
    #[inline]
    pub(crate) fn freeze(self) -> BitPtr<Frozen<M>, O, T> {
        let Self { addr, head, .. } = self;
        BitPtr {
            addr: addr.freeze(),
            head,
            ..BitPtr::DANGLING
        }
    }

    /// `BitPtr` always wraps a non-null address, so this is always `false`.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    #[deprecated = "`BitPtr` is never null"]
    pub fn is_null(self) -> bool {
        false
    }

    /// Re-types the pointer's storage parameter, routing through a one-bit
    /// span cast so that the referent bit is preserved across the change of
    /// element width.
    #[inline]
    pub fn cast<U>(self) -> BitPtr<M, O, U>
    where U: BitStore {
        let (addr, head, _) =
            unsafe { self.span_unchecked(1) }.cast::<U>().raw_parts();
        BitPtr::new(addr, head)
    }

    /// Converts the pointer into a read-only proxy reference.
    ///
    /// Always returns `Some`; the `Option` mirrors the raw-pointer `as_ref`
    /// API shape.
    ///
    /// # Safety
    /// As `into_bitref`: the pointer must be dereferenceable for `'a`.
    #[inline]
    pub unsafe fn as_ref<'a>(self) -> Option<BitRef<'a, Const, O, T>> {
        Some(BitRef::from_bitptr(self.immut()))
    }

    /// Moves the pointer by `count` bits (signed).
    ///
    /// `head.offset` splits the signed bit distance into a whole-element
    /// offset (applied to the address) and a new in-element head index.
    ///
    /// # Safety
    /// Same contract as `pointer::offset`: the result must remain within the
    /// same provenance region.
    #[inline]
    pub unsafe fn offset(self, count: isize) -> Self {
        let (elts, head) = self.head.offset(count);
        Self::new(self.addr.offset(elts), head)
    }

    /// As `offset`, but wraps at the edge of the address space instead of
    /// invoking undefined behavior.
    #[inline]
    pub fn wrapping_offset(self, count: isize) -> Self {
        let (elts, head) = self.head.offset(count);
        Self::new(self.addr.wrapping_offset(elts), head)
    }

    /// Computes the signed bit distance from `origin` to `self`:
    /// (byte distance × 8) adjusted by the two head indices. All arithmetic
    /// wraps, so the result is only meaningful when both pointers lie in the
    /// same region.
    ///
    /// # Safety
    /// Both pointers must be derived from the same allocation.
    #[inline]
    pub unsafe fn offset_from(self, origin: Self) -> isize {
        (self.addr
            .to_const() as usize)
            .wrapping_sub(origin.addr.to_const() as usize)
            .wrapping_mul(<u8 as IsNumber>::BITS as usize)
            .wrapping_add(self.head.into_inner() as usize)
            .wrapping_sub(origin.head.into_inner() as usize) as isize
    }

    /// Moves the pointer forward by `count` bits.
    ///
    /// # Safety
    /// Same as `offset`.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn add(self, count: usize) -> Self {
        self.offset(count as isize)
    }

    /// Moves the pointer backward by `count` bits.
    ///
    /// # Safety
    /// Same as `offset`.
    #[inline]
    pub unsafe fn sub(self, count: usize) -> Self {
        self.offset((count as isize).wrapping_neg())
    }

    /// As `add`, but with wrapping arithmetic.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub fn wrapping_add(self, count: usize) -> Self {
        self.wrapping_offset(count as isize)
    }

    /// As `sub`, but with wrapping arithmetic.
    #[inline]
    #[cfg(not(tarpaulin_include))]
    pub fn wrapping_sub(self, count: usize) -> Self {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the referent bit out of the loaded element.
    ///
    /// # Safety
    /// The pointer must be valid to read.
    #[inline]
    pub unsafe fn read(self) -> bool {
        (&*self.addr.to_const())
            .load_value()
            .get_bit::<O>(self.head)
    }

    /// Reads the referent bit using a volatile element load. Volatility
    /// applies to the whole element, not the single bit.
    ///
    /// # Safety
    /// The pointer must be valid to read.
    #[inline]
    pub unsafe fn read_volatile(self) -> bool {
        self.addr.to_const().read_volatile().get_bit::<O>(self.head)
    }

    /// Copies `count` bits from `self` into `dest`, tolerating overlap when
    /// the two orderings match.
    ///
    /// # Safety
    /// `self` must be readable and `dest` writable for `count` bits.
    #[inline]
    pub unsafe fn copy_to<O2, T2>(self, dest: BitPtr<Mut, O2, T2>, count: usize)
    where
        O2: BitOrder,
        T2: BitStore,
    {
        if dvl::match_order::<O, O2>() {
            // Re-type the destination under `O` so it can be compared
            // against the source range.
            let (addr, head) = dest.raw_parts();
            let dst = BitPtr::<Mut, O, T2>::new(addr, head);
            let src_pair = self.range(count);
            // If the destination start lies inside the source range, copy
            // in reverse so each source bit is read before being clobbered.
            let rev = src_pair.contains(&dst);
            let iter = src_pair.zip(dest.range(count));
            if rev {
                for (from, to) in iter.rev() {
                    to.write(from.read());
                }
            }
            else {
                for (from, to) in iter {
                    to.write(from.read());
                }
            }
        }
        else {
            // NOTE(review): when the orderings differ, this falls through to
            // the nonoverlapping copy — presumably overlap detection is not
            // meaningful across orderings; confirm against upstream docs.
            self.copy_to_nonoverlapping(dest, count);
        }
    }

    /// Copies `count` bits from `self` into `dest` with no overlap handling.
    ///
    /// # Safety
    /// The regions must not overlap; `self` must be readable and `dest`
    /// writable for `count` bits.
    #[inline]
    pub unsafe fn copy_to_nonoverlapping<O2, T2>(
        self,
        dest: BitPtr<Mut, O2, T2>,
        count: usize,
    ) where
        O2: BitOrder,
        T2: BitStore,
    {
        for (from, to) in self.range(count).zip(dest.range(count)) {
            to.write(from.read());
        }
    }

    /// Computes how many bits `self` must advance for its element address to
    /// satisfy `align`. Mirrors `pointer::align_offset`; `usize::MAX` (`!0`)
    /// means the alignment cannot be reached.
    ///
    /// NOTE(review): the `(0, head)` arm scales by `align * 8` (bits per
    /// byte) while the general arm scales element counts by `width`
    /// (`T::Mem::BITS`); these agree only under specific assumptions about
    /// `align` — verify against the upstream documentation.
    #[inline]
    pub fn align_offset(self, align: usize) -> usize {
        let width = <T::Mem as IsNumber>::BITS as usize;
        match (
            self.addr.to_const().align_offset(align),
            self.head.into_inner() as usize,
        ) {
            // Already element-aligned with no bit offset: nothing to do.
            (0, 0) => 0,
            // Aligned element but nonzero head: advance to the boundary.
            (0, head) => align * 8 - head,
            // Alignment unreachable.
            (usize::MAX, _) => !0,
            // `elts` whole elements forward, minus the bits already
            // traversed inside the current element.
            (elts, head) => elts.wrapping_mul(width).wrapping_sub(head),
        }
    }
}
impl<O, T> BitPtr<Const, O, T>
where
    O: BitOrder,
    T: BitStore,
{
    /// Constructs a read-only bit-pointer to the zeroth bit of `elem`.
    #[inline]
    pub fn from_ref(elem: &T) -> Self {
        Self::new(elem.into(), BitIdx::ZERO)
    }

    /// Attempts to construct a read-only bit-pointer to the zeroth bit of
    /// the element at `elem`, failing if the pointer is null or misaligned.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub fn from_ptr(elem: *const T) -> Result<Self, BitPtrError<T>> {
        Self::try_new(elem, 0)
    }

    /// Constructs a read-only bit-pointer to the zeroth bit of the first
    /// element of `slice`. An empty slice still has a valid, non-null base
    /// address, so this cannot fail.
    #[inline]
    pub fn from_slice(slice: &[T]) -> Self {
        Self::new(unsafe { slice.as_ptr().force_wrap() }, BitIdx::ZERO)
    }

    /// The raw element pointer underlying this bit-pointer.
    #[inline]
    #[cfg(not(tarpaulin_include))]
    pub fn pointer(&self) -> *const T {
        self.get_addr().to_const()
    }
}
impl<O, T> BitPtr<Mut, O, T>
where
    O: BitOrder,
    T: BitStore,
{
    /// Constructs a write-capable bit-pointer to the zeroth bit of `elem`.
    #[inline]
    pub fn from_mut(elem: &mut T) -> Self {
        Self::new(elem.into(), BitIdx::ZERO)
    }

    /// Attempts to construct a write-capable bit-pointer to the zeroth bit
    /// of the element at `elem`, failing if the pointer is null or
    /// misaligned.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub fn from_mut_ptr(elem: *mut T) -> Result<Self, BitPtrError<T>> {
        Self::try_new(elem, 0)
    }

    /// Constructs a write-capable bit-pointer to the zeroth bit of the first
    /// element of `slice`. An empty slice still has a valid, non-null base
    /// address, so this cannot fail.
    #[inline]
    pub fn from_mut_slice(slice: &mut [T]) -> Self {
        Self::new(unsafe { slice.as_mut_ptr().force_wrap() }, BitIdx::ZERO)
    }

    /// The raw element pointer underlying this bit-pointer.
    #[inline]
    #[cfg(not(tarpaulin_include))]
    pub fn pointer(&self) -> *mut T {
        self.get_addr().to_mut()
    }

    /// Converts the pointer into a write-capable proxy reference.
    ///
    /// Always returns `Some`; the `Option` mirrors the raw-pointer `as_mut`
    /// API shape.
    ///
    /// # Safety
    /// The pointer must be dereferenceable, with exclusive access, for `'a`.
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn as_mut<'a>(self) -> Option<BitRef<'a, Mut, O, T>> {
        Some(BitRef::from_bitptr(self))
    }

    /// Copies `count` bits from `src` into `self`, tolerating overlap.
    ///
    /// # Safety
    /// See [`BitPtr::copy_to`].
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn copy_from<O2, T2>(
        self,
        src: BitPtr<Const, O2, T2>,
        count: usize,
    ) where
        O2: BitOrder,
        T2: BitStore,
    {
        src.copy_to(self, count);
    }

    /// Copies `count` bits from `src` into `self`; the regions must not
    /// overlap.
    ///
    /// # Safety
    /// See [`BitPtr::copy_to_nonoverlapping`].
    #[inline(always)]
    #[cfg(not(tarpaulin_include))]
    pub unsafe fn copy_from_nonoverlapping<O2, T2>(
        self,
        src: BitPtr<Const, O2, T2>,
        count: usize,
    ) where
        O2: BitOrder,
        T2: BitStore,
    {
        src.copy_to_nonoverlapping(self, count);
    }

    /// Writes `value` into the referent bit.
    ///
    /// # Safety
    /// The pointer must be valid to write.
    //  Fix: the lint path was misspelled as
    //  `clippy::clippy::missing_safety_doc`, which names no known lint (and
    //  itself raises an unknown-lint warning), so it suppressed nothing.
    #[inline]
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn write(self, value: bool) {
        self.replace(value);
    }

    /// Writes `val` into the referent bit using volatile element accesses.
    ///
    /// The read-modify-write is volatile at element granularity; it is NOT
    /// atomic.
    ///
    /// # Safety
    /// The pointer must be valid to read and write.
    #[inline]
    pub unsafe fn write_volatile(self, val: bool) {
        // Mask selecting exactly the referent bit within the element.
        let select = O::select(self.head).into_inner();
        let ptr = self.addr.cast::<T::Mem>().to_mut();
        let mut tmp = ptr.read_volatile();
        if val {
            tmp |= &select;
        }
        else {
            tmp &= &!select;
        }
        ptr.write_volatile(tmp);
    }

    /// Writes `src` into the referent bit, returning the bit's previous
    /// value.
    ///
    /// # Safety
    /// The pointer must be valid to read and write.
    #[inline]
    pub unsafe fn replace(self, src: bool) -> bool {
        self.freeze().frozen_write_bit(src)
    }

    /// Exchanges the referent bits of `self` and `with`.
    ///
    /// # Safety
    /// Both pointers must be valid to read and write.
    #[inline]
    pub unsafe fn swap<O2, T2>(self, with: BitPtr<Mut, O2, T2>)
    where
        O2: BitOrder,
        T2: BitStore,
    {
        self.write(with.replace(self.read()));
    }
}
impl<M, O, T> BitPtr<Frozen<M>, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// Writes `value` into the referent bit through the store's shared
    /// access type (`T::Access`), returning the bit's previous value.
    ///
    /// # Safety
    /// The caller must guarantee that writing through this pointer is
    /// permitted — i.e. it was frozen from a pointer with write provenance.
    #[inline]
    pub(crate) unsafe fn frozen_write_bit(self, value: bool) -> bool {
        (&*self.addr.cast::<T::Access>().to_const())
            .write_bit::<O>(self.head, value)
    }
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Clone for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// Duplicates the pointer. The address field of the packed struct is
    /// re-fetched through `get_addr`, which performs the unaligned read the
    /// layout requires.
    #[inline(always)]
    fn clone(&self) -> Self {
        let mut out = *self;
        out.addr = self.get_addr();
        out
    }
}
/// Pointer equality (address plus head index) is a total equivalence when
/// the type parameters are identical, so `Eq` is a pure marker here.
impl<M, O, T> Eq for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Ord for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// Delegates to `PartialOrd`. That comparison only returns `None` when
    /// the two storage types have differing memory representations, which
    /// cannot happen for `Self` vs `Self`, so the `expect` never fires.
    #[inline]
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.partial_cmp(other).expect(
            "BitPtr has a total ordering when type parameters are identical",
        )
    }
}
#[cfg(not(tarpaulin_include))]
impl<M1, M2, O, T1, T2> PartialEq<BitPtr<M2, O, T2>> for BitPtr<M1, O, T1>
where
    M1: Mutability,
    M2: Mutability,
    O: BitOrder,
    T1: BitStore,
    T2: BitStore,
{
    /// Two bit-pointers are equal when their storage types share a memory
    /// representation and their addresses and head indices coincide.
    /// Mutability permissions do not participate in equality.
    #[inline]
    fn eq(&self, other: &BitPtr<M2, O, T2>) -> bool {
        let same_addr = self.get_addr().to_const() as usize
            == other.get_addr().to_const() as usize;
        let same_head = self.head.into_inner() == other.head.into_inner();
        dvl::match_store::<T1::Mem, T2::Mem>() && same_addr && same_head
    }
}
#[cfg(not(tarpaulin_include))]
impl<M1, M2, O, T1, T2> PartialOrd<BitPtr<M2, O, T2>> for BitPtr<M1, O, T1>
where
    M1: Mutability,
    M2: Mutability,
    O: BitOrder,
    T1: BitStore,
    T2: BitStore,
{
    /// Orders bit-pointers by element address, then by head index. Pointers
    /// whose storage types have differing memory representations are
    /// unordered.
    #[inline]
    fn partial_cmp(&self, other: &BitPtr<M2, O, T2>) -> Option<cmp::Ordering> {
        if !dvl::match_store::<T1::Mem, T2::Mem>() {
            return None;
        }
        let lhs = self.get_addr().to_const() as usize;
        let rhs = other.get_addr().to_const() as usize;
        match lhs.cmp(&rhs) {
            cmp::Ordering::Equal => self
                .head
                .into_inner()
                .partial_cmp(&other.head.into_inner()),
            ord => Some(ord),
        }
    }
}
#[cfg(not(tarpaulin_include))]
impl<O, T> From<&T> for BitPtr<Const, O, T>
where
    O: BitOrder,
    T: BitStore,
{
    /// Equivalent to [`BitPtr::from_ref`].
    #[inline]
    fn from(elem: &T) -> Self {
        Self::from_ref(elem)
    }
}
#[cfg(not(tarpaulin_include))]
impl<O, T> From<&mut T> for BitPtr<Mut, O, T>
where
    O: BitOrder,
    T: BitStore,
{
    /// Equivalent to [`BitPtr::from_mut`].
    #[inline]
    fn from(elem: &mut T) -> Self {
        Self::from_mut(elem)
    }
}
#[cfg(not(tarpaulin_include))]
impl<O, T> TryFrom<*const T> for BitPtr<Const, O, T>
where
    O: BitOrder,
    T: BitStore,
{
    type Error = BitPtrError<T>;

    /// Equivalent to [`BitPtr::from_ptr`]: fails on null or misaligned
    /// pointers.
    #[inline(always)]
    fn try_from(elem: *const T) -> Result<Self, Self::Error> {
        Self::from_ptr(elem)
    }
}
#[cfg(not(tarpaulin_include))]
impl<O, T> TryFrom<*mut T> for BitPtr<Mut, O, T>
where
    O: BitOrder,
    T: BitStore,
{
    type Error = BitPtrError<T>;

    /// Equivalent to [`BitPtr::from_mut_ptr`]: fails on null or misaligned
    /// pointers.
    #[inline(always)]
    fn try_from(elem: *mut T) -> Result<Self, Self::Error> {
        Self::from_mut_ptr(elem)
    }
}
impl<M, O, T> Debug for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// Renders the pointer's mutability marker and type parameters, then
    /// its address/head pair via the `Pointer` implementation.
    #[inline]
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        let ord = any::type_name::<O>();
        let sto = any::type_name::<T>();
        write!(fmt, "{} Bit<{}, {}>", M::RENDER, ord, sto)?;
        Pointer::fmt(self, fmt)
    }
}
impl<M, O, T> Pointer for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// Renders as an anonymous tuple `(address, head)`, showing the address
    /// in pointer notation and the head index in binary.
    #[inline]
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        let mut tuple = fmt.debug_tuple("");
        tuple.field(&self.get_addr().fmt_pointer());
        tuple.field(&self.head.fmt_binary());
        tuple.finish()
    }
}
#[cfg(not(tarpaulin_include))]
impl<M, O, T> Hash for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
    /// Hashes the address (copied out of the packed struct) followed by the
    /// head index — the same components that `PartialEq` compares.
    #[inline]
    fn hash<H>(&self, state: &mut H)
    where H: Hasher {
        let (addr, head) = (self.get_addr(), self.head);
        addr.hash(state);
        head.hash(state);
    }
}
/// `BitPtr` is plain data (an address and an index), so it is freely
/// copyable.
impl<M, O, T> Copy for BitPtr<M, O, T>
where
    M: Mutability,
    O: BitOrder,
    T: BitStore,
{
}
/// Errors that can occur when constructing a `BitPtr` from raw parts.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum BitPtrError<T>
where T: BitStore
{
    /// The supplied element address was null.
    Null(NullPtrError),
    /// The supplied element address was not aligned for `T`.
    Misaligned(MisalignError<T>),
    /// The supplied bit index was out of range for `T::Mem`.
    BadIndex(BitIdxError<T::Mem>),
}
#[cfg(not(tarpaulin_include))]
impl<T> From<NullPtrError> for BitPtrError<T>
where T: BitStore
{
    /// Wraps a null-pointer error in the `Null` variant.
    #[inline(always)]
    fn from(err: NullPtrError) -> Self {
        Self::Null(err)
    }
}
#[cfg(not(tarpaulin_include))]
impl<T> From<MisalignError<T>> for BitPtrError<T>
where T: BitStore
{
    /// Wraps a misalignment error in the `Misaligned` variant.
    #[inline(always)]
    fn from(err: MisalignError<T>) -> Self {
        Self::Misaligned(err)
    }
}
#[cfg(not(tarpaulin_include))]
impl<T> From<BitIdxError<T::Mem>> for BitPtrError<T>
where T: BitStore
{
    /// Wraps a bit-index range error in the `BadIndex` variant.
    #[inline(always)]
    fn from(err: BitIdxError<T::Mem>) -> Self {
        Self::BadIndex(err)
    }
}
#[cfg(not(tarpaulin_include))]
impl<T> From<Infallible> for BitPtrError<T>
where T: BitStore
{
    /// `Infallible` has no values, so this conversion can never execute.
    ///
    /// An exhaustive empty `match` on the uninhabited type lets the
    /// compiler prove unreachability statically, instead of relying on the
    /// runtime `unreachable!` panic the previous body used.
    #[inline(always)]
    fn from(err: Infallible) -> Self {
        match err {}
    }
}
#[cfg(not(tarpaulin_include))]
impl<T> Display for BitPtrError<T>
where T: BitStore
{
    /// Forwards to the `Display` implementation of the wrapped error.
    #[inline]
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        let inner: &dyn Display = match self {
            Self::Null(err) => err,
            Self::Misaligned(err) => err,
            Self::BadIndex(err) => err,
        };
        Display::fmt(inner, fmt)
    }
}
/// `BitPtrError` already provides `Debug` and `Display`, which is all the
/// `Error` contract requires. Only available with the standard library.
#[cfg(feature = "std")]
impl<T> std::error::Error for BitPtrError<T> where T: BitStore
{
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        prelude::Lsb0,
        ptr::Const,
    };

    /// Construction round-trips the address and head index through
    /// `try_new`/`raw_parts`.
    #[test]
    fn ctor() {
        let data = 0u16;
        let head = 5;
        let bitptr = BitPtr::<Const, Lsb0, _>::try_new(&data, head).unwrap();
        let (addr, indx) = bitptr.raw_parts();
        assert_eq!(addr.to_const(), &data as *const _);
        assert_eq!(indx.into_inner(), head);
    }

    /// A pointer to a set bit produces a proxy reference reading `true`.
    #[test]
    fn bitref() {
        let data = 1u32 << 23;
        let head = 23;
        let bitptr = BitPtr::<Const, Lsb0, _>::try_new(&data, head).unwrap();
        let bitref = unsafe { bitptr.as_ref() }.unwrap();
        assert!(*bitref);
    }

    /// The packed layout keeps `BitPtr` within one word plus one byte.
    #[test]
    fn assert_size() {
        assert!(
            core::mem::size_of::<BitPtr<Const, Lsb0, u8>>()
                <= core::mem::size_of::<usize>() + core::mem::size_of::<u8>(),
        );
    }

    /// `Debug` renders the permission, the type parameters, and then the
    /// address/head pair.
    #[test]
    #[cfg(feature = "alloc")]
    fn format() {
        #[cfg(not(feature = "std"))]
        use alloc::format;

        use crate::order::Msb0;

        let base = 0u16;
        let bitptr = BitPtr::<_, Msb0, _>::from_ref(&base);
        let text = format!("{:?}", unsafe { bitptr.add(3) });
        let render = format!(
            "*const Bit<bitvec::order::Msb0, u16>({:p}, {:04b})",
            &base as *const u16, 3
        );
        assert_eq!(text, render);
    }
}