#[cfg(feature = "alloc")]
use alloc::borrow::ToOwned;
use core::{
cmp,
convert::TryFrom,
fmt::{
self,
Binary,
Debug,
Display,
Formatter,
LowerHex,
Octal,
UpperHex,
},
hash::{
Hash,
Hasher,
},
hint,
str,
};
use funty::IsNumber;
use tap::pipe::Pipe;
use super::BitSlice;
#[cfg(feature = "alloc")]
use crate::vec::BitVec;
use crate::{
devel as dvl,
domain::Domain,
order::{
BitOrder,
Lsb0,
Msb0,
},
store::BitStore,
view::BitView,
};
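/// `BitSlice` equality is reflexive, symmetric, and transitive, so `Eq` adds
/// no behavior beyond `PartialEq`.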
impl<O, T> Eq for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
}
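/// `BitSlice`s have a total ordering, so `cmp` can safely unwrap the result
/// of `partial_cmp`, which never produces `None` for these operands.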
impl<O, T> Ord for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline]
fn cmp(&self, rhs: &Self) -> cmp::Ordering {
self.partial_cmp(rhs)
.expect("BitSlice has a total ordering")
}
}
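/// Tests whether two `BitSlice`s are semantically equal: the same length and
/// the same bit at every index, regardless of either slice's ordering or
/// storage parameters. When both operands have matching types and a known
/// `Lsb0` or `Msb0` ordering, the comparison dispatches to an accelerated
/// batch implementation; otherwise it falls back to bit-by-bit iteration.
///
/// A small illustration (the values are chosen only for the example):
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let a = 0b0000_0011u8;
/// let b = 0b1100_0000u8;
/// // `Lsb0` reads `a` from the least significant bit up, while `Msb0` reads
/// // `b` from the most significant bit down: both yield `1, 1, 0, 0, …`.
/// assert_eq!(a.view_bits::<Lsb0>(), b.view_bits::<Msb0>());
/// ```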
impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
let fallback = || {
if self.len() != rhs.len() {
return false;
}
self.iter()
.by_val()
.zip(rhs.iter().by_val())
.all(|(l, r)| l == r)
};
if dvl::match_types::<O1, T1, O2, T2>() {
if dvl::match_order::<O1, Lsb0>() {
let this: &BitSlice<Lsb0, T1> =
unsafe { &*(self as *const _ as *const _) };
let that: &BitSlice<Lsb0, T1> =
unsafe { &*(rhs as *const _ as *const _) };
this.sp_eq(that)
}
else if dvl::match_order::<O1, Msb0>() {
let this: &BitSlice<Msb0, T1> =
unsafe { &*(self as *const _ as *const _) };
let that: &BitSlice<Msb0, T1> =
unsafe { &*(rhs as *const _ as *const _) };
this.sp_eq(that)
}
else {
fallback()
}
}
else {
fallback()
}
}
}
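// The remaining `PartialEq` implementations only forward through one level
// of reference on either operand.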
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for &BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
**self == rhs
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialEq<BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn eq(&self, rhs: &BitSlice<O2, T2>) -> bool {
**self == rhs
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialEq<&BitSlice<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn eq(&self, rhs: &&BitSlice<O2, T2>) -> bool {
*self == **rhs
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialEq<&mut BitSlice<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn eq(&self, rhs: &&mut BitSlice<O2, T2>) -> bool {
*self == **rhs
}
}
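/// Compares two `BitSlice`s lexicographically: the first index at which the
/// bits differ decides the ordering, with `true` ranking above `false`, and
/// a strict prefix compares as less than the longer slice.
///
/// A small illustration (the values are chosen only for the example):
///
/// ```rust
/// use bitvec::prelude::*;
///
/// let x = 0b0100_0000u8;
/// let y = 0b1000_0000u8;
/// let x = x.view_bits::<Msb0>(); // 0, 1, 0, 0, …
/// let y = y.view_bits::<Msb0>(); // 1, 0, 0, 0, …
/// assert!(x < y);
/// // A strict prefix is less than the longer slice.
/// assert!(y[.. 4] < y[.. 6]);
/// ```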
impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
for (l, r) in self.iter().zip(rhs.iter()) {
match (*l, *r) {
(true, false) => return Some(cmp::Ordering::Greater),
(false, true) => return Some(cmp::Ordering::Less),
_ => continue,
}
}
self.len().partial_cmp(&rhs.len())
}
}
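// The remaining `PartialOrd` implementations only forward through one level
// of reference on either operand.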
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for &BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
(*self).partial_cmp(rhs)
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialOrd<BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &BitSlice<O2, T2>) -> Option<cmp::Ordering> {
(**self).partial_cmp(rhs)
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialOrd<&BitSlice<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &&BitSlice<O2, T2>) -> Option<cmp::Ordering> {
(*self).partial_cmp(&**rhs)
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialOrd<&mut BitSlice<O2, T2>> for BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &&mut BitSlice<O2, T2>) -> Option<cmp::Ordering> {
(*self).partial_cmp(&**rhs)
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialOrd<&mut BitSlice<O2, T2>> for &BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &&mut BitSlice<O2, T2>) -> Option<cmp::Ordering> {
(**self).partial_cmp(&**rhs)
}
}
#[cfg(not(tarpaulin_include))]
impl<O1, O2, T1, T2> PartialOrd<&BitSlice<O2, T2>> for &mut BitSlice<O1, T1>
where
O1: BitOrder,
O2: BitOrder,
T1: BitStore,
T2: BitStore,
{
#[inline]
fn partial_cmp(&self, rhs: &&BitSlice<O2, T2>) -> Option<cmp::Ordering> {
(**self).partial_cmp(&**rhs)
}
}
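/// Attempts to view an element slice as a `BitSlice`, failing (and returning
/// the source slice unchanged) when the source holds more elements than a
/// `BitSlice` region can address. A sketch of ordinary use:
///
/// ```rust
/// use core::convert::TryFrom;
/// use bitvec::prelude::*;
///
/// let raw = [0u8; 2];
/// let bits = <&BitSlice<Msb0, u8>>::try_from(&raw[..]).unwrap();
/// assert_eq!(bits.len(), 16);
/// ```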
#[cfg(not(tarpaulin_include))]
impl<'a, O, T> TryFrom<&'a [T]> for &'a BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
type Error = &'a [T];
#[inline]
fn try_from(slice: &'a [T]) -> Result<Self, Self::Error> {
BitSlice::from_slice(slice).map_err(|_| slice)
}
}
#[cfg(not(tarpaulin_include))]
impl<'a, O, T> TryFrom<&'a mut [T]> for &'a mut BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
type Error = &'a mut [T];
#[inline]
fn try_from(slice: &'a mut [T]) -> Result<Self, Self::Error> {
let slice_ptr = slice as *mut [T];
BitSlice::from_slice_mut(slice).map_err(|_| unsafe { &mut *slice_ptr })
}
}
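/// The default slice references, shared and exclusive alike, are empty.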
#[cfg(not(tarpaulin_include))]
impl<O, T> Default for &BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn default() -> Self {
BitSlice::empty()
}
}
#[cfg(not(tarpaulin_include))]
impl<O, T> Default for &mut BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn default() -> Self {
BitSlice::empty_mut()
}
}
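/// Renders the bit-span's pointer metadata, then the bits themselves, so
/// that `{:?}` output carries both the location and the contents of the
/// slice.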
impl<O, T> Debug for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
self.as_bitspan().render(fmt, "Slice", None)?;
fmt.write_str(" ")?;
Display::fmt(self, fmt)
}
}
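/// `Display` forwards to `Binary`, so `{}` and `{:b}` render identically.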
#[cfg(not(tarpaulin_include))]
impl<O, T> Display for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline(always)]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
Binary::fmt(self, fmt)
}
}
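/// Implements a numeric formatting trait by rendering each memory element's
/// live bits as one group of radix digits in a list.
///
/// The arguments are: the trait to implement, the ASCII letter to which the
/// digit value 10 maps, the radix marker that follows the leading `0` (as in
/// `0b`, `0o`, `0x`), and the number of bits consumed per rendered digit.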
macro_rules! fmt {
($trait:ident, $base:expr, $pfx:expr, $blksz:expr) => {
impl<O, T> $trait for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
struct Seq<'a>(&'a [u8]);
#[cfg(not(tarpaulin_include))]
impl Debug for Seq<'_> {
#[inline(always)]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
fmt.write_str(unsafe {
str::from_utf8_unchecked(self.0)
})
}
}
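				// The `{:#}` alternate flag keeps the two-byte radix prefix
				// in each group; the plain form skips past it.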
let start = if fmt.alternate() { 0 } else { 2 };
let mut dbg = fmt.debug_list();
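				// Upper bound on the digits one element can produce: whole
				// digits in a `usize`, plus one for any remainder bits.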
const D: usize = <usize as IsNumber>::BITS as usize / $blksz;
#[allow(clippy::modulo_one)]
const M: usize = <usize as IsNumber>::BITS as usize % $blksz;
const W: usize = D + (M != 0) as usize;
let mut w: [u8; W + 2] = [b'0'; W + 2];
w[1] = $pfx;
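				// Renders one element's live bits into `w` as digits, then
				// emits the text as a single list entry.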
let mut writer = |bits: &BitSlice<O, T::Mem>| {
let mut end = 2;
for chunk in bits.rchunks($blksz).rev() {
let mut val = 0u8;
for bit in chunk {
val <<= 1;
val |= *bit as u8;
}
						w[end] = match val {
							// A chunk holds at most four bits, so `val` is at
							// most 15 and the final arm can never be reached.
							v @ 0 ..= 9 => b'0' + v,
							v @ 10 ..= 15 => $base + (v - 10),
							_ => unsafe { hint::unreachable_unchecked() },
						};
end += 1;
}
dbg.entry(&Seq(&w[start .. end]));
};
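				// Walk the slice's memory domain: either a sub-element
				// enclave, or a region with optional partial elements on
				// either side of a run of fully-live elements.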
match self.domain() {
Domain::Enclave { head, elem, tail } => {
let tmp = elem.load_value();
let bits = tmp.view_bits::<O>();
unsafe {
bits.get_unchecked(
head.into_inner() as usize
.. tail.into_inner() as usize,
)
}
.pipe(writer);
},
Domain::Region { head, body, tail } => {
if let Some((head, elem)) = head {
let tmp = elem.load_value();
let bits = tmp.view_bits::<O>();
unsafe {
bits.get_unchecked(head.into_inner() as usize ..)
}
.pipe(&mut writer);
}
for elem in body.iter().map(BitStore::load_value) {
elem.view_bits::<O>().pipe(&mut writer);
}
if let Some((elem, tail)) = tail {
let tmp = elem.load_value();
let bits = tmp.view_bits::<O>();
unsafe {
bits.get_unchecked(.. tail.into_inner() as usize)
}
.pipe(&mut writer);
}
},
}
dbg.finish()
}
}
};
}
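// Illustrative renderings, for a full `Msb0, u8` slice over `0xF0`:
// `{:b}` produces "[11110000]" and `{:x}` produces "[f0]"; the `{:#}`
// alternate flag adds the radix prefix to each group.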
fmt!(Binary, b'0', b'b', 1);
fmt!(Octal, b'0', b'o', 3);
fmt!(LowerHex, b'a', b'x', 4);
fmt!(UpperHex, b'A', b'x', 4);
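/// Hashes each bit as one byte, so equal slices hash identically no matter
/// their ordering and storage parameters.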
#[cfg(not(tarpaulin_include))]
impl<O, T> Hash for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
#[inline]
fn hash<H>(&self, hasher: &mut H)
where H: Hasher {
for bit in self.as_bitptr_range() {
hasher.write_u8(unsafe { bit.read() } as u8);
}
}
}
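// Safety: a `&BitSlice` can observe a memory element whose other bits are
// being written through a `&mut BitSlice`, and the `BitStore` aliasing
// system is what keeps those writes sound. Requiring `T: Sync` in both
// implementations ensures that the viewed elements are themselves safe to
// observe from multiple threads.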
unsafe impl<O, T> Send for BitSlice<O, T>
where
O: BitOrder,
T: BitStore + Sync,
{
}
unsafe impl<O, T> Sync for BitSlice<O, T>
where
O: BitOrder,
T: BitStore + Sync,
{
}
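/// Copies the bits into a newly-allocated `BitVec` with the same type
/// parameters. Available only with the `alloc` feature.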
#[cfg(feature = "alloc")]
impl<O, T> ToOwned for BitSlice<O, T>
where
O: BitOrder,
T: BitStore,
{
type Owned = BitVec<O, T>;
#[cfg_attr(not(tarpaulin_include), inline(always))]
fn to_owned(&self) -> Self::Owned {
BitVec::from_bitslice(self)
}
}