#![feature(portable_simd)]
#![no_std]
#[cfg(any(test, feature = "std"))]
#[macro_use]
extern crate std;
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::fmt::{Binary, Debug, Display, Formatter, Result};
use core::ops::*;
use core::simd::*;
use smallvec::{Array, SmallVec};
#[cfg(test)]
mod tests;
/// Default bit vector: 256-bit `u64x4` SIMD blocks, four of which are kept
/// inline in the `SmallVec` before spilling to the heap.
pub type BitVec = BitVecSimd<[u64x4; 4], 4>;
/// Bit vector backed by SIMD blocks (`A::Item`, e.g. `u64x4`) stored in a
/// `SmallVec`, so short vectors live inline without a heap allocation.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct BitVecSimd<A, const L: usize>
where
    A: Array + Index<usize>,
    A::Item: BitBlock<L>,
{
    // SIMD blocks holding the bits; padding bits past `nbits` are kept at 0,
    // which `count_ones`, `all`, `leading_zeros`, ... rely on.
    storage: SmallVec<A>,
    // Number of logical bits in the vector.
    nbits: usize,
}
/// Generates the three flavours of a lane-wise bitwise operation
/// (`$op` is `&`, `|` or `^`):
/// - `$name`: consumes both operands,
/// - `$name_cloned`: borrows both operands and clones the blocks,
/// - `$name_inplace`: mutates `self` through the block-level helper of the
///   same name on `BitBlock`.
/// All flavours panic if the two vectors differ in bit length.
macro_rules! impl_operation {
    ($name:ident, $name_cloned:ident, $name_inplace:ident, $op:tt) => {
        pub fn $name(self, other: Self) -> Self {
            assert_eq!(self.nbits, other.nbits);
            let storage = self
                .storage
                .into_iter()
                .zip(other.storage.into_iter())
                .map(|(a, b)| a $op b)
                .collect();
            Self {
                storage,
                nbits: self.nbits,
            }
        }
        pub fn $name_cloned(&self, other: &Self) -> Self {
            assert_eq!(self.nbits, other.nbits);
            let storage = self
                .storage
                .iter()
                .cloned()
                .zip(other.storage.iter().cloned())
                .map(|(a, b)| a $op b)
                .collect();
            Self {
                storage,
                nbits: self.nbits,
            }
        }
        pub fn $name_inplace(&mut self, other: &Self) {
            assert_eq!(self.nbits, other.nbits);
            self.storage.iter_mut().zip(other.storage.iter()).for_each(|(a, b)| a.$name_inplace(b));
        }
    };
}
impl<A, const L: usize> BitVecSimd<A, L>
where
    A: Array + Index<usize>,
    A::Item: BitBlock<L>,
{
    /// Decompose a bit count or bit index into
    /// (SIMD block index, element index inside the block, bit index inside the element).
    #[inline]
    fn bit_to_len(nbits: usize) -> (usize, usize, usize) {
        (
            nbits / (A::Item::BIT_WIDTH as usize),
            (nbits % (A::Item::BIT_WIDTH as usize)) / A::Item::ELEMENT_BIT_WIDTH,
            nbits % A::Item::ELEMENT_BIT_WIDTH,
        )
    }

    /// Return `bytes` with the bit at `offset` set (`flag == true`) or cleared.
    #[inline]
    fn set_bit(
        flag: bool,
        bytes: <A::Item as BitBlock<L>>::Item,
        offset: u32,
    ) -> <A::Item as BitBlock<L>>::Item {
        match flag {
            true => bytes | A::Item::ONE_ELEMENT.wrapping_shl(offset),
            false => bytes & !A::Item::ONE_ELEMENT.wrapping_shl(offset),
        }
    }

    /// Create a bit vector with `nbits` bits, all `false`.
    pub fn zeros(nbits: usize) -> Self {
        let len = (nbits + A::Item::BIT_WIDTH - 1) / A::Item::BIT_WIDTH;
        let storage = (0..len).map(|_| A::Item::ZERO).collect();
        Self { storage, nbits }
    }

    /// Create a bit vector with `nbits` bits, all `true`.
    /// Padding bits past `nbits` in the last block stay 0.
    pub fn ones(nbits: usize) -> Self {
        let (len, bytes, bits) = Self::bit_to_len(nbits);
        let mut storage = (0..len).map(|_| A::Item::MAX).collect::<SmallVec<_>>();
        if bytes > 0 || bits > 0 {
            let mut arr = A::Item::MAX.to_array();
            // Delegate padding clearing to the shared helper. The previous inline
            // version computed `clear_high_bits(ELEMENT_BIT_WIDTH - bits)`, which
            // wraps to a shift of 0 when `bits == 0` (nbits a multiple of the
            // element width but not of the block width) and left a whole padding
            // element set to all-ones.
            Self::clear_arr_high_bits(&mut arr, bytes, bits);
            storage.push(A::Item::from(arr));
        }
        Self { storage, nbits }
    }

    /// Build a bit vector from an iterator of `bool`s; the first item becomes bit 0.
    pub fn from_bool_iterator<I: Iterator<Item = bool>>(i: I) -> Self {
        let mut storage = SmallVec::new();
        let mut current_slice = A::Item::ZERO.to_array();
        let mut nbits = 0;
        for b in i {
            if b {
                current_slice[nbits % A::Item::BIT_WIDTH / A::Item::ELEMENT_BIT_WIDTH] |=
                    A::Item::ONE_ELEMENT.wrapping_shl((nbits % A::Item::ELEMENT_BIT_WIDTH) as u32);
            }
            nbits += 1;
            if nbits % A::Item::BIT_WIDTH == 0 {
                storage.push(A::Item::from(current_slice));
                current_slice = A::Item::ZERO.to_array();
            }
        }
        // Flush the last, partially filled block.
        if nbits % A::Item::BIT_WIDTH > 0 {
            storage.push(A::Item::from(current_slice));
        }
        Self { storage, nbits }
    }

    /// Build a bit vector from a slice of indices that should be `true`.
    /// The vector grows automatically (via `set`) to hold the largest index.
    pub fn from_slice(slice: &[usize]) -> Self {
        let mut bv = BitVecSimd::zeros(slice.len());
        for i in slice {
            bv.set(*i, true);
        }
        bv
    }

    /// Build a bit vector of `nbits` bits by copying raw lane elements from `slice`.
    pub fn from_slice_copy(slice: &[<A::Item as BitBlock<L>>::Item], nbits: usize) -> Self {
        let len = (nbits + A::Item::ELEMENT_BIT_WIDTH - 1) / A::Item::ELEMENT_BIT_WIDTH;
        assert!(len <= slice.len());
        // Only consume the `len` elements actually needed. Iterating the whole
        // slice pushed surplus blocks when `slice.len() > len`, leaving
        // `storage.len()` out of sync with `nbits` (which breaks e.g.
        // `set_all_true` and `leading_zeros`).
        let iter = &mut slice[..len].iter();
        let mut storage = SmallVec::with_capacity((len + A::Item::LANES - 1) / A::Item::LANES);
        let (i, bytes, bits) = Self::bit_to_len(nbits);
        while let Some(a0) = iter.next() {
            let mut arr = A::Item::ZERO.to_array();
            arr[0] = *a0;
            for item in arr.iter_mut().take(A::Item::LANES).skip(1) {
                *item = *(iter.next().unwrap_or(&A::Item::ZERO_ELEMENT));
            }
            // The final (partial) block must have its padding cleared.
            if storage.len() == i && (bytes > 0 || bits > 0) {
                Self::clear_arr_high_bits(&mut arr, bytes, bits);
            }
            storage.push(A::Item::from(arr));
        }
        Self { storage, nbits }
    }

    /// Build a bit vector of `nbits` bits by copying raw lane elements from `ptr`.
    ///
    /// # Safety
    /// `ptr` must be valid for reads of `buffer_len` elements.
    pub unsafe fn from_raw_copy(
        ptr: *const <A::Item as BitBlock<L>>::Item,
        buffer_len: usize,
        nbits: usize,
    ) -> Self {
        let len = (nbits + A::Item::ELEMENT_BIT_WIDTH - 1) / A::Item::ELEMENT_BIT_WIDTH;
        assert!(len <= buffer_len);
        let blocks = (len + A::Item::LANES - 1) / A::Item::LANES;
        let mut storage = SmallVec::with_capacity(blocks);
        let (i, bytes, bits) = Self::bit_to_len(nbits);
        // Iterate once per SIMD block (LANES elements each). The previous loop
        // ran once per *element* (`0..len`) and therefore pushed `len` blocks
        // instead of `ceil(len / LANES)`, again desynchronizing `storage.len()`
        // from `nbits`.
        for index in 0..(blocks as isize) {
            let mut arr = A::Item::ZERO.to_array();
            for (j, item) in arr.iter_mut().enumerate().take(A::Item::LANES) {
                let k = index * A::Item::LANES as isize + j as isize;
                *item = if k < len as isize {
                    *(ptr.offset(k))
                } else {
                    A::Item::ZERO_ELEMENT
                };
            }
            if storage.len() == i && (bytes > 0 || bits > 0) {
                Self::clear_arr_high_bits(&mut arr, bytes, bits);
            }
            storage.push(A::Item::from(arr));
        }
        Self { storage, nbits }
    }

    /// Number of logical bits in the vector.
    #[inline]
    pub fn len(&self) -> usize {
        self.nbits
    }

    /// Number of SIMD blocks currently stored.
    #[inline]
    pub fn storage_len(&self) -> usize {
        self.storage.len()
    }

    /// Capacity of the backing storage, in SIMD blocks.
    #[inline]
    pub fn storage_capacity(&self) -> usize {
        self.storage.capacity()
    }

    /// Raw read-only pointer to the backing blocks.
    pub fn as_ptr(&self) -> *const A::Item {
        self.storage.as_ptr()
    }

    /// Raw mutable pointer to the backing blocks.
    pub fn as_mut_ptr(&mut self) -> *mut A::Item {
        self.storage.as_mut_ptr()
    }

    /// Whether the `SmallVec` storage has spilled to the heap.
    #[inline]
    pub fn spilled(&self) -> bool {
        self.storage.spilled()
    }

    /// Zero everything in `arr` at or above bit position (`bytes`, `bits`):
    /// the high bits of element `bytes` (when `bits > 0`) and every element after.
    fn clear_arr_high_bits(arr: &mut [<A::Item as BitBlock<L>>::Item], bytes: usize, bits: usize) {
        let mut end_bytes = bytes;
        if bits > 0 {
            arr[end_bytes] =
                arr[end_bytes].clear_high_bits((A::Item::ELEMENT_BIT_WIDTH - bits) as u32);
            end_bytes += 1;
        }
        for item in arr.iter_mut().take(A::Item::LANES).skip(end_bytes) {
            *item = A::Item::ZERO_ELEMENT;
        }
    }

    /// Set to one every bit of `arr` from position (`bytes`, `bits`) up to
    /// element index `bytes_max` (exclusive).
    fn fill_arr_high_bits(
        arr: &mut [<A::Item as BitBlock<L>>::Item],
        bytes: usize,
        bits: usize,
        bytes_max: usize,
    ) {
        let mut end_bytes = bytes;
        if bits > 0 {
            arr[end_bytes] |= A::Item::MAX_ELEMENT.clear_low_bits(bits as u32);
            end_bytes += 1;
        }
        for item in arr.iter_mut().take(bytes_max).skip(end_bytes) {
            *item = A::Item::MAX_ELEMENT;
        }
    }

    /// Clear the padding of storage block `i` past bit position (`bytes`, `bits`).
    fn clear_high_bits(&mut self, i: usize, bytes: usize, bits: usize) {
        if bytes > 0 || bits > 0 {
            let mut arr = self.storage[i].to_array();
            Self::clear_arr_high_bits(&mut arr, bytes, bits);
            self.storage[i] = A::Item::from(arr);
        }
    }

    /// Fill storage block `i` with ones from (`bytes`, `bits`) up to element `bytes_max`.
    fn fill_high_bits(&mut self, i: usize, bytes: usize, bits: usize, bytes_max: usize) {
        if bytes > 0 || bits > 0 {
            let mut arr = self.storage[i].to_array();
            Self::fill_arr_high_bits(&mut arr, bytes, bits, bytes_max);
            self.storage[i] = A::Item::from(arr);
        }
    }

    /// When growing with `value == true` within a single block: set the bits
    /// between the old end (`old_bytes`, `old_bits`) and the new end
    /// (`bytes`, `bits`), then re-clear the new padding.
    fn fix_high_bits(
        &mut self,
        old_i: usize,
        old_bytes: usize,
        old_bits: usize,
        i: usize,
        bytes: usize,
        bits: usize,
    ) {
        debug_assert!(old_i == i && old_bytes <= bytes && (bytes > 0 || bits > 0));
        let mut arr = self.storage[i].to_array();
        if old_bytes < bytes {
            Self::fill_arr_high_bits(
                &mut arr,
                old_bytes,
                old_bits,
                if bits > 0 { bytes + 1 } else { bytes },
            );
        } else {
            debug_assert!(old_bytes == bytes && bits >= old_bits);
            if bits > old_bits {
                arr[bytes] |= A::Item::MAX_ELEMENT.clear_low_bits(old_bits as u32);
            }
        }
        Self::clear_arr_high_bits(&mut arr, bytes, bits);
        self.storage[i] = A::Item::from(arr);
    }

    /// Resize to `nbits` bits; bits added by growing are set to `value`.
    pub fn resize(&mut self, nbits: usize, value: bool) {
        let (i, bytes, bits) = Self::bit_to_len(nbits);
        self.storage.resize(
            if bytes > 0 || bits > 0 { i + 1 } else { i },
            if value { A::Item::MAX } else { A::Item::ZERO },
        );
        if nbits < self.nbits {
            // Shrinking: the surviving last block must have its padding re-zeroed.
            self.clear_high_bits(i, bytes, bits);
        } else if value {
            // Growing with ones: fill from the old end, then clear the new padding.
            let (old_i, old_bytes, old_bits) = Self::bit_to_len(self.nbits);
            if old_i < i {
                self.fill_high_bits(old_i, old_bytes, old_bits, A::Item::LANES);
                self.clear_high_bits(i, bytes, bits);
            } else if bytes > 0 || bits > 0 {
                self.fix_high_bits(old_i, old_bytes, old_bits, i, bytes, bits);
            }
        }
        self.nbits = nbits;
    }

    /// Shrink to `nbits` bits; panics unless `nbits` is strictly smaller than `len()`.
    pub fn shrink_to(&mut self, nbits: usize) {
        if nbits >= self.nbits {
            panic!(
                "nbits {} should be less than current value {}",
                nbits, self.nbits
            );
        }
        self.resize(nbits, false);
    }

    /// Set the bit at `index` to `flag`, growing the vector (zero-filled) to
    /// `index + 1` bits when `index` is out of bounds.
    pub fn set(&mut self, index: usize, flag: bool) {
        let (i, bytes, bits) = Self::bit_to_len(index);
        if self.nbits <= index {
            // Block `i` itself must exist to hold bit `index`, so always grow to
            // `i + 1` blocks. The previous code used `i` when `bytes == 0 &&
            // bits == 0` (block-aligned indices, including `set(0, ..)` on an
            // empty vector), leaving the target block missing and panicking on
            // the indexed access below.
            let new_len = i + 1;
            self.storage.extend(
                (0..new_len.saturating_sub(self.storage.len())).map(move |_| A::Item::ZERO),
            );
            self.nbits = index + 1;
        }
        let mut arr = self.storage[i].to_array();
        arr[bytes] = Self::set_bit(flag, arr[bytes], bits as u32);
        self.storage[i] = A::Item::from(arr);
    }

    /// Copy whole blocks from `ptr` into `self` and set the length to `nbits`.
    ///
    /// # Safety
    /// `ptr` must be valid for reads of `buffer_len` blocks.
    pub unsafe fn set_raw_copy(&mut self, ptr: *mut A::Item, buffer_len: usize, nbits: usize) {
        let new_len = (nbits + A::Item::BIT_WIDTH - 1) / A::Item::BIT_WIDTH;
        assert!(new_len <= buffer_len);
        // Compare block counts: the previous `new_len > self.len()` compared
        // blocks against the *bit* count, which could skip this extension and
        // panic on the indexed write below.
        if new_len > self.storage.len() {
            self.storage
                .extend((0..new_len - self.storage.len()).map(move |_| A::Item::ZERO));
        }
        // NOTE(review): if `new_len < storage.len()`, the surplus blocks are left
        // untouched — confirm that trailing-garbage blocks are acceptable here.
        for i in 0..(new_len as isize) {
            self.storage[i as usize] = *ptr.offset(i);
        }
        self.nbits = nbits;
    }

    /// Replace the backing storage with a buffer previously obtained from a
    /// `SmallVec` (the old storage is dropped).
    ///
    /// # Safety
    /// (`ptr`, `buffer_len`, `capacity`) must satisfy the contract of
    /// `SmallVec::from_raw_parts`: a heap buffer that spilled from a `SmallVec<A>`.
    pub unsafe fn set_raw(
        &mut self,
        ptr: *mut A::Item,
        buffer_len: usize,
        capacity: usize,
        nbits: usize,
    ) {
        self.storage = SmallVec::from_raw_parts(ptr, buffer_len, capacity);
        self.nbits = nbits;
    }

    /// Set every bit to `false`, keeping the length.
    pub fn set_all_false(&mut self) {
        self.storage
            .iter_mut()
            .for_each(move |x| *x = A::Item::ZERO);
    }

    /// Set every logical bit to `true`, keeping the length and zeroed padding.
    pub fn set_all_true(&mut self) {
        let (_, bytes, bits) = Self::bit_to_len(self.nbits);
        self.storage.iter_mut().for_each(move |x| *x = A::Item::MAX);
        if bytes > 0 || bits > 0 {
            let mut arr = A::Item::MAX.to_array();
            // Same fix as `ones`: the helper also handles `bits == 0`, where the
            // old inline `clear_high_bits(ELEMENT_BIT_WIDTH)` wrapped to a no-op.
            Self::clear_arr_high_bits(&mut arr, bytes, bits);
            *(self.storage.last_mut().unwrap()) = A::Item::from(arr);
        }
    }

    /// Set every bit to `flag`.
    pub fn set_all(&mut self, flag: bool) {
        match flag {
            true => self.set_all_true(),
            false => self.set_all_false(),
        }
    }

    /// Get the bit at `index`, or `None` when out of bounds.
    pub fn get(&self, index: usize) -> Option<bool> {
        if self.nbits <= index {
            None
        } else {
            let (index, bytes, bits) = Self::bit_to_len(index);
            Some(
                self.storage[index].to_array()[bytes]
                    & A::Item::ONE_ELEMENT.wrapping_shl(bits as u32)
                    != A::Item::ZERO_ELEMENT,
            )
        }
    }

    /// Get the bit at `index`; panics (no UB, despite the name) when out of bounds.
    pub fn get_unchecked(&self, index: usize) -> bool {
        if self.nbits <= index {
            panic!("index out of bounds {} > {}", index, self.nbits);
        } else {
            let (index, bytes, bits) = Self::bit_to_len(index);
            (self.storage[index].to_array()[bytes] & A::Item::ONE_ELEMENT.wrapping_shl(bits as u32))
                != A::Item::ZERO_ELEMENT
        }
    }

    // Bitwise AND / OR / XOR in consuming, cloning and in-place flavours.
    impl_operation!(and, and_cloned, and_inplace, &);
    impl_operation!(or, or_cloned, or_inplace, |);
    impl_operation!(xor, xor_cloned, xor_inplace, ^);

    /// Bits set in `self` but not in `other` (`self & !other`), consuming both.
    pub fn difference(self, other: Self) -> Self {
        self.and(other.not())
    }

    /// Bits set in `self` but not in `other`, borrowing both operands.
    pub fn difference_cloned(&self, other: &Self) -> Self {
        self.and_cloned(&<&BitVecSimd<A, L>>::clone(&other).not())
    }

    /// Bitwise NOT over the logical bits; padding bits stay zero.
    pub fn inverse(&self) -> Self {
        let (i, bytes, bits) = Self::bit_to_len(self.nbits);
        let mut storage = self.storage.iter().map(|x| !(*x)).collect::<SmallVec<_>>();
        if bytes > 0 || bits > 0 {
            assert_eq!(storage.len(), i + 1);
            let s: &mut A::Item = &mut storage[i];
            let mut arr = s.to_array();
            // Re-clear the padding that the NOT above turned into ones. The
            // helper also covers `bits == 0`, where the old inline
            // `clear_high_bits(ELEMENT_BIT_WIDTH - bits)` wrapped to a no-op and
            // left a whole padding element set.
            Self::clear_arr_high_bits(&mut arr, bytes, bits);
            *s = arr.into();
        }
        Self {
            storage,
            nbits: self.nbits,
        }
    }

    /// Total number of bits set to `true` (padding is zero, so a plain popcount works).
    pub fn count_ones(&self) -> usize {
        self.storage
            .iter()
            .map(|x| {
                x.to_array()
                    .into_iter()
                    .map(|a| a.count_ones())
                    .sum::<u32>()
            })
            .sum::<u32>() as usize
    }

    /// Number of bits set to `true` at positions strictly below `index`.
    pub fn count_ones_before(&self, index: usize) -> usize {
        assert!(index <= self.nbits);
        // Decompose the *exclusive* bound directly: everything below block `i`,
        // element `bytes`, bit `bits` is counted. (The previous version
        // decomposed `index - 1` and masked with `ONE << (bits + 1)`, which
        // missed the boundary cases: it returned 0 for `index == 1` and wrapped
        // the shift whenever `bits == ELEMENT_BIT_WIDTH - 1`, e.g. `index == 64`
        // for u64 elements.)
        let (i, bytes, bits) = Self::bit_to_len(index);
        let mut ones = self
            .storage
            .iter()
            .take(i)
            .map(|x| {
                x.to_array()
                    .into_iter()
                    .map(|a| a.count_ones())
                    .sum::<u32>()
            })
            .sum::<u32>();
        if bytes > 0 || bits > 0 {
            let arr = self.storage[i].to_array();
            ones += arr
                .into_iter()
                .take(bytes)
                .map(|x| x.count_ones())
                .sum::<u32>();
            if bits > 0 {
                // Low `bits` bits of the partially counted element; `bits` is in
                // 1..ELEMENT_BIT_WIDTH here, so the shift cannot wrap.
                ones += (arr[bytes]
                    & (A::Item::ONE_ELEMENT.wrapping_shl(bits as u32) - A::Item::ONE_ELEMENT))
                    .count_ones();
            }
        }
        ones as usize
    }

    /// Number of consecutive `false` bits counted down from the highest position.
    pub fn leading_zeros(&self) -> usize {
        let mut zero_item_count = 0;
        let mut iter = self
            .storage
            .iter()
            .rev()
            .skip_while(|x| match **x == A::Item::ZERO {
                true => {
                    zero_item_count += A::Item::LANES;
                    true
                }
                false => false,
            });
        if let Some(x) = iter.next() {
            let arr = x.to_array();
            let mut x_iter =
                arr.into_iter()
                    .rev()
                    .skip_while(|y| match *y == A::Item::ZERO_ELEMENT {
                        true => {
                            zero_item_count += 1;
                            true
                        }
                        false => false,
                    });
            // Safe: this block is non-zero, so at least one element survives.
            let y = x_iter.next().unwrap();
            let raw_leading_zeros =
                zero_item_count * A::Item::ELEMENT_BIT_WIDTH + y.leading_zeros() as usize;
            // Discount the padding bits above `nbits` in the last block.
            let mut extra_leading_zeros = self.nbits % A::Item::BIT_WIDTH;
            if extra_leading_zeros > 0 {
                extra_leading_zeros = A::Item::BIT_WIDTH - extra_leading_zeros
            }
            return raw_leading_zeros as usize - extra_leading_zeros;
        }
        // All blocks were zero.
        self.nbits
    }

    /// `true` if at least one bit is set.
    pub fn any(&self) -> bool {
        self.storage.iter().any(|x| {
            x.to_array()
                .into_iter()
                .map(|a| a.count_ones())
                .sum::<u32>()
                > 0
        })
    }

    /// `true` if every logical bit is set (relies on zeroed padding).
    pub fn all(&self) -> bool {
        self.count_ones() == self.nbits
    }

    /// `true` if no bit is set.
    pub fn none(&self) -> bool {
        !self.any()
    }

    /// NOTE(review): this reports "no bit set" (identical to `none()`), not
    /// "zero length" as the pairing with `len()` would suggest — confirm the
    /// intended semantics before relying on it.
    pub fn is_empty(&self) -> bool {
        !self.any()
    }

    /// Consume the vector into a `Vec<bool>`, one entry per logical bit.
    pub fn into_bools(self) -> Vec<bool> {
        self.into()
    }

    /// Consume the vector into the ascending indices of its set bits.
    pub fn into_usizes(self) -> Vec<usize> {
        self.into()
    }
}
// Blanket conversion: build a `BitVecSimd` from any iterator of `bool`s
// (delegates to `from_bool_iterator`; the first item becomes bit 0).
impl<A, I: Iterator<Item = bool>, const L: usize> From<I> for BitVecSimd<A, L>
where
    A: Array + Index<usize>,
    A::Item: BitBlock<L>,
{
    fn from(i: I) -> Self {
        Self::from_bool_iterator(i)
    }
}
/// Implements trait `$name` for type `$name1` with body `$body`, supplying the
/// generic parameters and `Array`/`BitBlock` bounds shared by every
/// `BitVecSimd` trait impl below.
macro_rules! impl_trait {
    (
        ( $( $name:tt )+ ),
        ( $( $name1:tt )+ ),
        { $( $body:tt )* }
    ) =>
    {
        impl<A, const L: usize> $( $name )+ for $( $name1 )+
        where
            A: Array + Index<usize>,
            A::Item: BitBlock<L>,
        { $( $body )* }
    };
}
// `BitVecSimd` -> `Vec<bool>`: expand each stored element bit by bit (LSB
// first) and truncate to the logical length `nbits`.
impl_trait! {
    (From< BitVecSimd<A, L> >),
    (Vec<bool>),
    {
        fn from(v: BitVecSimd<A, L>) -> Self {
            v.storage
                .into_iter()
                .flat_map(|x| x.to_array())
                .flat_map(|x| {
                    (0..A::Item::ELEMENT_BIT_WIDTH)
                        .map(move |i| (x.wrapping_shr(i as u32)) & A::Item::ONE_ELEMENT != A::Item::ZERO_ELEMENT)
                })
                .take(v.nbits)
                .collect()
        }
    }
}
// `BitVecSimd` -> `Vec<usize>`: the ascending indices of all bits set to true.
impl_trait! {
    (From< BitVecSimd<A, L> >),
    (Vec<usize>),
    {
        fn from(v: BitVecSimd<A, L>) -> Self {
            v.storage
                .into_iter()
                .flat_map(|x| x.to_array())
                .flat_map(|x| { (0..A::Item::ELEMENT_BIT_WIDTH).map(move |i| (x.wrapping_shr(i as u32)) & A::Item::ONE_ELEMENT != A::Item::ZERO_ELEMENT) })
                .take(v.nbits)
                .enumerate()
                .filter(|(_, b)| *b)
                .map(|(i, _)| i)
                .collect()
        }
    }
}
// `bv[i]` sugar. `Index` must hand out a reference, so we point at `'static`
// booleans; panics (via `get_unchecked`) when `index >= len()`.
impl_trait! {
    (Index<usize>),
    (BitVecSimd<A, L>),
    {
        type Output = bool;
        fn index(&self, index: usize) -> &Self::Output {
            if self.get_unchecked(index) {
                &true
            } else {
                &false
            }
        }
    }
}
// Render the vector as a string of '0'/'1' characters, bit 0 first.
impl_trait! {
    (Display),
    (BitVecSimd<A, L>),
    {
        fn fmt(&self, f: &mut Formatter) -> Result {
            for i in 0..self.nbits {
                write!(f, "{}", if self.get_unchecked(i) { 1 } else { 0 })?;
            }
            Ok(())
        }
    }
}
/// Generates the `PartialEq::eq` body for the various reference combinations.
/// NOTE(review): comparing vectors of different lengths panics (`assert_eq!`)
/// instead of returning `false` — confirm this is the intended contract.
macro_rules! impl_eq_fn {
    ($( $rhs:tt )+) => {
        fn eq(&self, other: $( $rhs )+) -> bool {
            assert_eq!(self.nbits, other.nbits);
            self.storage
                .iter()
                .zip(other.storage.iter())
                .all(|(a, b)| a == b)
        }
    }
}
// `==` between every owned / `&` / `&mut` combination of `BitVecSimd`.
impl_trait! { (PartialEq), (BitVecSimd<A, L>), { impl_eq_fn!(&Self); } }
impl_trait! { (PartialEq< &BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_eq_fn!(&&Self); } }
impl_trait! { (PartialEq< &mut BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_eq_fn!(&&mut Self); } }
impl_trait! { (PartialEq< BitVecSimd<A, L> >), (&BitVecSimd<A, L>), { impl_eq_fn!(&BitVecSimd<A, L>); } }
impl_trait! { (PartialEq< BitVecSimd<A, L> >), (&mut BitVecSimd<A, L>), { impl_eq_fn!(&BitVecSimd<A, L>); } }
/// Generates a binary-operator method body delegating to `$op`; the variant
/// with a trailing `&` takes the right-hand side by value but passes it to the
/// borrowing (`*_cloned`) implementation.
macro_rules! impl_bit_op_fn {
    ($fn:ident, $op:ident, ( $( $rhs:tt )+ )) =>
    {
        type Output = BitVecSimd<A, L>;
        fn $fn(self, rhs: $( $rhs )+) -> Self::Output {
            self.$op(rhs)
        }
    };
    ($fn:ident, $op:ident, &, ( $( $rhs:tt )+ )) =>
    {
        type Output = BitVecSimd<A, L>;
        fn $fn(self, rhs: $( $rhs )+) -> Self::Output {
            self.$op(&rhs)
        }
    }
}
/// Implements a bitwise operator trait (`BitAnd`/`BitOr`/`BitXor`) for all nine
/// owned / `&` / `&mut` operand combinations: the owned-owned pair uses the
/// consuming `$op`, every other pair dispatches to `$op_cloned`.
macro_rules! impl_bit_op {
    ($trait:ident, $fn:ident, $op:ident, $op_cloned:ident) => {
        impl_trait! {($trait), (BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op, (Self)); } }
        impl_trait! {($trait< &BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, (&Self)); } }
        impl_trait! { ($trait< &mut BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, (&mut Self)); } }
        impl_trait! { ($trait< BitVecSimd<A, L> >), (&BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, &, (BitVecSimd<A, L>)); } }
        impl_trait! { ($trait), (&BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, (Self)); } }
        impl_trait! { ($trait< &mut BitVecSimd<A, L> >), (&BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, (&mut BitVecSimd<A, L>)); } }
        impl_trait! { ($trait< BitVecSimd<A, L> >), (&mut BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, &, (BitVecSimd<A, L>)); } }
        impl_trait! { ($trait< &BitVecSimd<A, L> >), (&mut BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, (&BitVecSimd<A, L>)); } }
        impl_trait! { ($trait), (&mut BitVecSimd<A, L>), { impl_bit_op_fn!($fn, $op_cloned, (Self)); } }
    };
}
// Wire up `&`, `|` and `^` for all operand combinations.
impl_bit_op!(BitAnd, bitand, and, and_cloned);
impl_bit_op!(BitOr, bitor, or, or_cloned);
impl_bit_op!(BitXor, bitxor, xor, xor_cloned);
/// Generates the `Not::not` body (delegates to `inverse`, which keeps the
/// padding bits zeroed).
macro_rules! impl_not_fn {
    () => {
        type Output = BitVecSimd<A, L>;
        fn not(self) -> Self::Output {
            self.inverse()
        }
    };
}
// `!bv` for owned, shared and mutable references alike.
impl_trait! {(Not), (BitVecSimd<A, L>), { impl_not_fn!(); }}
impl_trait! {(Not), (&BitVecSimd<A, L>), { impl_not_fn!(); }}
impl_trait! {(Not), (&mut BitVecSimd<A, L>), { impl_not_fn!(); }}
/// Generates a compound-assignment body; the trailing-`&` variant takes the
/// right-hand side by value and borrows it for the `*_inplace` helper.
macro_rules! impl_bit_assign_fn {
    (($( $rhs:tt )+), $fn:ident, $fn1:ident, &) => {
        fn $fn(&mut self, rhs: $( $rhs )+) {
            self.$fn1(&rhs);
        }
    };
    (($( $rhs:tt )+), $fn:ident, $fn1:ident) => {
        fn $fn(&mut self, rhs: $( $rhs )+) {
            self.$fn1(rhs);
        }
    }
}
// `&=`, `|=` and `^=` with owned, shared and mutable right-hand sides; all
// delegate to the SIMD in-place helpers.
impl_trait! {(BitAndAssign), (BitVecSimd<A, L>), { impl_bit_assign_fn!((Self), bitand_assign, and_inplace, &); } }
impl_trait! {(BitAndAssign< &BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_assign_fn!((&BitVecSimd<A, L>), bitand_assign, and_inplace); } }
impl_trait! {(BitAndAssign< &mut BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_assign_fn!((&mut BitVecSimd<A, L>), bitand_assign, and_inplace); } }
impl_trait! {(BitOrAssign), (BitVecSimd<A, L>), { impl_bit_assign_fn!((Self), bitor_assign, or_inplace, &); } }
impl_trait! {(BitOrAssign< &BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_assign_fn!((&BitVecSimd<A, L>), bitor_assign, or_inplace); } }
impl_trait! {(BitOrAssign< &mut BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_assign_fn!((&mut BitVecSimd<A, L>), bitor_assign, or_inplace); } }
impl_trait! {(BitXorAssign), (BitVecSimd<A, L>), { impl_bit_assign_fn!((Self), bitxor_assign, xor_inplace, &); } }
impl_trait! {(BitXorAssign< &BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_assign_fn!((&BitVecSimd<A, L>), bitxor_assign, xor_inplace); } }
impl_trait! {(BitXorAssign< &mut BitVecSimd<A, L> >), (BitVecSimd<A, L>), { impl_bit_assign_fn!((&mut BitVecSimd<A, L>), bitxor_assign, xor_inplace); } }
/// A single scalar lane of a SIMD block (`u8`/`u16`/`u32`/`u64`): the
/// element-level operations the bit vector needs.
pub trait BitBlockItem:
    Not<Output = Self>
    + BitAnd<Output = Self>
    + BitOr<Output = Self>
    + BitXor<Output = Self>
    + Shl<u32, Output = Self>
    + Shr<u32, Output = Self>
    + BitAndAssign
    + BitOrAssign
    + Add<Output = Self>
    + Sub<Output = Self>
    + PartialEq
    + Sized
    + Copy
    + Clone
    + Binary
{
    /// Width of the element in bits.
    const BIT_WIDTH: usize;
    /// The all-zero value.
    const ZERO: Self;
    /// The value 1, used to build single-bit masks.
    const ONE: Self;
    /// The all-ones value.
    const MAX: Self;
    fn count_ones(self) -> u32;
    fn leading_zeros(self) -> u32;
    /// Left shift with the amount wrapped modulo `BIT_WIDTH` (like `uN::wrapping_shl`).
    fn wrapping_shl(self, rhs: u32) -> Self;
    /// Right shift with the amount wrapped modulo `BIT_WIDTH`.
    fn wrapping_shr(self, rhs: u32) -> Self;
    /// Zero the `rhs` most significant bits; only meaningful for `rhs < BIT_WIDTH`
    /// (the wrapping-shift implementation makes `rhs == BIT_WIDTH` a no-op).
    fn clear_high_bits(self, rhs: u32) -> Self;
    /// Zero the `rhs` least significant bits; only meaningful for `rhs < BIT_WIDTH`.
    fn clear_low_bits(self, rhs: u32) -> Self;
}
/// Implements `BitBlockItem` for an unsigned integer type by forwarding to the
/// inherent methods and supplying the ZERO/ONE/MAX constants.
macro_rules! impl_bitblock_item {
    ($type: ty, $zero: expr, $one: expr, $max: expr) => {
        impl BitBlockItem for $type {
            const BIT_WIDTH: usize = Self::BITS as usize;
            const ZERO: Self = $zero;
            const ONE: Self = $one;
            const MAX: Self = $max;
            #[inline]
            fn count_ones(self) -> u32 {
                Self::count_ones(self)
            }
            #[inline]
            fn leading_zeros(self) -> u32 {
                Self::leading_zeros(self)
            }
            #[inline]
            fn wrapping_shl(self, rhs: u32) -> Self {
                self.wrapping_shl(rhs)
            }
            #[inline]
            fn wrapping_shr(self, rhs: u32) -> Self {
                self.wrapping_shr(rhs)
            }
            // Shift out the top `rhs` bits, then shift back; note that the
            // wrapping shifts make `rhs == BIT_WIDTH` a no-op rather than zero.
            #[inline]
            fn clear_high_bits(self, rhs: u32) -> Self {
                self.wrapping_shl(rhs).wrapping_shr(rhs)
            }
            #[inline]
            fn clear_low_bits(self, rhs: u32) -> Self {
                self.wrapping_shr(rhs).wrapping_shl(rhs)
            }
        }
    };
}
// Scalar element implementations for every supported lane width.
impl_bitblock_item!(u8, 0u8, 1u8, 0xFFu8);
impl_bitblock_item!(u16, 0u16, 1u16, 0xFFFFu16);
impl_bitblock_item!(u32, 0u32, 1u32, 0xFFFFFFFFu32);
impl_bitblock_item!(u64, 0u64, 1u64, 0xFFFFFFFFFFFFFFFFu64);
/// A SIMD block of `L` lanes used as the unit of bit storage, together with
/// the constants describing its geometry.
pub trait BitBlock<const L: usize>:
    Not<Output = Self>
    + BitAnd<Output = Self>
    + BitOr<Output = Self>
    + BitXor<Output = Self>
    + Add<Output = Self>
    + Sub<Output = Self>
    + Eq
    + Sized
    + Copy
    + Clone
    + Debug
    + From<[Self::Item; L]>
{
    /// Scalar lane type.
    type Item: BitBlockItem;
    /// Total bits per block (`LANES * ELEMENT_BIT_WIDTH`).
    const BIT_WIDTH: usize;
    /// Bits per lane element.
    const ELEMENT_BIT_WIDTH: usize;
    /// Number of lanes per block.
    const LANES: usize;
    /// Lane-level all-zero value.
    const ZERO_ELEMENT: Self::Item;
    /// Lane-level value 1.
    const ONE_ELEMENT: Self::Item;
    /// Lane-level all-ones value.
    const MAX_ELEMENT: Self::Item;
    /// Block with every bit clear.
    const ZERO: Self;
    /// Block with every bit set.
    const MAX: Self;
    fn to_array(self) -> [Self::Item; L];
    fn and_inplace(&mut self, rhs: &Self);
    fn or_inplace(&mut self, rhs: &Self);
    fn xor_inplace(&mut self, rhs: &Self);
}
/// Implements `BitBlock` for a `core::simd` vector type, deriving the geometry
/// constants from the lane type and count and forwarding the in-place ops to
/// the SIMD compound-assignment operators.
macro_rules! impl_bitblock {
    ($type: ty, $item_type: ty, $lanes: expr) => {
        impl BitBlock<$lanes> for $type {
            type Item = $item_type;
            const BIT_WIDTH: usize = ($lanes * <$item_type>::BIT_WIDTH) as usize;
            const ELEMENT_BIT_WIDTH: usize = <$item_type>::BIT_WIDTH;
            const LANES: usize = $lanes;
            const ZERO_ELEMENT: $item_type = <$item_type>::ZERO;
            const ONE_ELEMENT: $item_type = <$item_type>::ONE;
            const MAX_ELEMENT: $item_type = <$item_type>::MAX;
            const ZERO: Self = <$type>::splat(0);
            const MAX: Self = <$type>::splat(<$item_type>::MAX);
            #[inline]
            fn to_array(self) -> [$item_type; $lanes] {
                <$type>::to_array(self)
            }
            #[inline]
            fn and_inplace(&mut self, rhs: &Self) {
                *self &= rhs;
            }
            #[inline]
            fn or_inplace(&mut self, rhs: &Self) {
                *self |= rhs;
            }
            #[inline]
            fn xor_inplace(&mut self, rhs: &Self) {
                *self ^= rhs;
            }
        }
    };
}
// Supported SIMD block types (lane type × lane count).
impl_bitblock!(u8x16, u8, 16);
impl_bitblock!(u16x8, u16, 8);
impl_bitblock!(u32x4, u32, 4);
impl_bitblock!(u32x8, u32, 8);
impl_bitblock!(u64x2, u64, 2);
impl_bitblock!(u64x4, u64, 4);
impl_bitblock!(u64x8, u64, 8);