/// The SIMD operations needed by vectorized byte searchers. All operations
/// act on the byte lanes of a platform vector type.
pub trait Vector: Copy + core::fmt::Debug {
    /// The size of the vector in bytes.
    const BYTES: usize;
    /// The pointer bits that must be zero for an aligned load (typically
    /// `BYTES - 1`).
    const ALIGN: usize;
    /// The scalar mask type produced by `movemask`.
    type Mask: MoveMask;
    /// Create a vector with every lane set to `byte`.
    fn splat(byte: u8) -> Self;
    /// Safety: `data` must be valid for `BYTES` bytes, aligned to `BYTES`.
    unsafe fn load_aligned(data: *const u8) -> Self;
    /// Safety: `data` must be valid for `BYTES` bytes.
    unsafe fn load_unaligned(data: *const u8) -> Self;
    /// Condense the most significant bit of each lane into a scalar mask.
    fn movemask(self) -> Self::Mask;
    /// Lane-wise equality: a lane is `0xFF` where equal, `0x00` otherwise.
    fn cmpeq(self, vector2: Self) -> Self;
    fn or(self, vector2: Self) -> Self;
    fn add(self, vector2: Self) -> Self;
    /// Lane-wise signed greater-than: `0xFF` where true, `0x00` otherwise.
    fn gt(self, vector2: Self) -> Self;
    /// Returns true iff `movemask` would produce a mask with any bit set.
#[inline(always)]
fn movemask_will_have_non_zero(self) -> bool {
self.movemask().has_non_zero()
}
}
/// The scalar mask produced by `Vector::movemask`: each lane maps to a fixed
/// group of bits, non-zero iff the lane's most significant bit was set.
pub trait MoveMask: Copy + core::fmt::Debug {
    /// Returns true iff any bit is set.
    fn has_non_zero(self) -> bool;
    /// Shift the mask right by `rhs` lanes (not bits).
    fn shr(self, rhs: u32) -> Self;
    /// Clear the least significant set bit.
    fn clear_least_significant_bit(self) -> Self;
    /// Returns the lane index (byte offset) of the first set bit.
    fn first_offset(self) -> usize;
}
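/// A "sensible" movemask: one bit per lane, where bit `i` is set iff the
/// most significant bit of lane `i` is set. This is what x86's `movemask`
/// and wasm's `bitmask` instructions produce directly.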
#[cfg(any(
target_arch = "x86_64",
all(target_arch = "wasm32", target_feature = "simd128")
))]
#[derive(Clone, Copy)]
pub struct SensibleMoveMask(u32);
#[cfg(any(
target_arch = "x86_64",
all(target_arch = "wasm32", target_feature = "simd128")
))]
impl core::fmt::Debug for SensibleMoveMask {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{:b}", self.0)
}
}
#[cfg(any(
target_arch = "x86_64",
all(target_arch = "wasm32", target_feature = "simd128")
))]
impl SensibleMoveMask {
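    /// Normalize the mask for offset arithmetic: on a big-endian target the
    /// bytes are swapped so that `trailing_zeros` still counts from the
    /// first lane in memory order.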
#[inline(always)]
fn get_for_offset(self) -> u32 {
#[cfg(target_endian = "big")]
{
self.0.swap_bytes()
}
#[cfg(target_endian = "little")]
{
self.0
}
}
}
#[cfg(any(
target_arch = "x86_64",
all(target_arch = "wasm32", target_feature = "simd128")
))]
impl MoveMask for SensibleMoveMask {
#[inline(always)]
fn has_non_zero(self) -> bool {
self.0 != 0
}
#[inline(always)]
fn clear_least_significant_bit(self) -> SensibleMoveMask {
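        // The classic `x & (x - 1)` clears the lowest set bit. Note the
        // subtraction underflows (a debug-build panic) on an all-zero mask,
        // so callers should check `has_non_zero` first.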
SensibleMoveMask(self.0 & (self.0 - 1))
}
#[inline(always)]
fn first_offset(self) -> usize {
self.get_for_offset().trailing_zeros() as usize
}
    #[inline(always)]
    fn shr(self, rhs: u32) -> Self {
        SensibleMoveMask(self.0.wrapping_shr(rhs))
    }
}
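/// Dummy implementations on `()`, used as a type-level placeholder where a
/// particular vector size is unavailable. Code that selects `()` must never
/// actually call these methods; every one is `unreachable!()`.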
impl Vector for () {
const BYTES: usize = 0;
const ALIGN: usize = 0;
type Mask = ();
#[inline(always)]
fn splat(_byte: u8) -> Self {
unreachable!()
}
#[inline(always)]
unsafe fn load_aligned(_data: *const u8) -> Self {
unreachable!()
}
#[inline(always)]
unsafe fn load_unaligned(_data: *const u8) -> Self {
unreachable!()
}
#[inline(always)]
fn movemask(self) -> Self::Mask {
unreachable!()
}
#[inline(always)]
fn cmpeq(self, _vector2: Self) -> Self {
unreachable!()
}
#[inline(always)]
fn or(self, _vector2: Self) -> Self {
unreachable!()
}
#[inline(always)]
fn add(self, _vector2: Self) -> Self {
unreachable!()
}
#[inline(always)]
fn gt(self, _vector2: Self) -> Self {
unreachable!()
}
}
impl MoveMask for () {
#[inline(always)]
fn has_non_zero(self) -> bool {
unreachable!()
}
#[inline(always)]
fn shr(self, _rhs: u32) -> Self {
unreachable!()
}
#[inline(always)]
fn clear_least_significant_bit(self) -> Self {
unreachable!()
}
#[inline(always)]
fn first_offset(self) -> usize {
unreachable!()
}
}
#[cfg(target_arch = "x86_64")]
mod x86sse2 {
use core::arch::x86_64::*;
use super::{SensibleMoveMask, Vector};
impl Vector for __m128i {
const BYTES: usize = 16;
const ALIGN: usize = Self::BYTES - 1;
type Mask = SensibleMoveMask;
#[inline(always)]
fn splat(byte: u8) -> Self {
unsafe { _mm_set1_epi8(byte as i8) }
}
#[inline(always)]
unsafe fn load_aligned(data: *const u8) -> Self {
unsafe { _mm_load_si128(data as *const __m128i) }
}
#[inline(always)]
unsafe fn load_unaligned(data: *const u8) -> Self {
unsafe { _mm_loadu_si128(data as *const __m128i) }
}
#[inline(always)]
fn movemask(self) -> Self::Mask {
SensibleMoveMask(unsafe { _mm_movemask_epi8(self) } as u32)
}
#[inline(always)]
fn cmpeq(self, vector2: Self) -> Self {
unsafe { _mm_cmpeq_epi8(self, vector2) }
}
#[inline(always)]
fn or(self, vector2: Self) -> Self {
unsafe { _mm_or_si128(self, vector2) }
}
#[inline(always)]
fn add(self, vector2: Self) -> Self {
unsafe { _mm_add_epi8(self, vector2) }
}
#[inline(always)]
fn gt(self, vector2: Self) -> Self {
unsafe { _mm_cmpgt_epi8(self, vector2) }
}
}
}
#[cfg(target_arch = "x86_64")]
mod x86avx2 {
use core::arch::x86_64::*;
use super::{SensibleMoveMask, Vector};
impl Vector for __m256i {
const BYTES: usize = 32;
const ALIGN: usize = Self::BYTES - 1;
type Mask = SensibleMoveMask;
#[inline(always)]
fn splat(byte: u8) -> Self {
unsafe { _mm256_set1_epi8(byte as i8) }
}
#[inline(always)]
unsafe fn load_aligned(data: *const u8) -> Self {
unsafe { _mm256_load_si256(data as *const __m256i) }
}
#[inline(always)]
unsafe fn load_unaligned(data: *const u8) -> Self {
unsafe { _mm256_loadu_si256(data as *const __m256i) }
}
#[inline(always)]
fn movemask(self) -> Self::Mask {
SensibleMoveMask(unsafe { _mm256_movemask_epi8(self) } as u32)
}
#[inline(always)]
fn cmpeq(self, vector2: Self) -> Self {
unsafe { _mm256_cmpeq_epi8(self, vector2) }
}
#[inline(always)]
fn or(self, vector2: Self) -> Self {
unsafe { _mm256_or_si256(self, vector2) }
}
        #[inline(always)]
        fn add(self, vector2: Self) -> Self {
            unsafe { _mm256_add_epi8(self, vector2) }
        }
        #[inline(always)]
        fn gt(self, vector2: Self) -> Self {
            unsafe { _mm256_cmpgt_epi8(self, vector2) }
        }
}
}
#[cfg(target_arch = "aarch64")]
mod aarch64neon {
use core::arch::aarch64::*;
use super::{MoveMask, Vector};
impl Vector for int8x16_t {
const BYTES: usize = 16;
const ALIGN: usize = Self::BYTES - 1;
type Mask = NeonMoveMask;
#[inline(always)]
fn splat(byte: u8) -> Self {
unsafe { vdupq_n_s8(byte as i8) }
}
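        // `vld1q_s8` has no alignment requirement beyond the element size,
        // so an aligned load can simply reuse the unaligned path.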
#[inline(always)]
unsafe fn load_aligned(data: *const u8) -> Self {
unsafe { Self::load_unaligned(data) }
}
#[inline(always)]
unsafe fn load_unaligned(data: *const u8) -> Self {
unsafe { vld1q_s8(data as *const i8) }
}
#[inline(always)]
fn movemask(self) -> NeonMoveMask {
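            // NEON has no single movemask instruction, so use the
            // shift-right-and-narrow trick: viewing the 0x00/0xFF byte lanes
            // as eight u16s, shift each right by 4 and narrow back to bytes.
            // That leaves one nibble per original lane packed into a u64;
            // masking with 0x888... keeps one bit (bit 3 of each nibble)
            // per lane.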
let asu16s = unsafe { vreinterpretq_u16_s8(self) };
let mask = unsafe { vshrn_n_u16(asu16s, 4) };
let asu64 = unsafe { vreinterpret_u64_u8(mask) };
let scalar64 = unsafe { vget_lane_u64(asu64, 0) };
NeonMoveMask(scalar64 & 0x8888888888888888)
}
#[inline(always)]
fn cmpeq(self, vector2: Self) -> Self {
unsafe { vreinterpretq_s8_u8(vceqq_s8(self, vector2)) }
}
#[inline(always)]
fn or(self, vector2: Self) -> Self {
unsafe { vorrq_s8(self, vector2) }
}
#[inline(always)]
fn movemask_will_have_non_zero(self) -> bool {
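            // `vpmaxq_u8` folds pairwise maxima of the 16 lanes into the
            // low 64 bits, which are non-zero iff some lane of `self` is
            // non-zero. This is cheaper than materializing the full mask.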
let self_ = unsafe { vreinterpretq_u8_s8(self) };
let low = unsafe { vreinterpretq_u64_u8(vpmaxq_u8(self_, self_)) };
unsafe { vgetq_lane_u64(low, 0) != 0 }
}
#[inline(always)]
fn add(self, vector2: Self) -> Self {
unsafe { vaddq_s8(self, vector2) }
}
#[inline(always)]
fn gt(self, vector2: Self) -> Self {
unsafe { vreinterpretq_s8_u8(vcgtq_s8(self, vector2)) }
}
}
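    /// A movemask for NEON vectors. Unlike `SensibleMoveMask`, each of the
    /// 16 lanes occupies 4 bits of the `u64` (only the high bit of each
    /// nibble is ever set), so lane offsets scale by a factor of 4.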
#[derive(Clone, Copy)]
pub struct NeonMoveMask(u64);
impl core::fmt::Debug for NeonMoveMask {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{:b}", self.0)
}
}
impl NeonMoveMask {
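        /// See `SensibleMoveMask::get_for_offset`: byte-swaps on big-endian
        /// targets so `trailing_zeros` counts from the first lane in memory
        /// order.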
#[inline(always)]
fn get_for_offset(self) -> u64 {
#[cfg(target_endian = "big")]
{
self.0.swap_bytes()
}
#[cfg(target_endian = "little")]
{
self.0
}
}
}
impl MoveMask for NeonMoveMask {
#[inline(always)]
fn has_non_zero(self) -> bool {
self.0 != 0
}
#[inline(always)]
fn shr(self, rhs: u32) -> Self {
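            // `rhs` counts lanes; at 4 bits per lane, shift the underlying
            // u64 by `rhs * 4` bits.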
NeonMoveMask(self.0.wrapping_shr(rhs << 2))
}
#[inline(always)]
fn clear_least_significant_bit(self) -> NeonMoveMask {
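            // As above, `x & (x - 1)` clears the lowest set bit and requires
            // a non-zero mask to avoid a debug-build underflow.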
NeonMoveMask(self.0 & (self.0 - 1))
}
#[inline(always)]
fn first_offset(self) -> usize {
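            // Convert the bit position into a lane index (4 bits per lane).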
(self.get_for_offset().trailing_zeros() >> 2) as usize
}
}
}
#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
mod wasm_simd128 {
use core::arch::wasm32::*;
use super::{SensibleMoveMask, Vector};
impl Vector for v128 {
const BYTES: usize = 16;
const ALIGN: usize = Self::BYTES - 1;
type Mask = SensibleMoveMask;
#[inline(always)]
fn splat(byte: u8) -> Self {
u8x16_splat(byte)
}
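        // Dereferencing a `*const v128` compiles to an aligned 128-bit load;
        // the 16-byte alignment is the caller's obligation per the trait's
        // safety contract.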
#[inline(always)]
unsafe fn load_aligned(data: *const u8) -> Self {
unsafe { *data.cast() }
}
#[inline(always)]
unsafe fn load_unaligned(data: *const u8) -> Self {
unsafe { v128_load(data.cast()) }
}
#[inline(always)]
fn movemask(self) -> SensibleMoveMask {
SensibleMoveMask(u8x16_bitmask(self).into())
}
#[inline(always)]
fn cmpeq(self, vector2: Self) -> Self {
i8x16_eq(self, vector2)
}
#[inline(always)]
fn or(self, vector2: Self) -> Self {
v128_or(self, vector2)
}
        #[inline(always)]
        fn add(self, vector2: Self) -> Self {
            i8x16_add(self, vector2)
        }
        #[inline(always)]
        fn gt(self, vector2: Self) -> Self {
            i8x16_gt(self, vector2)
        }
}
}
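// Illustrative sketch (not part of the API above): a minimal byte search
// built on `Vector` and `MoveMask`, showing how `splat`, `cmpeq`,
// `movemask` and `first_offset` compose. The `find_byte` helper and its
// chunk/tail strategy are assumptions for this example; a real searcher
// would also use `load_aligned`, `ALIGN`, and unrolled loops.
#[cfg(all(test, target_arch = "x86_64"))]
mod example_tests {
    use super::{MoveMask, Vector};

    // Hypothetical helper: index of the first occurrence of `needle`.
    fn find_byte<V: Vector>(needle: u8, haystack: &[u8]) -> Option<usize> {
        let vn = V::splat(needle);
        let mut i = 0;
        while i + V::BYTES <= haystack.len() {
            // SAFETY: `i + V::BYTES <= haystack.len()`, so reading
            // `V::BYTES` bytes at offset `i` stays in bounds.
            let chunk = unsafe { V::load_unaligned(haystack.as_ptr().add(i)) };
            let mask = chunk.cmpeq(vn).movemask();
            if mask.has_non_zero() {
                // The first set bit corresponds to the first matching lane.
                return Some(i + mask.first_offset());
            }
            i += V::BYTES;
        }
        // Scalar scan for the final partial chunk.
        haystack[i..].iter().position(|&b| b == needle).map(|p| i + p)
    }

    #[test]
    fn find_byte_sse2() {
        // SSE2 is part of the x86_64 baseline, so `__m128i` needs no
        // runtime feature detection here.
        let hay = b"the quick brown fox jumps over the lazy dog";
        let found = find_byte::<core::arch::x86_64::__m128i>(b'q', hay);
        assert_eq!(found, hay.iter().position(|&b| b == b'q'));
        assert_eq!(find_byte::<core::arch::x86_64::__m128i>(b'!', hay), None);
    }
}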