#![allow(unsafe_code, reason = "SIMD")]
#![allow(unsafe_op_in_unsafe_fn, reason = "SIMD")]
#![allow(clippy::cast_possible_wrap, reason = "SIMD")]
#![allow(clippy::similar_names, reason = "SIMD")]
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::mem::MaybeUninit;
use crate::backend::generic::{decode_generic_unchecked, encode_generic_unchecked};
use crate::error::InvalidInput;
use crate::util::{DIGITS_LOWER_16, DIGITS_LOWER_32, DIGITS_UPPER_16, DIGITS_UPPER_32};
#[cfg(feature = "experimental-x86-avx512-simd")]
use crate::util::{DIGITS_LOWER_64, DIGITS_UPPER_64};
/// A 16-element array forced to 16-byte alignment so the byte table can be
/// loaded with the aligned `_mm_load_si128`.
#[repr(align(16))]
struct Aligned16<T>([T; 16]);
impl Aligned16<u8> {
    /// Returns a pointer to the 16-byte table, suitable for an aligned
    /// `_mm_load_si128`.
    ///
    /// Takes `&self`: the previous by-value receiver moved the table into
    /// `get_ptr`'s own stack frame and returned a pointer into it, which is
    /// dangling as soon as the call returns (undefined behavior). With a
    /// borrow, the pointer refers to the caller's temporary, which lives to
    /// the end of the enclosing statement — where the load happens.
    #[inline(always)]
    const fn get_ptr(&self) -> *const __m128i {
        self.0.as_ptr().cast()
    }
}
/// Selects the 16-entry nibble-to-ASCII-hex table at compile time:
/// uppercase digits when `UPPER` is true, lowercase otherwise.
#[inline(always)]
const fn digits16<const UPPER: bool>() -> Aligned16<u8> {
    match UPPER {
        true => Aligned16(DIGITS_UPPER_16),
        false => Aligned16(DIGITS_LOWER_16),
    }
}
/// A 32-element array forced to 32-byte alignment so the byte table can be
/// loaded with the aligned `_mm256_load_si256`.
#[repr(align(32))]
struct Aligned32<T>([T; 32]);
impl Aligned32<u8> {
    /// Returns a pointer to the 32-byte table, suitable for an aligned
    /// `_mm256_load_si256`.
    ///
    /// Takes `&self`: the previous by-value receiver moved the table into
    /// `get_ptr`'s own stack frame and returned a pointer into it, which is
    /// dangling as soon as the call returns (undefined behavior). With a
    /// borrow, the pointer refers to the caller's temporary, which lives to
    /// the end of the enclosing statement — where the load happens.
    #[inline(always)]
    const fn get_ptr(&self) -> *const __m256i {
        self.0.as_ptr().cast()
    }
}
/// Selects the 32-byte nibble-to-ASCII-hex table at compile time:
/// uppercase digits when `UPPER` is true, lowercase otherwise.
#[inline(always)]
const fn digits32<const UPPER: bool>() -> Aligned32<u8> {
    match UPPER {
        true => Aligned32(DIGITS_UPPER_32),
        false => Aligned32(DIGITS_LOWER_32),
    }
}
/// A 64-element array forced to 64-byte alignment so the byte table can be
/// loaded with the aligned `_mm512_load_si512`.
#[cfg(feature = "experimental-x86-avx512-simd")]
#[repr(align(64))]
struct Aligned64<T>([T; 64]);
#[cfg(feature = "experimental-x86-avx512-simd")]
impl Aligned64<u8> {
    /// Returns a pointer to the 64-byte table for an aligned load.
    ///
    /// Takes `&self`: the previous by-value receiver moved the table into
    /// `get_ptr`'s own stack frame and returned a pointer into it, which is
    /// dangling as soon as the call returns (undefined behavior). With a
    /// borrow, the pointer refers to the caller's temporary, which lives to
    /// the end of the enclosing statement — where the load happens.
    ///
    /// NOTE(review): the return type is `*const __m256i` even though the
    /// table is 64 bytes; the only visible caller casts again before
    /// `_mm512_load_si512`, so this is harmless, but `*const __m512i`
    /// would be clearer — confirm no other callers before changing it.
    #[inline(always)]
    const fn get_ptr(&self) -> *const __m256i {
        self.0.as_ptr().cast()
    }
}
/// Selects the 64-byte nibble-to-ASCII-hex table at compile time:
/// uppercase digits when `UPPER` is true, lowercase otherwise.
#[cfg(feature = "experimental-x86-avx512-simd")]
#[inline(always)]
const fn digits64<const UPPER: bool>() -> Aligned64<u8> {
    match UPPER {
        true => Aligned64(DIGITS_UPPER_64),
        false => Aligned64(DIGITS_LOWER_64),
    }
}
/// Hex-encodes `src` into `dst` 16 input bytes at a time using SSSE3.
///
/// Each input byte is split into its high and low nibble, both nibbles are
/// translated to ASCII hex digits with a single `pshufb` table lookup, and
/// the two digit vectors are interleaved so the high digit of every byte
/// precedes its low digit. Any tail shorter than one 16-byte batch goes to
/// the scalar generic fallback.
///
/// # Safety
/// - The CPU must support SSSE3.
/// - `dst` must be at least as long as `src` (one `[u8; 2]` per input
///   byte); this is not checked here (hence `_unchecked`).
#[target_feature(enable = "ssse3")]
pub(crate) unsafe fn encode_ssse3_unchecked<const UPPER: bool>(
    mut src: &[u8],
    mut dst: &mut [[MaybeUninit<u8>; 2]],
) {
    const BATCH: usize = size_of::<__m128i>();
    if src.len() >= BATCH {
        // Low-nibble mask; also keeps every `pshufb` index byte in 0..=15.
        let m = _mm_set1_epi8(0b_0000_1111);
        let lut = _mm_load_si128(digits16::<UPPER>().get_ptr());
        while src.len() >= BATCH {
            let invec = _mm_loadu_si128(src.as_ptr().cast());
            // No 8-bit shift exists: shift 16-bit lanes and mask off the
            // bits that bled in from the neighboring byte.
            // FIX: use the const-generic form of the intrinsic, matching
            // `_mm256_srli_epi16::<4>` / `_mm512_srli_epi16::<4>` in the
            // wider paths — the old two-argument form no longer exists in
            // `core::arch`.
            let hi = _mm_and_si128(_mm_srli_epi16::<4>(invec), m);
            let lo = _mm_and_si128(invec, m);
            // Nibble -> ASCII digit via table lookup.
            let hi = _mm_shuffle_epi8(lut, hi);
            let lo = _mm_shuffle_epi8(lut, lo);
            // Interleave so each output pair is [high digit, low digit].
            let out0 = _mm_unpacklo_epi8(hi, lo);
            let out1 = _mm_unpackhi_epi8(hi, lo);
            {
                let dst = dst.as_mut_ptr().cast();
                _mm_storeu_si128(dst, out0);
                _mm_storeu_si128(dst.add(1), out1);
            }
            src = &src[BATCH..];
            dst = dst.get_unchecked_mut(BATCH..);
        }
    }
    // Scalar tail (< 16 bytes).
    encode_generic_unchecked::<UPPER>(src, dst);
}
/// Hex-encodes `src` into `dst` 32 input bytes at a time using AVX2, then
/// hands any remainder to the SSSE3 path.
///
/// Same nibble-split + `pshufb` table lookup as the SSSE3 path, except the
/// 256-bit unpack instructions interleave within each 128-bit lane, so a
/// cross-lane `permute2x128` (selectors 0x20 / 0x31) is needed to restore
/// sequential order before the table lookup.
///
/// # Safety
/// - The CPU must support AVX2 (the tail uses SSSE3).
/// - `dst` must be at least as long as `src`; this is not checked.
#[target_feature(enable = "avx2")]
pub(crate) unsafe fn encode_avx2_unchecked<const UPPER: bool>(
mut src: &[u8],
mut dst: &mut [[MaybeUninit<u8>; 2]],
) {
const BATCH: usize = size_of::<__m256i>();
if src.len() >= BATCH {
// Low-nibble mask; also keeps every `vpshufb` index byte in 0..=15.
let m = _mm256_set1_epi8(0b_0000_1111);
// presumably the 16-entry digit table repeated in both 128-bit lanes
// (`vpshufb` looks up within each lane) — TODO confirm in crate::util.
let lut = _mm256_load_si256(digits32::<UPPER>().get_ptr());
while src.len() >= BATCH {
let chunk = _mm256_loadu_si256(src.as_ptr().cast());
// No 8-bit shift exists: shift 16-bit lanes and mask the stray bits.
let hi = _mm256_and_si256(_mm256_srli_epi16::<4>(chunk), m);
let lo = _mm256_and_si256(chunk, m);
// Interleave hi/lo nibbles — per 128-bit lane, hence out of order...
let ac = _mm256_unpacklo_epi8(hi, lo);
let bd = _mm256_unpackhi_epi8(hi, lo);
// ...then gather low halves / high halves back into sequential order.
let ab = _mm256_permute2x128_si256::<0x20>(ac, bd);
let cd = _mm256_permute2x128_si256::<0x31>(ac, bd);
// Nibble -> ASCII digit via table lookup.
let out0 = _mm256_shuffle_epi8(lut, ab);
let out1 = _mm256_shuffle_epi8(lut, cd);
{
let dst = dst.as_mut_ptr().cast();
_mm256_storeu_si256(dst, out0);
_mm256_storeu_si256(dst.add(1), out1);
}
src = &src[BATCH..];
dst = dst.get_unchecked_mut(BATCH..);
}
}
// Remainder (< 32 bytes): SSSE3 batches, then scalar.
encode_ssse3_unchecked::<UPPER>(src, dst);
}
/// Hex-encodes `src` into `dst` 64 input bytes at a time using AVX-512,
/// then hands any remainder to the AVX2 path.
///
/// Same nibble-split + table-lookup scheme as the narrower paths. The
/// 512-bit unpacks interleave per 128-bit lane, so two `vpermi2q`
/// (`permutex2var_epi64`) steps gather qword groups from both unpacked
/// vectors back into sequential order before the per-lane table lookup.
///
/// # Safety
/// - The CPU must support AVX-512F, AVX-512BW and AVX-512VL (the tail
///   uses AVX2/SSSE3).
/// - `dst` must be at least as long as `src`; this is not checked.
#[cfg(feature = "experimental-x86-avx512-simd")]
#[target_feature(enable = "avx512f,avx512bw,avx512vl")]
pub(crate) unsafe fn encode_avx512_unchecked<const UPPER: bool>(
mut src: &[u8],
mut dst: &mut [[MaybeUninit<u8>; 2]],
) {
const BATCH: usize = size_of::<__m512i>();
if src.len() >= BATCH {
// Low-nibble mask; also keeps every `vpshufb` index byte in 0..=15.
let m = _mm512_set1_epi8(0b1111);
// presumably the 16-entry digit table repeated in all four 128-bit
// lanes — TODO confirm in crate::util.
let lut = _mm512_load_si512(digits64::<UPPER>().get_ptr().cast());
// Indices select qwords from the concatenation [fbea, hdgc] (0..=15):
// alternating pairs from each source restore sequential byte order.
let idx_abcd = _mm512_set_epi64(11, 10, 3, 2, 9, 8, 1, 0);
let idx_efgh = _mm512_set_epi64(15, 14, 7, 6, 13, 12, 5, 4);
while src.len() >= BATCH {
let chunk = _mm512_loadu_si512(src.as_ptr().cast());
// No 8-bit shift exists: shift 16-bit lanes and mask the stray bits.
let hi = _mm512_and_si512(_mm512_srli_epi16::<4>(chunk), m);
let lo = _mm512_and_si512(chunk, m);
// Per-lane interleave; the scrambled names record the group order.
let fbea = _mm512_unpacklo_epi8(hi, lo);
let hdgc = _mm512_unpackhi_epi8(hi, lo);
let abcd = _mm512_permutex2var_epi64(fbea, idx_abcd, hdgc);
let efgh = _mm512_permutex2var_epi64(fbea, idx_efgh, hdgc);
// Nibble -> ASCII digit via table lookup.
let out1 = _mm512_shuffle_epi8(lut, abcd);
let out2 = _mm512_shuffle_epi8(lut, efgh);
{
let out = dst.as_mut_ptr().cast();
_mm512_storeu_si512(out, out1);
_mm512_storeu_si512(out.add(1), out2);
}
src = &src[BATCH..];
dst = dst.get_unchecked_mut(BATCH..);
}
}
// Remainder (< 64 bytes): AVX2, then SSSE3, then scalar.
encode_avx2_unchecked::<UPPER>(src, dst);
}
/// Hex-decodes `src` into `dst` using SSSE3, validating every input byte.
///
/// Per input byte two decode candidates are computed:
/// - digit path: `b + (0xFF - b'9')` maps '0'..='9' to 0xF6..=0xFF, a
///   saturating subtract of 6 gives 0xF0..=0xF9, and subtracting 0xF0
///   yields 0..=9; every other byte ends up > 15.
/// - letter path: `b & 0xDF` folds to uppercase, subtracting b'A' gives
///   0..=5 for 'A'..='F', and a saturating add of 10 yields 10..=15;
///   every other byte ends up > 15 (or saturates to 0xFF).
/// The unsigned `min` of the two candidates is the nibble value iff the
/// byte is valid hex, and > 15 otherwise.
///
/// Validation: saturating-add `127 - 15` sets the sign bit on exactly the
/// lanes > 15, which `movemask` collects.
///
/// `maddubs` against per-word weights `0x0110` combines each (hi, lo)
/// nibble pair into `hi * 16 + lo`, a word value in 0..=255.
///
/// # Safety
/// - The CPU must support SSSE3.
/// - `dst` must hold at least `src.len()` bytes; this is not checked.
#[target_feature(enable = "ssse3")]
pub(crate) unsafe fn decode_ssse3_unchecked(
mut src: &[[u8; 2]],
mut dst: &mut [MaybeUninit<u8>],
) -> Result<(), InvalidInput> {
// Batches are counted in pairs: the main loop eats 16 pairs (two 128-bit
// loads) per iteration, the trailing step one 128-bit load of 8 pairs.
const BATCH: usize = size_of::<__m128i>();
const TRAILING_BATCH: usize = BATCH / 2;
if src.len() >= TRAILING_BATCH {
let n_c6 = _mm_set1_epi8((0xFF_u8 - b'9') as i8);
let n_06 = _mm_set1_epi8(0x06);
let n_f0 = _mm_set1_epi8(0xF0_u8 as i8);
let n_df = _mm_set1_epi8(0xDF_u8 as i8);
let u_a = _mm_set1_epi8(b'A' as i8);
let n_0a = _mm_set1_epi8(0x0A);
let check_bias = _mm_set1_epi8(127 - 15);
// Per word: first byte * 0x10 + second byte * 0x01.
let weights = _mm_set1_epi16(0x0110);
while src.len() >= BATCH {
let chunk1 = _mm_loadu_si128(src.as_ptr().cast::<__m128i>());
let chunk2 = _mm_loadu_si128(src.as_ptr().cast::<__m128i>().add(1));
// Digit-path candidates.
let d1 = _mm_sub_epi8(_mm_subs_epu8(_mm_add_epi8(chunk1, n_c6), n_06), n_f0);
let d2 = _mm_sub_epi8(_mm_subs_epu8(_mm_add_epi8(chunk2, n_c6), n_06), n_f0);
// Letter-path candidates.
let a1 = _mm_adds_epu8(_mm_sub_epi8(_mm_and_si128(chunk1, n_df), u_a), n_0a);
let a2 = _mm_adds_epu8(_mm_sub_epi8(_mm_and_si128(chunk2, n_df), u_a), n_0a);
let n1 = _mm_min_epu8(d1, a1);
let n2 = _mm_min_epu8(d2, a2);
{
// Reject if any candidate nibble exceeds 15 (sign bit after bias).
let c1 = _mm_adds_epu8(n1, check_bias);
let c2 = _mm_adds_epu8(n2, check_bias);
if _mm_movemask_epi8(_mm_or_si128(c1, c2)) != 0 {
return Err(InvalidInput);
}
}
let bytes = {
// hi*16 + lo per pair; words are 0..=255 so the unsigned
// saturating pack is exact and already in order.
let b1 = _mm_maddubs_epi16(n1, weights);
let b2 = _mm_maddubs_epi16(n2, weights);
_mm_packus_epi16(b1, b2)
};
{
let out = dst.as_mut_ptr().cast::<__m128i>();
_mm_storeu_si128(out, bytes);
}
src = &src[BATCH..];
dst = dst.get_unchecked_mut(BATCH..);
}
if src.len() >= TRAILING_BATCH {
// Half batch: 8 pairs -> 8 output bytes.
let chunk = _mm_loadu_si128(src.as_ptr().cast::<__m128i>());
let d = _mm_sub_epi8(_mm_subs_epu8(_mm_add_epi8(chunk, n_c6), n_06), n_f0);
let a = _mm_adds_epu8(_mm_sub_epi8(_mm_and_si128(chunk, n_df), u_a), n_0a);
let n = _mm_min_epu8(d, a);
{
let c = _mm_adds_epu8(n, check_bias);
if _mm_movemask_epi8(c) != 0 {
return Err(InvalidInput);
}
}
// Each word's low byte is the decoded value: gather the 8 low bytes
// into the lower half (upper half zeroed by the -1 indices) and do a
// 64-bit store.
let bytes = _mm_shuffle_epi8(
_mm_maddubs_epi16(n, weights),
_mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1),
);
{
let dst = dst.as_mut_ptr().cast::<__m128i>();
_mm_storel_epi64(dst, bytes);
}
src = &src[TRAILING_BATCH..];
dst = dst.get_unchecked_mut(TRAILING_BATCH..);
}
}
// Scalar fallback for the remaining < 8 pairs.
// NOTE(review): the `false` const parameter's meaning is defined by the
// generic backend — presumably a behavior toggle; confirm there.
decode_generic_unchecked::<false>(src, dst)
}
/// Hex-decodes `src` into `dst` using AVX2, validating every input byte.
///
/// Per input byte two decode candidates are computed:
/// - digit path: `b + (0xFF - b'9')` maps '0'..='9' to 0xF6..=0xFF, a
///   saturating subtract of 6 gives 0xF0..=0xF9, and subtracting 0xF0
///   yields 0..=9; every other byte ends up > 15.
/// - letter path: `b & 0xDF` folds to uppercase, subtracting b'A' gives
///   0..=5 for 'A'..='F', and a saturating add of 10 yields 10..=15;
///   every other byte ends up > 15 (or saturates to 0xFF).
/// The unsigned `min` of the candidates is the nibble value iff the byte
/// is valid hex, and > 15 otherwise; a saturating add of `127 - 15` sets
/// the sign bit on exactly the invalid lanes for `movemask` rejection.
///
/// `maddubs` against per-word weights `0x0110` combines each (hi, lo)
/// nibble pair into `hi * 16 + lo`, a word in 0..=255, which is then
/// narrowed back to bytes.
///
/// # Safety
/// - The CPU must support AVX2 (the tail uses SSSE3).
/// - `dst` must hold at least `src.len()` bytes; this is not checked.
#[target_feature(enable = "avx2")]
pub(crate) unsafe fn decode_avx2_unchecked(
    mut src: &[[u8; 2]],
    mut dst: &mut [MaybeUninit<u8>],
) -> Result<(), InvalidInput> {
    const BATCH: usize = size_of::<__m256i>();
    const TRAILING_BATCH: usize = BATCH / 2;
    if src.len() >= TRAILING_BATCH {
        let n_c6 = _mm256_set1_epi8((0xFF_u8 - b'9') as i8);
        let n_06 = _mm256_set1_epi8(0x06);
        let n_f0 = _mm256_set1_epi8(0xF0_u8 as i8);
        let n_df = _mm256_set1_epi8(0xDF_u8 as i8);
        let u_a = _mm256_set1_epi8(b'A' as i8);
        let n_0a = _mm256_set1_epi8(0x0A);
        let check_bias = _mm256_set1_epi8(127 - 15);
        // Per word: first byte * 0x10 + second byte * 0x01.
        let weights = _mm256_set1_epi16(0x0110);
        while src.len() >= BATCH {
            // 32 pairs (two 256-bit loads) -> 32 output bytes per iteration.
            let chunk1 = _mm256_loadu_si256(src.as_ptr().cast::<__m256i>());
            let chunk2 = _mm256_loadu_si256(src.as_ptr().cast::<__m256i>().add(1));
            let d1 = _mm256_sub_epi8(_mm256_subs_epu8(_mm256_add_epi8(chunk1, n_c6), n_06), n_f0);
            let d2 = _mm256_sub_epi8(_mm256_subs_epu8(_mm256_add_epi8(chunk2, n_c6), n_06), n_f0);
            let a1 = _mm256_adds_epu8(_mm256_sub_epi8(_mm256_and_si256(chunk1, n_df), u_a), n_0a);
            let a2 = _mm256_adds_epu8(_mm256_sub_epi8(_mm256_and_si256(chunk2, n_df), u_a), n_0a);
            let n1 = _mm256_min_epu8(d1, a1);
            let n2 = _mm256_min_epu8(d2, a2);
            {
                // Reject if any candidate nibble exceeds 15.
                let c1 = _mm256_adds_epu8(n1, check_bias);
                let c2 = _mm256_adds_epu8(n2, check_bias);
                if _mm256_movemask_epi8(_mm256_or_si256(c1, c2)) != 0 {
                    return Err(InvalidInput);
                }
            }
            let bytes = {
                let b1 = _mm256_maddubs_epi16(n1, weights);
                let b2 = _mm256_maddubs_epi16(n2, weights);
                // `vpackuswb` packs per 128-bit lane, leaving the qword
                // groups as [0, 2, 1, 3]; the permute restores order.
                let packed = _mm256_packus_epi16(b1, b2);
                _mm256_permute4x64_epi64::<0b11_01_10_00>(packed)
            };
            {
                let dst = dst.as_mut_ptr().cast::<__m256i>();
                _mm256_storeu_si256(dst, bytes);
            }
            src = &src[BATCH..];
            dst = dst.get_unchecked_mut(BATCH..);
        }
        if src.len() >= TRAILING_BATCH {
            // Half batch: 16 pairs -> 16 output bytes.
            let chunk = _mm256_loadu_si256(src.as_ptr().cast::<__m256i>());
            let d = _mm256_sub_epi8(_mm256_subs_epu8(_mm256_add_epi8(chunk, n_c6), n_06), n_f0);
            let a = _mm256_adds_epu8(_mm256_sub_epi8(_mm256_and_si256(chunk, n_df), u_a), n_0a);
            let n = _mm256_min_epu8(d, a);
            {
                let c = _mm256_adds_epu8(n, check_bias);
                if _mm256_movemask_epi8(c) != 0 {
                    return Err(InvalidInput);
                }
            }
            // Narrow 16 words to 16 bytes using AVX2 only.
            // BUGFIX: this previously used `_mm256_cvtepi16_epi8` (vpmovwb),
            // which is an AVX-512BW+VL instruction — undefined behavior
            // (SIGILL) on CPUs that have AVX2 but not AVX-512. An unsigned
            // saturating pack of the two 128-bit lanes is exact and in
            // order because every word is non-negative and <= 255.
            let words = _mm256_maddubs_epi16(n, weights);
            let bytes = _mm_packus_epi16(
                _mm256_castsi256_si128(words),
                _mm256_extracti128_si256::<1>(words),
            );
            {
                let dst = dst.as_mut_ptr().cast::<__m128i>();
                _mm_storeu_si128(dst, bytes);
            }
            src = &src[TRAILING_BATCH..];
            dst = dst.get_unchecked_mut(TRAILING_BATCH..);
        }
    }
    // Anything shorter than 16 pairs falls through to the SSSE3 path.
    decode_ssse3_unchecked(src, dst)
}
/// Hex-decodes `src` into `dst` using AVX-512 (F + BW), validating every
/// input byte with mask-register compares instead of the saturating
/// sign-bit trick used by the SSE/AVX2 paths.
///
/// The per-byte digit/letter candidate arithmetic is identical to
/// `decode_ssse3_unchecked` (see there for the derivation); here a lane is
/// invalid iff `min(digit, letter) > 0x0F`, tested directly with
/// `_mm512_cmpgt_epu8_mask`.
///
/// # Safety
/// - The CPU must support AVX-512F and AVX-512BW (tails use the
///   AVX2/SSSE3 paths).
/// - `dst` must hold at least `src.len()` bytes; this is not checked.
#[cfg(feature = "experimental-x86-avx512-simd")]
#[target_feature(enable = "avx512f,avx512bw")]
pub(crate) unsafe fn decode_avx512_unchecked(
mut src: &[[u8; 2]],
mut dst: &mut [MaybeUninit<u8>],
) -> Result<(), InvalidInput> {
const BATCH: usize = size_of::<__m512i>();
const TRAILING_BATCH: usize = BATCH / 2;
if src.len() >= TRAILING_BATCH {
let n_c6 = _mm512_set1_epi8((0xFF_u8 - b'9') as i8);
let n_06 = _mm512_set1_epi8(0x06);
let n_f0 = _mm512_set1_epi8(0xF0_u8 as i8);
let n_df = _mm512_set1_epi8(0xDF_u8 as i8);
let u_a = _mm512_set1_epi8(b'A' as i8);
let n_0a = _mm512_set1_epi8(0x0A);
let n_0f = _mm512_set1_epi8(0x0F);
// Per word: first byte * 0x10 + second byte * 0x01.
let weights = _mm512_set1_epi16(0x0110);
// `vpackuswb` interleaves qword groups per 128-bit lane; selecting the
// even qwords then the odd qwords restores sequential byte order.
let permute = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
while src.len() >= BATCH {
// 64 pairs (two 512-bit loads) -> 64 output bytes per iteration.
let chunk1 = _mm512_loadu_si512(src.as_ptr().cast::<__m512i>());
let chunk2 = _mm512_loadu_si512(src.as_ptr().cast::<__m512i>().add(1));
// Digit-path candidates.
let d1 = _mm512_sub_epi8(_mm512_subs_epu8(_mm512_add_epi8(chunk1, n_c6), n_06), n_f0);
let d2 = _mm512_sub_epi8(_mm512_subs_epu8(_mm512_add_epi8(chunk2, n_c6), n_06), n_f0);
// Letter-path candidates.
let a1 = _mm512_adds_epu8(_mm512_sub_epi8(_mm512_and_si512(chunk1, n_df), u_a), n_0a);
let a2 = _mm512_adds_epu8(_mm512_sub_epi8(_mm512_and_si512(chunk2, n_df), u_a), n_0a);
let n1 = _mm512_min_epu8(d1, a1);
let n2 = _mm512_min_epu8(d2, a2);
{
// Any candidate nibble > 15 marks invalid input.
let bad1: u64 = _mm512_cmpgt_epu8_mask(n1, n_0f);
let bad2: u64 = _mm512_cmpgt_epu8_mask(n2, n_0f);
if (bad1 | bad2) != 0 {
return Err(InvalidInput);
}
}
let bytes = {
// hi*16 + lo per pair; words are 0..=255 so the unsigned
// saturating pack is exact (lane-wise, fixed by the permute).
let b1 = _mm512_maddubs_epi16(n1, weights);
let b2 = _mm512_maddubs_epi16(n2, weights);
let packed = _mm512_packus_epi16(b1, b2);
_mm512_permutexvar_epi64(permute, packed)
};
{
let dst = dst.as_mut_ptr().cast::<__m512i>();
_mm512_storeu_si512(dst, bytes);
}
src = &src[BATCH..];
dst = dst.get_unchecked_mut(BATCH..);
}
if src.len() >= TRAILING_BATCH {
// Half batch: 32 pairs -> 32 output bytes.
let chunk = _mm512_loadu_si512(src.as_ptr().cast::<__m512i>());
let d = _mm512_sub_epi8(_mm512_subs_epu8(_mm512_add_epi8(chunk, n_c6), n_06), n_f0);
let a = _mm512_adds_epu8(_mm512_sub_epi8(_mm512_and_si512(chunk, n_df), u_a), n_0a);
let n = _mm512_min_epu8(d, a);
if _mm512_cmpgt_epu8_mask(n, n_0f) != 0 {
return Err(InvalidInput);
}
// vpmovwb (AVX-512BW) truncates the 32 words (each 0..=255) to bytes.
let bytes = _mm512_cvtepi16_epi8(_mm512_maddubs_epi16(n, weights));
{
let dst = dst.as_mut_ptr().cast::<__m256i>();
_mm256_storeu_si256(dst, bytes);
}
src = &src[TRAILING_BATCH..];
dst = dst.get_unchecked_mut(TRAILING_BATCH..);
}
}
// Anything shorter than 32 pairs falls through to the AVX2 path.
decode_avx2_unchecked(src, dst)
}
#[cfg(test)]
mod smoking {
    //! Smoke tests: encode/decode round-trips at lengths that exercise the
    //! full-batch, half-batch, and scalar-tail paths of every backend, plus
    //! an exhaustive input-validation sweep.
    use alloc::string::String;
    use alloc::vec;
    use alloc::vec::Vec;
    use core::mem::MaybeUninit;
    use core::slice;
    use super::*;
    use crate::util::{DIGITS_LOWER_16, DIGITS_UPPER_16};
    /// Encodes `Case` with `$encode_f` in both letter cases, checks the
    /// output against a scalar reference expansion, then round-trips the
    /// encoded text through every listed `$decode_f`.
    macro_rules! test {
        (
            Encode = $encode_f:ident;
            Decode = $($decode_f:ident),*;
            Case = $i:expr
        ) => {{
            let input = $i;
            // Scalar reference encodings to compare the SIMD output against.
            let expected_lower = input
                .iter()
                .flat_map(|b| [
                    DIGITS_LOWER_16[(*b >> 4) as usize] as char,
                    DIGITS_LOWER_16[(*b & 0b1111) as usize] as char,
                ])
                .collect::<String>();
            let expected_upper = input
                .iter()
                .flat_map(|b| [
                    DIGITS_UPPER_16[(*b >> 4) as usize] as char,
                    DIGITS_UPPER_16[(*b & 0b1111) as usize] as char,
                ])
                .collect::<String>();
            let mut output_lower = vec![[MaybeUninit::<u8>::uninit(); 2]; input.len()];
            let mut output_upper = vec![[MaybeUninit::<u8>::uninit(); 2]; input.len()];
            unsafe {
                $encode_f::<false>(input, &mut output_lower);
                $encode_f::<true>(input, &mut output_upper);
            }
            // SAFETY: the encoder fully initializes one pair per input byte.
            let output_lower = unsafe {
                slice::from_raw_parts(
                    output_lower.as_ptr().cast::<[u8; 2]>(),
                    output_lower.len(),
                )
            };
            let output_upper = unsafe {
                slice::from_raw_parts(
                    output_upper.as_ptr().cast::<[u8; 2]>(),
                    output_upper.len(),
                )
            };
            assert_eq!(
                output_lower.as_flattened(),
                expected_lower.as_bytes(),
                "Encode error, expect \"{expected_lower}\", got \"{}\" ({:?})",
                str::from_utf8(output_lower.as_flattened()).unwrap_or("<invalid utf-8>"),
                output_lower.as_flattened()
            );
            assert_eq!(
                output_upper.as_flattened(),
                expected_upper.as_bytes(),
                "Encode error, expect \"{expected_upper}\", got \"{}\" ({:?})",
                str::from_utf8(output_upper.as_flattened()).unwrap_or("<invalid utf-8>"),
                output_upper.as_flattened()
            );
            $({
                let mut decoded_lower = vec![MaybeUninit::<u8>::uninit(); input.len()];
                let mut decoded_upper = vec![MaybeUninit::<u8>::uninit(); input.len()];
                unsafe {
                    $decode_f(output_lower, &mut decoded_lower).unwrap();
                    $decode_f(output_upper, &mut decoded_upper).unwrap();
                    assert_eq!(
                        decoded_lower.assume_init_ref(),
                        input,
                        "Decode error for {}, expect {:?}, got {:?}",
                        stringify!($decode_f),
                        input,
                        decoded_lower.assume_init_ref()
                    );
                    assert_eq!(
                        decoded_upper.assume_init_ref(),
                        input,
                        "Decode error for {}, expect {:?}, got {:?}",
                        stringify!($decode_f),
                        input,
                        decoded_upper.assume_init_ref()
                    );
                }
            })*
        }};
    }
    // Lengths straddle the 16-byte batch boundaries (15/16/17, 31/32/33).
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_ssse3() {
        const CASE: &[u8; 33] = &[
            0x62, 0xBE, 0x66, 0xE0, 0x1C, 0x1E, 0xFB, 0x43, 0x16, 0xA0, 0x9F, 0x8A, 0xE4, 0x93,
            0xE3, 0x7F, 0x23, 0x9F, 0x0D, 0xEF, 0x94, 0x25, 0xE0, 0x60, 0x62, 0xBA, 0x10, 0xB2,
            0x7B, 0xB6, 0x2B, 0xFB, 0x44,
        ];
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &[]
        }
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &CASE[..15]
        }
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &CASE[..16]
        }
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &CASE[..17]
        }
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &CASE[..31]
        }
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &CASE[..32]
        }
        test! {
            Encode = encode_ssse3_unchecked;
            Decode = decode_ssse3_unchecked;
            Case = &CASE[..33]
        }
    }
    // Lengths straddle the 32-byte batch boundaries (31/32/33, 63/64/65).
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_avx2() {
        const CASE: &[u8; 65] = &[
            0xA1, 0xA4, 0xA2, 0x49, 0x4A, 0x43, 0x03, 0x31, 0x5F, 0x60, 0xE7, 0x8F, 0x17, 0x36,
            0x31, 0xAD, 0xB3, 0xE4, 0xF2, 0x35, 0x33, 0x6F, 0x05, 0xF0, 0xAA, 0x52, 0xD2, 0x6F,
            0x3A, 0xB7, 0x4A, 0xAB, 0x66, 0x32, 0xB0, 0xD6, 0x1C, 0x8C, 0xED, 0x85, 0x9E, 0x03,
            0x90, 0x87, 0x16, 0x9C, 0xBA, 0x34, 0xAD, 0x59, 0x35, 0x66, 0xED, 0x80, 0x22, 0x85,
            0xDB, 0x54, 0x5E, 0x79, 0xD3, 0x9A, 0x6F, 0x24, 0x43,
        ];
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &[]
        }
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..31]
        }
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..32]
        }
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..33]
        }
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..63]
        }
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..64]
        }
        test! {
            Encode = encode_avx2_unchecked;
            Decode = decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..65]
        }
    }
    // Lengths straddle the 64-byte batch boundaries (63/64/65, 127/128/129).
    #[cfg(feature = "experimental-x86-avx512-simd")]
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_avx512() {
        const CASE: &[u8; 129] = &[
            0xF4, 0xA7, 0x5B, 0x1F, 0xD2, 0xDB, 0x63, 0xB7, 0x08, 0x71, 0xAF, 0x4B, 0xDD, 0x66,
            0x45, 0x7E, 0x51, 0xF1, 0x29, 0xD8, 0x9D, 0xDB, 0x74, 0x0E, 0xC4, 0x3B, 0xEE, 0x8C,
            0x14, 0x95, 0x85, 0xB9, 0x4A, 0xA4, 0xD4, 0x2F, 0xEC, 0x5A, 0x0C, 0x95, 0xAB, 0x46,
            0x7C, 0xFB, 0xE7, 0xC0, 0x94, 0x22, 0xB0, 0x2C, 0x4F, 0x50, 0x53, 0x64, 0xA6, 0x26,
            0x26, 0x2A, 0x8B, 0x99, 0xC7, 0x7F, 0x2F, 0xAF, 0xC0, 0xE8, 0xEF, 0xDB, 0x33, 0x2A,
            0xCD, 0xC7, 0x83, 0xD9, 0xE9, 0x76, 0x91, 0xE7, 0x0D, 0x18, 0x1D, 0xB4, 0xAC, 0x1A,
            0x33, 0xCA, 0xDD, 0x80, 0x98, 0x3E, 0xC4, 0xC0, 0x91, 0x33, 0x02, 0x01, 0x1F, 0xCE,
            0xAF, 0x77, 0x48, 0x75, 0x09, 0x3F, 0xD5, 0x2E, 0xAD, 0x91, 0x7C, 0x15, 0xD5, 0x03,
            0xA6, 0xC5, 0xB7, 0xB3, 0x15, 0x55, 0xAC, 0x11, 0x3C, 0x39, 0x31, 0xD0, 0x5F, 0xF9,
            0x89, 0x9B, 0xA6,
        ];
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &[]
        }
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..63]
        }
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..64]
        }
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..65]
        }
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..127]
        }
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..128]
        }
        test! {
            Encode = encode_avx512_unchecked;
            Decode = decode_avx512_unchecked, decode_avx2_unchecked, decode_ssse3_unchecked;
            Case = &CASE[..129]
        }
    }
    /// Exhaustive validation sweep: for each length, plant every possible
    /// byte value into an otherwise-valid buffer and check that each
    /// decoder accepts exactly the ASCII hex digits.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_validation() {
        for l in [15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129] {
            for c in 0u8..=255 {
                let mut bytes = vec![b'a'; l * 2];
                bytes[l] = c;
                // SAFETY: the buffer length `l * 2` is an exact multiple of 2.
                let bytes = unsafe { bytes.as_chunks_unchecked() };
                if c.is_ascii_hexdigit() {
                    unsafe {
                        assert!(
                            decode_ssse3_unchecked(
                                bytes,
                                Vec::with_capacity(l).spare_capacity_mut()
                            )
                            .is_ok(),
                            "ssse3 validation failed for byte {c} (l={l})",
                        );
                        assert!(
                            decode_avx2_unchecked(
                                bytes,
                                Vec::with_capacity(l).spare_capacity_mut()
                            )
                            .is_ok(),
                            "avx2 validation failed for byte {c} (l={l})"
                        );
                    }
                    // BUGFIX: `decode_avx512_unchecked` only exists behind the
                    // feature gate; referencing it unconditionally made
                    // `cargo test` fail to compile with the feature disabled.
                    #[cfg(feature = "experimental-x86-avx512-simd")]
                    unsafe {
                        assert!(
                            decode_avx512_unchecked(
                                bytes,
                                Vec::with_capacity(l).spare_capacity_mut()
                            )
                            .is_ok(),
                            "avx512 validation failed for byte {c} (l={l})"
                        );
                    }
                } else {
                    unsafe {
                        assert!(
                            decode_ssse3_unchecked(
                                bytes,
                                Vec::with_capacity(l).spare_capacity_mut()
                            )
                            .is_err(),
                            "ssse3 validation failed for byte {c} (l={l})"
                        );
                        assert!(
                            decode_avx2_unchecked(
                                bytes,
                                Vec::with_capacity(l).spare_capacity_mut()
                            )
                            .is_err(),
                            "avx2 validation failed for byte {c} (l={l})"
                        );
                    }
                    // BUGFIX: see the matching gate in the accepting branch.
                    #[cfg(feature = "experimental-x86-avx512-simd")]
                    unsafe {
                        assert!(
                            decode_avx512_unchecked(
                                bytes,
                                Vec::with_capacity(l).spare_capacity_mut()
                            )
                            .is_err(),
                            "avx512 validation failed for byte {c} (l={l})"
                        );
                    }
                }
            }
        }
    }
}