use super::*;
pub struct AVX2(());
impl Feature<FeatureGroup> for AVX2 {
fn get_support(runtime: &RuntimeSupport) -> Option<Self> {
runtime.avx2().then_some(Self(()))
}
}
impl<FS> Use<FS>
where FS: HasFeature<FeatureGroup, AVX2> {
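// The `set_*x2` family builds one 256-bit vector from two 128-bit halves.
// A sketch of the intended semantics, assuming the usual element ordering
// (`value[0]` fills the low lane, `value[1]` the high lane):
//
//     let v = set_u8x16x2([lo, hi]);
//     // v[0..16] == lo[0..16], v[16..32] == hi[0..16]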
defn_simd_set_pair!("avx2", {
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_u8x16x2(value: [u8x16; 2]) -> u8x32;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_u16x8x2(value: [u16x8; 2]) -> u16x16;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_u32x4x2(value: [u32x4; 2]) -> u32x8;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_u64x2x2(value: [u64x2; 2]) -> u64x4;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_s8x16x2(value: [s8x16; 2]) -> s8x32;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_s16x8x2(value: [s16x8; 2]) -> s16x16;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_s32x4x2(value: [s32x4; 2]) -> s32x8;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_set_m128i")]
pub fn set_s64x2x2(value: [s64x2; 2]) -> s64x4;
});
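// Broadcast-first: the shuffle below uses an all-zero index vector, so every
// output lane copies input lane 0. A sketch of the resulting behavior:
//
//     // if x == [7, 1, 2, 3, 4, 5, 6, 0], then
//     // set_all_from_first_u32x8(x) == [7, 7, 7, 7, 7, 7, 7, 7]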
defn_simd_shared!("avx2", fn(T) -> R {
simd_shuffle(value, T::splat(0), const {
[0u32; R::LEN]
})
}, {
#[intrinsic_for("vbroadcastb")]
#[intel_equivalents("_mm256_broadcastb_epi8")]
pub fn set_all_from_first_u8x32(value: u8x32) -> u8x32;
#[intrinsic_for("vbroadcastw")]
#[intel_equivalents("_mm256_broadcastw_epi16")]
pub fn set_all_from_first_u16x16(value: u16x16) -> u16x16;
#[intrinsic_for("vbroadcastd")]
#[intel_equivalents("_mm256_broadcastd_epi32")]
pub fn set_all_from_first_u32x8(value: u32x8) -> u32x8;
#[intrinsic_for("vbroadcastq")]
#[intel_equivalents("_mm256_broadcastq_epi64")]
pub fn set_all_from_first_u64x4(value: u64x4) -> u64x4;
#[intrinsic_for("vbroadcastb")]
#[intel_equivalents("_mm256_broadcastb_epi8")]
pub fn set_all_from_first_s8x32(value: s8x32) -> s8x32;
#[intrinsic_for("vbroadcastw")]
#[intel_equivalents("_mm256_broadcastw_epi16")]
pub fn set_all_from_first_s16x16(value: s16x16) -> s16x16;
#[intrinsic_for("vbroadcastd")]
#[intel_equivalents("_mm256_broadcastd_epi32")]
pub fn set_all_from_first_s32x8(value: s32x8) -> s32x8;
#[intrinsic_for("vbroadcastq")]
#[intel_equivalents("_mm256_broadcastq_epi64")]
pub fn set_all_from_first_s64x4(value: s64x4) -> s64x4;
});
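// `vbroadcasti128` copies one 128-bit source into both lanes; the shuffle
// indices simply wrap around the narrow source. E.g.
//
//     // set_all_u32x4x2([a, b, c, d]) == [a, b, c, d, a, b, c, d]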
defn_simd_shared!("avx2", fn(T) -> R {
simd_shuffle(value, value, const {
simd_slice_indices::<R>(0)
})
}, {
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_u8x16x2(value: u8x16) -> u8x32;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_u16x8x2(value: u16x8) -> u16x16;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_u32x4x2(value: u32x4) -> u32x8;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_u64x2x2(value: u64x2) -> u64x4;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_s8x16x2(value: s8x16) -> s8x32;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_s16x8x2(value: s16x8) -> s16x16;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_s32x4x2(value: s32x4) -> s32x8;
#[intrinsic_for("vbroadcasti128")]
#[intel_equivalents("_mm256_broadcastsi128_si256")]
pub fn set_all_s64x2x2(value: s64x2) -> s64x4;
});
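// Inserting a 128-bit lane takes two shuffles: the first widens `e` from E
// to R (so both operands of the second shuffle have R's length), and the
// second splices the widened `e` into `x` at lane `INDEX`. E.g.
//
//     // put_u64x2x2::<1>([x0, x1, x2, x3], [e0, e1]) == [x0, x1, e0, e1]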
defn_simd_shared!("avx2", fn(T, E) -> R {
const_assert!(INDEX as usize * E::LEN < R::LEN);
let e: R = simd_shuffle(e, E::splat(0), const {
simd_slice_indices::<R>(0)
});
simd_shuffle(x, e, const {
simd_insert_indices::<E, R>(INDEX as usize)
})
}, {
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_u8x16x2<INDEX: u8>(x: u8x32, e: u8x16) -> u8x32;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_u16x8x2<INDEX: u8>(x: u16x16, e: u16x8) -> u16x16;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_u32x4x2<INDEX: u8>(x: u32x8, e: u32x4) -> u32x8;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_u64x2x2<INDEX: u8>(x: u64x4, e: u64x2) -> u64x4;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_s8x16x2<INDEX: u8>(x: s8x32, e: s8x16) -> s8x32;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_s16x8x2<INDEX: u8>(x: s16x16, e: s16x8) -> s16x16;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_s32x4x2<INDEX: u8>(x: s32x8, e: s32x4) -> s32x8;
#[intrinsic_for("vinserti128")]
#[intel_equivalents("_mm256_inserti128_si256")]
pub fn put_s64x2x2<INDEX: u8>(x: s64x4, e: s64x2) -> s64x4;
});
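// Extracting a 128-bit lane: the shuffle selects R::LEN consecutive elements
// of `x` starting at the lane boundary `R::LEN * INDEX`; the zero vector is
// only a placeholder second operand. E.g.
//
//     // get_u64x2x2::<1>([x0, x1, x2, x3]) == [x2, x3]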
defn_simd_shared!("avx2", fn(T) -> R {
const_assert!(INDEX < 2);
simd_shuffle(x, T::splat(0), const {
simd_slice_indices::<R>(R::LEN * INDEX as usize)
})
}, {
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_u8x16x2<INDEX: u8>(x: u8x32) -> u8x16;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_u16x8x2<INDEX: u8>(x: u16x16) -> u16x8;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_u32x4x2<INDEX: u8>(x: u32x8) -> u32x4;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_u64x2x2<INDEX: u8>(x: u64x4) -> u64x2;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_s8x16x2<INDEX: u8>(x: s8x32) -> s8x16;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_s16x8x2<INDEX: u8>(x: s16x16) -> s16x8;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_s32x4x2<INDEX: u8>(x: s32x8) -> s32x4;
#[intrinsic_for("vextracti128")]
#[intel_equivalents("_mm256_extracti128_si256")]
pub fn get_s64x2x2<INDEX: u8>(x: s64x4) -> s64x2;
});
defn_simd_shared!("avx2", { simd_add(a, b) }, {
#[intrinsic_for("vpaddb")]
#[intel_equivalents("_mm256_add_epi8")]
pub fn add_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpaddw")]
#[intel_equivalents("_mm256_add_epi16")]
pub fn add_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpaddd")]
#[intel_equivalents("_mm256_add_epi32")]
pub fn add_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpaddq")]
#[intel_equivalents("_mm256_add_epi64")]
pub fn add_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpaddb")]
#[intel_equivalents("_mm256_add_epi8")]
pub fn add_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpaddw")]
#[intel_equivalents("_mm256_add_epi16")]
pub fn add_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpaddd")]
#[intel_equivalents("_mm256_add_epi32")]
pub fn add_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpaddq")]
#[intel_equivalents("_mm256_add_epi64")]
pub fn add_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
defn_simd_shared!("avx2", { simd_sub(a, b) }, {
#[intrinsic_for("vpsubb")]
#[intel_equivalents("_mm256_sub_epi8")]
pub fn sub_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpsubw")]
#[intel_equivalents("_mm256_sub_epi16")]
pub fn sub_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpsubd")]
#[intel_equivalents("_mm256_sub_epi32")]
pub fn sub_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpsubq")]
#[intel_equivalents("_mm256_sub_epi64")]
pub fn sub_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpsubb")]
#[intel_equivalents("_mm256_sub_epi8")]
pub fn sub_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpsubw")]
#[intel_equivalents("_mm256_sub_epi16")]
pub fn sub_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpsubd")]
#[intel_equivalents("_mm256_sub_epi32")]
pub fn sub_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpsubq")]
#[intel_equivalents("_mm256_sub_epi64")]
pub fn sub_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
defn_simd_shared!("avx2", { simd_saturating_add(a, b) }, {
#[intrinsic_for("vpaddusb")]
#[intel_equivalents("_mm256_adds_epu8")]
pub fn saturating_add_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpaddusw")]
#[intel_equivalents("_mm256_adds_epu16")]
pub fn saturating_add_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpaddsb")]
#[intel_equivalents("_mm256_adds_epi8")]
pub fn saturating_add_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpaddsw")]
#[intel_equivalents("_mm256_adds_epi16")]
pub fn saturating_add_s16x16(a: s16x16, b: s16x16) -> s16x16;
});
defn_simd_shared!("avx2", { simd_saturating_sub(a, b) }, {
#[intrinsic_for("vpsubusb")]
#[intel_equivalents("_mm256_subs_epu8")]
pub fn saturating_sub_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpsubusw")]
#[intel_equivalents("_mm256_subs_epu16")]
pub fn saturating_sub_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpsubsb")]
#[intel_equivalents("_mm256_subs_epi8")]
pub fn saturating_sub_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpsubsw")]
#[intel_equivalents("_mm256_subs_epi16")]
pub fn saturating_sub_s16x16(a: s16x16, b: s16x16) -> s16x16;
});
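// The horizontal (`vphadd*`/`vphsub*`) operations below combine adjacent
// pairs, but the output interleaves the two inputs per 128-bit lane. For
// concat_and_reduce_add_u16x2x8 the result is (sketch, wrapping arithmetic):
//
//     // [a0+a1, .., a6+a7, b0+b1, .., b6+b7,       <- low lane
//     //  a8+a9, .., a14+a15, b8+b9, .., b14+b15]   <- high lane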
defn_simd_llvm!("avx2", {
#[intrinsic_for("vphaddw")]
#[intel_equivalents("_mm256_hadd_epi16")]
pub fn concat_and_reduce_add_u16x2x8(x: u16x16, y: u16x16) -> u16x16
= "llvm.x86.avx2.phadd.w";
#[intrinsic_for("vphaddd")]
#[intel_equivalents("_mm256_hadd_epi32")]
pub fn concat_and_reduce_add_u32x2x4(x: u32x8, y: u32x8) -> u32x8
= "llvm.x86.avx2.phadd.d";
#[intrinsic_for("vphaddw")]
#[intel_equivalents("_mm256_hadd_epi16")]
pub fn concat_and_reduce_add_s16x2x8(x: s16x16, y: s16x16) -> s16x16
= "llvm.x86.avx2.phadd.w";
#[intrinsic_for("vphaddd")]
#[intel_equivalents("_mm256_hadd_epi32")]
pub fn concat_and_reduce_add_s32x2x4(x: s32x8, y: s32x8) -> s32x8
= "llvm.x86.avx2.phadd.d";
});
defn_simd_llvm!("avx2", {
#[intrinsic_for("vphaddsw")]
#[intel_equivalents("_mm256_hadds_epi16")]
pub fn concat_and_reduce_saturating_add_s16x2x8
(x: s16x16, y: s16x16) -> s16x16
= "llvm.x86.avx2.phadd.sw";
});
defn_simd_llvm!("avx2", {
#[intrinsic_for("vphsubw")]
#[intel_equivalents("_mm256_hsub_epi16")]
pub fn concat_and_reduce_sub_u16x2x8(x: u16x16, y: u16x16) -> u16x16
= "llvm.x86.avx2.phsub.w";
#[intrinsic_for("vphsubd")]
#[intel_equivalents("_mm256_hsub_epi32")]
pub fn concat_and_reduce_sub_u32x2x4(x: u32x8, y: u32x8) -> u32x8
= "llvm.x86.avx2.phsub.d";
#[intrinsic_for("vphsubw")]
#[intel_equivalents("_mm256_hsub_epi16")]
pub fn concat_and_reduce_sub_s16x2x8(x: s16x16, y: s16x16) -> s16x16
= "llvm.x86.avx2.phsub.w";
#[intrinsic_for("vphsubd")]
#[intel_equivalents("_mm256_hsub_epi32")]
pub fn concat_and_reduce_sub_s32x2x4(x: s32x8, y: s32x8) -> s32x8
= "llvm.x86.avx2.phsub.d";
});
defn_simd_llvm!("avx2", {
#[intrinsic_for("vphsubsw")]
#[intel_equivalents("_mm256_hsubs_epi16")]
pub fn concat_and_reduce_saturating_sub_s16x2x8
(x: s16x16, y: s16x16) -> s16x16
= "llvm.x86.avx2.phsub.sw";
});
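// Comparisons return lane masks, not booleans: each lane of the result is
// all ones where the predicate holds and zero elsewhere, which composes
// with the blend and bitmask operations further down. E.g.
//
//     // cmp_eq_u8x32(a, b)[i] == if a[i] == b[i] { 0xFF } else { 0x00 }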
defn_simd_shared!("avx2", { simd_eq(a, b) }, {
#[intrinsic_for("vpcmpeqb")]
#[intel_equivalents("_mm256_cmpeq_epi8")]
pub fn cmp_eq_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpcmpeqw")]
#[intel_equivalents("_mm256_cmpeq_epi16")]
pub fn cmp_eq_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpcmpeqd")]
#[intel_equivalents("_mm256_cmpeq_epi32")]
pub fn cmp_eq_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpcmpeqd")]
#[intel_equivalents("_mm256_cmpeq_epi64")]
pub fn cmp_eq_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpcmpeqb")]
#[intel_equivalents("_mm256_cmpeq_epi8")]
pub fn cmp_eq_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpcmpeqw")]
#[intel_equivalents("_mm256_cmpeq_epi16")]
pub fn cmp_eq_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpcmpeqd")]
#[intel_equivalents("_mm256_cmpeq_epi32")]
pub fn cmp_eq_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpcmpeqd")]
#[intel_equivalents("_mm256_cmpeq_epi64")]
pub fn cmp_eq_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
defn_simd_shared!("avx2", { simd_gt(a, b) }, {
#[intrinsic_for("vpcmpgtb")]
#[intel_equivalents("_mm256_cmpgt_epi8")]
pub fn cmp_gt_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpcmpgtw")]
#[intel_equivalents("_mm256_cmpgt_epi16")]
pub fn cmp_gt_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpcmpgtd")]
#[intel_equivalents("_mm256_cmpgt_epi32")]
pub fn cmp_gt_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpcmpgtd")]
#[intel_equivalents("_mm256_cmpgt_epi64")]
pub fn cmp_gt_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
defn_simd_shared!("avx2", { simd_and(a, b) }, {
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpand")]
#[intel_equivalents("_mm256_and_si256")]
pub fn and_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
defn_simd_shared!("avx2", { simd_or(a, b) }, {
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpor")]
#[intel_equivalents("_mm256_or_si256")]
pub fn ior_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
defn_simd_shared!("avx2", { simd_xor(a, b) }, {
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpxor")]
#[intel_equivalents("_mm256_xor_si256")]
pub fn xor_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
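// Note the operand order of `vpandn`: it computes `!a & b` (the *first*
// operand is complemented); `simd_andnot(a, b)` is assumed to follow the
// same convention here. E.g.
//
//     // and_not_u8x32(mask, v) keeps the lanes of v where mask is zero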
defn_simd_shared!("avx2", { simd_andnot(a, b) }, {
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_u64x4(a: u64x4, b: u64x4) -> u64x4;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_s32x8(a: s32x8, b: s32x8) -> s32x8;
#[intrinsic_for("vpandn")]
#[intel_equivalents("_mm256_andnot_si256")]
pub fn and_not_s64x4(a: s64x4, b: s64x4) -> s64x4;
});
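// `vpavg*` computes a rounding average, (a + b + 1) >> 1, so the sum must be
// formed in the next wider element type to avoid overflow; that is why
// `simd_avg` is instantiated with the widened vector (u16x32 / u32x16) as
// its second parameter. E.g.
//
//     // avg_u8x32: 255 avg 254 == (255 + 254 + 1) >> 1 == 255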
defn_simd_manual!("avx2", {
#[intrinsic_for("vpavgb")]
#[intel_equivalents("_mm256_avg_epu8")]
pub fn avg_u8x32(a: u8x32, b: u8x32) -> u8x32
= simd_avg::<u8x32, u16x32>;
#[intrinsic_for("vpavgw")]
#[intel_equivalents("_mm256_avg_epu16")]
pub fn avg_u16x16(a: u16x16, b: u16x16) -> u16x16
= simd_avg::<u16x16, u32x16>;
});
defn_simd_shared!("avx2", { simd_max(a, b) }, {
#[intrinsic_for("vpmaxub")]
#[intel_equivalents("_mm256_max_epu8")]
pub fn max_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpmaxuw")]
#[intel_equivalents("_mm256_max_epu16")]
pub fn max_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpmaxud")]
#[intel_equivalents("_mm256_max_epu32")]
pub fn max_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpmaxsb")]
#[intel_equivalents("_mm256_max_epi8")]
pub fn max_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpmaxsw")]
#[intel_equivalents("_mm256_max_epi16")]
pub fn max_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpmaxsd")]
#[intel_equivalents("_mm256_max_epi32")]
pub fn max_s32x8(a: s32x8, b: s32x8) -> s32x8;
});
defn_simd_shared!("avx2", { simd_min(a, b) }, {
#[intrinsic_for("vpminub")]
#[intel_equivalents("_mm256_min_epu8")]
pub fn min_u8x32(a: u8x32, b: u8x32) -> u8x32;
#[intrinsic_for("vpminuw")]
#[intel_equivalents("_mm256_min_epu16")]
pub fn min_u16x16(a: u16x16, b: u16x16) -> u16x16;
#[intrinsic_for("vpminud")]
#[intel_equivalents("_mm256_min_epu32")]
pub fn min_u32x8(a: u32x8, b: u32x8) -> u32x8;
#[intrinsic_for("vpminsb")]
#[intel_equivalents("_mm256_min_epi8")]
pub fn min_s8x32(a: s8x32, b: s8x32) -> s8x32;
#[intrinsic_for("vpminsw")]
#[intel_equivalents("_mm256_min_epi16")]
pub fn min_s16x16(a: s16x16, b: s16x16) -> s16x16;
#[intrinsic_for("vpminsd")]
#[intel_equivalents("_mm256_min_epi32")]
pub fn min_s32x8(a: s32x8, b: s32x8) -> s32x8;
});
defn_simd_shared!("avx2", { simd_abs(x) }, {
#[intrinsic_for("vpabsb")]
#[intel_equivalents("_mm256_abs_epi8")]
pub fn abs_s8x32(x: s8x32) -> s8x32;
#[intrinsic_for("vpabsw")]
#[intel_equivalents("_mm256_abs_epi16")]
pub fn abs_s16x16(x: s16x16) -> s16x16;
#[intrinsic_for("vpabsd")]
#[intel_equivalents("_mm256_abs_epi32")]
pub fn abs_s32x8(x: s32x8) -> s32x8;
});
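// The `shl_all_*` shifts apply one count to every lane; `vpsll*` reads the
// count from the low 64 bits of a 128-bit operand, and counts greater than
// or equal to the lane width zero the result (they do not wrap). Sketch:
//
//     // shl_all_u16x16(x, count): every lane of x << count[0]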
defn_simd_llvm!("avx2", {
#[intrinsic_for("vpsllw")]
#[intel_equivalents("_mm256_sll_epi16")]
pub fn shl_all_u16x16(x: u16x16, s: u64x2) -> u16x16
= "llvm.x86.avx2.psll.w";
#[intrinsic_for("vpslld")]
#[intel_equivalents("_mm256_sll_epi32")]
pub fn shl_all_u32x8(x: u32x8, s: u64x2) -> u32x8
= "llvm.x86.avx2.psll.d";
#[intrinsic_for("vpsllq")]
#[intel_equivalents("_mm256_sll_epi64")]
pub fn shl_all_u64x4(x: u64x4, s: u64x2) -> u64x4
= "llvm.x86.avx2.psll.q";
#[intrinsic_for("vpsllw")]
#[intel_equivalents("_mm256_sll_epi16")]
pub fn shl_all_s16x16(x: s16x16, s: u64x2) -> s16x16
= "llvm.x86.avx2.psll.w";
#[intrinsic_for("vpslld")]
#[intel_equivalents("_mm256_sll_epi32")]
pub fn shl_all_s32x8(x: s32x8, s: u64x2) -> s32x8
= "llvm.x86.avx2.psll.d";
#[intrinsic_for("vpsllq")]
#[intel_equivalents("_mm256_sll_epi64")]
pub fn shl_all_s64x4(x: s64x4, s: u64x2) -> s64x4
= "llvm.x86.avx2.psll.q";
});
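// The `vpsllv*`/`vpsrlv*`/`vpsrav*` forms below take a per-lane shift count
// instead. Out-of-range counts zero the lane for the logical forms; the
// arithmetic `vpsrav*` forms saturate the count, filling with the sign bit.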
defn_simd_llvm!("avx2", {
#[intrinsic_for("vpsllvd")]
#[intel_equivalents("_mm_sllv_epi32")]
pub fn shl_u32x4(x: u32x4, s: u32x4) -> u32x4
= "llvm.x86.avx2.psllv.d";
#[intrinsic_for("vpsllvq")]
#[intel_equivalents("_mm_sllv_epi64")]
pub fn shl_u64x2(x: u64x2, s: u64x2) -> u64x2
= "llvm.x86.avx2.psllv.q";
#[intrinsic_for("vpsllvd")]
#[intel_equivalents("_mm_sllv_epi32")]
pub fn shl_s32x4(x: s32x4, s: u32x4) -> s32x4
= "llvm.x86.avx2.psllv.d";
#[intrinsic_for("vpsllvq")]
#[intel_equivalents("_mm_sllv_epi64")]
pub fn shl_s64x2(x: s64x2, s: u64x2) -> s64x2
= "llvm.x86.avx2.psllv.q";
});
defn_simd_llvm!("avx2", {
#[intrinsic_for("vpsllvd")]
#[intel_equivalents("_mm256_sllv_epi32")]
pub fn shl_u32x8(x: u32x8, s: u32x8) -> u32x8
= "llvm.x86.avx2.psllv.d.256";
#[intrinsic_for("vpsllvq")]
#[intel_equivalents("_mm256_sllv_epi64")]
pub fn shl_u64x4(x: u64x4, s: u64x4) -> u64x4
= "llvm.x86.avx2.psllv.q.256";
#[intrinsic_for("vpsllvd")]
#[intel_equivalents("_mm256_sllv_epi32")]
pub fn shl_s32x8(x: s32x8, s: u32x8) -> s32x8
= "llvm.x86.avx2.psllv.d.256";
#[intrinsic_for("vpsllvq")]
#[intel_equivalents("_mm256_sllv_epi64")]
pub fn shl_s64x4(x: s64x4, s: u64x4) -> s64x4
= "llvm.x86.avx2.psllv.q.256";
});
defn_simd_shared!("avx2", { simd_shl_all::<_, BITS>(x) }, {
#[intrinsic_for("vpsllw")]
#[intel_equivalents("_mm256_slli_epi16")]
pub fn shl_all_by_u16x16<BITS: u8>(x: u16x16) -> u16x16;
#[intrinsic_for("vpslld")]
#[intel_equivalents("_mm256_slli_epi32")]
pub fn shl_all_by_u32x8<BITS: u8>(x: u32x8) -> u32x8;
#[intrinsic_for("vpsllq")]
#[intel_equivalents("_mm256_slli_epi64")]
pub fn shl_all_by_u64x4<BITS: u8>(x: u64x4) -> u64x4;
#[intrinsic_for("vpsllw")]
#[intel_equivalents("_mm256_slli_epi16")]
pub fn shl_all_by_s16x16<BITS: u8>(x: s16x16) -> s16x16;
#[intrinsic_for("vpslld")]
#[intel_equivalents("_mm256_slli_epi32")]
pub fn shl_all_by_s32x8<BITS: u8>(x: s32x8) -> s32x8;
#[intrinsic_for("vpsllq")]
#[intel_equivalents("_mm256_slli_epi64")]
pub fn shl_all_by_s64x4<BITS: u8>(x: s64x4) -> s64x4;
});
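// Right shifts split by signedness: unsigned types get a logical shift
// (`vpsrl*`, zero fill), signed types an arithmetic shift (`vpsra*`, sign
// fill). AVX2 has no 64-bit arithmetic shift, which is why there is no
// shr_all_s64x4 here and no shr_s64x2 / shr_s64x4 below.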
defn_simd_llvm!("avx2", {
#[intrinsic_for("vpsrlw")]
#[intel_equivalents("_mm256_srl_epi16")]
pub fn shr_all_u16x16(x: u16x16, s: u64x2) -> u16x16
= "llvm.x86.avx2.psrl.w";
#[intrinsic_for("vpsrld")]
#[intel_equivalents("_mm256_srl_epi32")]
pub fn shr_all_u32x8(x: u32x8, s: u64x2) -> u32x8
= "llvm.x86.avx2.psrl.d";
#[intrinsic_for("vpsrlq")]
#[intel_equivalents("_mm256_srl_epi64")]
pub fn shr_all_u64x4(x: u64x4, s: u64x2) -> u64x4
= "llvm.x86.avx2.psrl.q";
#[intrinsic_for("vpsraw")]
#[intel_equivalents("_mm256_sra_epi16")]
pub fn shr_all_s16x16(x: s16x16, s: u64x2) -> s16x16
= "llvm.x86.avx2.psra.w";
#[intrinsic_for("vpsrad")]
#[intel_equivalents("_mm256_sra_epi32")]
pub fn shr_all_s32x8(x: s32x8, s: u64x2) -> s32x8
= "llvm.x86.avx2.psra.d";
});
defn_simd_llvm!("avx2", {
#[intrinsic_for("vpsrlvd")]
#[intel_equivalents("_mm_srlv_epi32")]
pub fn shr_u32x4(x: u32x4, s: u32x4) -> u32x4
= "llvm.x86.avx2.psrlv.d";
#[intrinsic_for("vpsrlvq")]
#[intel_equivalents("_mm_srlv_epi64")]
pub fn shr_u64x2(x: u64x2, s: u64x2) -> u64x2
= "llvm.x86.avx2.psrlv.q";
#[intrinsic_for("vpsravd")]
#[intel_equivalents("_mm_srav_epi32")]
pub fn shr_s32x4(x: s32x4, s: u32x4) -> s32x4
= "llvm.x86.avx2.psrav.d";
});
defn_simd_llvm!("avx2", {
#[intrinsic_for("vpsrlvd")]
#[intel_equivalents("_mm256_srlv_epi32")]
pub fn shr_u32x8(x: u32x8, s: u32x8) -> u32x8
= "llvm.x86.avx2.psrlv.d.256";
#[intrinsic_for("vpsrlvq")]
#[intel_equivalents("_mm256_srlv_epi64")]
pub fn shr_u64x4(x: u64x4, s: u64x4) -> u64x4
= "llvm.x86.avx2.psrlv.q.256";
#[intrinsic_for("vpsravd")]
#[intel_equivalents("_mm256_srav_epi32")]
pub fn shr_s32x8(x: s32x8, s: u32x8) -> s32x8
= "llvm.x86.avx2.psrav.d.256";
});
defn_simd_shared!("avx2", { simd_shr_all::<_, BITS>(x) }, {
#[intrinsic_for("vpsrlw")]
#[intel_equivalents("_mm256_srli_epi16")]
pub fn shr_all_by_u16x16<BITS: u8>(x: u16x16) -> u16x16;
#[intrinsic_for("vpsrld")]
#[intel_equivalents("_mm256_srli_epi32")]
pub fn shr_all_by_u32x8<BITS: u8>(x: u32x8) -> u32x8;
#[intrinsic_for("vpsrlq")]
#[intel_equivalents("_mm256_srli_epi64")]
pub fn shr_all_by_u64x4<BITS: u8>(x: u64x4) -> u64x4;
#[intrinsic_for("vpsraw")]
#[intel_equivalents("_mm256_srai_epi16")]
pub fn shr_all_by_s16x16<BITS: u8>(x: s16x16) -> s16x16;
#[intrinsic_for("vpsrad")]
#[intel_equivalents("_mm256_srai_epi32")]
pub fn shr_all_by_s32x8<BITS: u8>(x: s32x8) -> s32x8;
});
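// The `expand_*` conversions widen each element of the *low* part of the
// input (u8x16, u8x8, ..., as named by simd_expand's second parameter); the
// upper input elements are ignored, matching vpmovzx/vpmovsx, which read a
// 128-bit or narrower source. E.g.
//
//     // expand_u8x32_u16x16(x)[i] == x[i] as u16, for i in 0..16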
defn_simd_manual!("avx2", {
#[intrinsic_for("vpmovzxbw")]
#[intel_equivalents("_mm256_cvtepu8_epi16")]
pub fn expand_u8x32_u16x16(x: u8x32) -> u16x16
= simd_expand::<_, u8x16, _>;
#[intrinsic_for("vpmovzxbw")]
#[intel_equivalents("_mm256_cvtepu8_epi16")]
pub fn expand_u8x32_s16x16(x: u8x32) -> s16x16
= simd_expand::<_, u8x16, _>;
#[intrinsic_for("vpmovzxbd")]
#[intel_equivalents("_mm256_cvtepu8_epi32")]
pub fn expand_u8x32_u32x8(x: u8x32) -> u32x8
= simd_expand::<_, u8x8, _>;
#[intrinsic_for("vpmovzxbd")]
#[intel_equivalents("_mm256_cvtepu8_epi32")]
pub fn expand_u8x32_s32x8(x: u8x32) -> s32x8
= simd_expand::<_, u8x8, _>;
#[intrinsic_for("vpmovzxbq")]
#[intel_equivalents("_mm256_cvtepu8_epi64")]
pub fn expand_u8x32_u64x4(x: u8x32) -> u64x4
= simd_expand::<_, u8x4, _>;
#[intrinsic_for("vpmovzxbq")]
#[intel_equivalents("_mm256_cvtepu8_epi64")]
pub fn expand_u8x32_s64x4(x: u8x32) -> s64x4
= simd_expand::<_, u8x4, _>;
#[intrinsic_for("vpmovzxwd")]
#[intel_equivalents("_mm256_cvtepu16_epi32")]
pub fn expand_u16x16_u32x8(x: u16x16) -> u32x8
= simd_expand::<_, u16x8, _>;
#[intrinsic_for("vpmovzxwd")]
#[intel_equivalents("_mm256_cvtepu16_epi32")]
pub fn expand_u16x16_s32x8(x: u16x16) -> s32x8
= simd_expand::<_, u16x8, _>;
#[intrinsic_for("vpmovzxwq")]
#[intel_equivalents("_mm256_cvtepu16_epi64")]
pub fn expand_u16x16_u64x4(x: u16x16) -> u64x4
= simd_expand::<_, u16x4, _>;
#[intrinsic_for("vpmovzxwq")]
#[intel_equivalents("_mm256_cvtepu16_epi64")]
pub fn expand_u16x16_s64x4(x: u16x16) -> s64x4
= simd_expand::<_, u16x4, _>;
#[intrinsic_for("vpmovzxdq")]
#[intel_equivalents("_mm256_cvtepu32_epi64")]
pub fn expand_u32x8_u64x4(x: u32x8) -> u64x4
= simd_expand::<_, u32x4, _>;
#[intrinsic_for("vpmovzxdq")]
#[intel_equivalents("_mm256_cvtepu32_epi64")]
pub fn expand_u32x8_s64x4(x: u32x8) -> s64x4
= simd_expand::<_, u32x4, _>;
#[intrinsic_for("vpmovsxbw")]
#[intel_equivalents("_mm256_cvtepi8_epi16")]
pub fn expand_s8x32_s16x16(x: s8x32) -> s16x16
= simd_expand::<_, s8x16, _>;
#[intrinsic_for("vpmovsxbd")]
#[intel_equivalents("_mm256_cvtepi8_epi32")]
pub fn expand_s8x32_s32x8(x: s8x32) -> s32x8
= simd_expand::<_, s8x8, _>;
#[intrinsic_for("vpmovsxbq")]
#[intel_equivalents("_mm256_cvtepi8_epi64")]
pub fn expand_s8x32_s64x4(x: s8x32) -> s64x4
= simd_expand::<_, s8x4, _>;
#[intrinsic_for("vpmovsxwd")]
#[intel_equivalents("_mm256_cvtepi16_epi32")]
pub fn expand_s16x16_s32x8(x: s16x16) -> s32x8
= simd_expand::<_, s16x8, _>;
#[intrinsic_for("vpmovsxwq")]
#[intel_equivalents("_mm256_cvtepi16_epi64")]
pub fn expand_s16x16_s64x4(x: s16x16) -> s64x4
= simd_expand::<_, s16x4, _>;
#[intrinsic_for("vpmovsxdq")]
#[intel_equivalents("_mm256_cvtepi32_epi64")]
pub fn expand_s32x8_s64x4(x: s32x8) -> s64x4
= simd_expand::<_, s32x4, _>;
});
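// `vpalignr` concatenates corresponding 128-bit lanes of the two operands
// and shifts each pair right by SHIFT bytes, lane by lane. The index
// function below builds exactly that per-lane window: element `p` of lane
// `lane` comes from x[16 * lane + p + SHIFT] while that stays inside the
// lane, and from the matching lane of y once it runs past byte 15. E.g.
//
//     // SHIFT == 1: result lane 0 == [x[1..16], y[0]]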
defn_simd_shared!("avx2", fn(T, U) -> R {
const fn indices(start: usize) -> [u32; R::LEN] {
let mut indices = [0u32; R::LEN];
let mut offset = 0;
while offset < indices.len() {
// Each 128-bit lane is aligned independently; never cross lanes.
let lane = offset / 16;
let pos = offset % 16 + start;
indices[offset] = if pos < 16 {
// Still inside this lane of `x`.
(lane * 16 + pos) as u32
} else {
// Spilled past byte 15: read from the matching lane of `y`.
(T::LEN + lane * 16 + (pos - 16)) as u32
};
offset += 1;
}
indices
}
const_assert!(SHIFT < 16);
simd_shuffle(x, y, const { indices(SHIFT as usize) })
}, {
#[intrinsic_for("vpalignr")]
#[intel_equivalents("_mm256_alignr_epi8")]
pub fn align_elems_by_u8x16x2<SHIFT: u8>
(x: u8x32, y: u8x32) -> u8x32;
#[intrinsic_for("vpalignr")]
#[intel_equivalents("_mm256_alignr_epi8")]
pub fn align_elems_by_s8x16x2<SHIFT: u8>
(x: s8x32, y: s8x32) -> s8x32;
});
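// Immediate blends: bit i of MASK selects lane i of `y` when set and lane i
// of `x` when clear. For the 16-bit variant the hardware repeats the 8-bit
// immediate for both 128-bit lanes, which the u16::from_le_bytes([MASK; 2])
// trick below reproduces. E.g.
//
//     // blend_by_u32x4::<0b0101>(x, y) == [y0, x1, y2, x3]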
defn_simd_shared!("avx2", {
simd_select_bitmask(MASK, y, x)
}, {
#[intrinsic_for("vpblendd")]
#[intel_equivalents("_mm_blend_epi32")]
pub fn blend_by_u32x4<MASK: u8>(x: u32x4, y: u32x4) -> u32x4;
#[intrinsic_for("vpblendd")]
#[intel_equivalents("_mm_blend_epi32")]
pub fn blend_by_s32x4<MASK: u8>(x: s32x4, y: s32x4) -> s32x4;
});
defn_simd_manual!("avx2", {
#[intrinsic_for("vpblendw")]
#[intel_equivalents("_mm256_blend_epi16")]
pub fn blend_by_u16x8x2<MASK: u8>(x: u16x16, y: u16x16) -> u16x16 {
simd_select_bitmask(u16::from_le_bytes([MASK; 2]), y, x)
}
#[intrinsic_for("vpblendw")]
#[intel_equivalents("_mm256_blend_epi16")]
pub fn blend_by_s16x8x2<MASK: u8>(x: s16x16, y: s16x16) -> s16x16 {
simd_select_bitmask(u16::from_le_bytes([MASK; 2]), y, x)
}
});
defn_simd_shared!("avx2", {
simd_select_bitmask(MASK, y, x)
}, {
#[intrinsic_for("vpblendd")]
#[intel_equivalents("_mm256_blend_epi32")]
pub fn blend_by_u32x8<MASK: u8>(x: u32x8, y: u32x8) -> u32x8;
#[intrinsic_for("vpblendd")]
#[intel_equivalents("_mm256_blend_epi32")]
pub fn blend_by_s32x8<MASK: u8>(x: s32x8, y: s32x8) -> s32x8;
});
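// `vpblendvb` selects per byte on the *sign bit* of the mask: the
// simd_lt-against-zero below turns that sign bit into the full lane mask
// that simd_select expects. Sketch:
//
//     // blend_u8x32(x, y, m)[i] == if m[i] < 0 { y[i] } else { x[i] }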
defn_simd_shared!("avx2", fn(T, U, M) -> R {
let mask: M = simd_lt(mask, M::splat(0));
simd_select(mask, y, x)
}, {
#[intrinsic_for("vpblendvb")]
#[intel_equivalents("_mm256_blendv_epi8")]
pub fn blend_u8x32(x: u8x32, y: u8x32, mask: s8x32) -> u8x32;
#[intrinsic_for("vpblendvb")]
#[intel_equivalents("_mm256_blendv_epi8")]
pub fn blend_s8x32(x: s8x32, y: s8x32, mask: s8x32) -> s8x32;
});
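// The byte moves below shift within each 128-bit lane independently (they
// compile to vpslldq/vpsrldq); bytes never cross the lane boundary, and the
// vacated positions are zero-filled from the splat(0) operand. E.g.
//
//     // move_r_by_u8x32::<1>(x): lanes become [x[1..16], 0], [x[17..32], 0]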
defn_simd_shared!("avx2", fn(T) -> R {
const_assert!(ELEMS <= 16);
simd_shuffle(T::splat(0), x, const {
simd_subslice_indices::<u8x16, R>(16 - (ELEMS as usize))
})
}, {
#[intrinsic_for("vpsrldq")]
#[intel_equivalents("_mm256_bslli_epi128", "_mm256_slli_si256")]
pub fn move_l_by_u8x32<ELEMS: u8>(x: u8x32) -> u8x32;
#[intrinsic_for("vpsrldq")]
#[intel_equivalents("_mm256_bslli_epi128", "_mm256_slli_si256")]
pub fn move_l_by_s8x32<ELEMS: u8>(x: s8x32) -> s8x32;
});
defn_simd_shared!("avx2", fn(T) -> R {
const_assert!(ELEMS <= 16);
simd_shuffle(x, T::splat(0), const {
simd_subslice_indices::<u8x16, R>(ELEMS as usize)
})
}, {
#[intrinsic_for("vpsrldq")]
#[intel_equivalents("_mm256_bsrli_epi128", "_mm256_srli_si256")]
pub fn move_r_by_u8x32<ELEMS: u8>(x: u8x32) -> u8x32;
#[intrinsic_for("vpsrldq")]
#[intel_equivalents("_mm256_bsrli_epi128", "_mm256_srli_si256")]
pub fn move_r_by_s8x32<ELEMS: u8>(x: s8x32) -> s8x32;
});
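// `vpmovmskb` packs the sign bit of each of the 32 bytes into one integer
// (hence the u32 return): bit i of the result is the top bit of lane i. E.g.
//
//     // bitmask_u8x32(x) & 1 == (x[0] >= 0x80) as u32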
defn_simd_manual!("avx2", {
#[intrinsic_for("vpmovmskb")]
#[intel_equivalents("_mm256_movemask_epi8")]
pub fn bitmask_u8x32(x: u8x32) -> u32 {
simd_bitmask(simd_ge::<_, u8x32>(x, u8x32::splat(0x80)))
}
#[intrinsic_for("vpmovmskb")]
#[intel_equivalents("_mm256_movemask_epi8")]
pub fn bitmask_s8x32(x: s8x32) -> u32 {
simd_bitmask(simd_lt::<_, s8x32>(x, s8x32::splat(0)))
}
});
}