use super::{common_types::ConstUnionHack128bit, macros::*, num_traits::*};
use auto_ops_det::impl_op_ex;
#[cfg(target_feature = "sse2")]
use core::arch::x86_64::*;
#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::ops;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
// Backing storage for `i16x8`: a plain array when `scalar-math` is on,
// otherwise the SIMD-accelerated type from the `wide`-style backend.
#[cfg(feature = "scalar-math")]
type Inner = [i16; 8];
#[cfg(not(feature = "scalar-math"))]
type Inner = crate::wide::i16x8;
// Eight packed `i16` lanes (lane 0 first).
//
// Layout depends on the active feature set:
// - SIMD configuration: `repr(transparent)` over the inner `wide` type, so
//   the wrapper has exactly its layout.
// - scalar/libm configurations: the storage is a plain `[i16; 8]`, forced to
//   16-byte alignment so it can still be reinterpreted as a 128-bit value
//   (e.g. through `ConstUnionHack128bit`).
#[allow(non_camel_case_types)]
#[cfg_attr(
all(
feature = "std",
not(feature = "libm_force"),
not(feature = "scalar-math")
),
repr(transparent)
)]
#[cfg_attr(
any(
feature = "libm_force",
feature = "scalar-math",
all(feature = "libm_fallback", not(feature = "std"))
),
repr(align(16))
)]
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct i16x8(Inner);
// Generates lane-splatted associated constants from the same-named `i16`
// limits: `define_const!(MIN, MAX)` expands to
// `pub const MIN: Self = Self::const_splat(i16::MIN);` (and likewise `MAX`).
macro_rules! define_const {
    ( $( $const_name:ident ),* ) => {
        $(
            // `i16::$const_name` is the associated-constant path; the legacy
            // `core::i16::*` module constants are deprecated since Rust 1.43.
            pub const $const_name: Self = Self::const_splat(i16::$const_name);
        )*
    };
}
impl i16x8 {
/// Lane-splatted limits: `MIN` = all lanes `i16::MIN`, `MAX` = all lanes
/// `i16::MAX` (generated by `define_const!`).
define_const!(MIN, MAX);
/// Applies `f` independently to each of the eight lanes.
#[allow(dead_code)]
#[inline]
pub fn map(self, f: impl Fn(i16) -> i16) -> Self {
let arr: &[i16; 8] = self.as_ref();
Self::from([
f(arr[0]),
f(arr[1]),
f(arr[2]),
f(arr[3]),
f(arr[4]),
f(arr[5]),
f(arr[6]),
f(arr[7]),
])
}
/// Applies `f` to corresponding lanes of `self` and `rhs`.
#[inline]
pub fn zip_map(self, rhs: Self, f: impl Fn(i16, i16) -> i16) -> Self {
let arr: &[i16; 8] = self.as_ref();
let rhs: &[i16; 8] = rhs.as_ref();
Self::from([
f(arr[0], rhs[0]),
f(arr[1], rhs[1]),
f(arr[2], rhs[2]),
f(arr[3], rhs[3]),
f(arr[4], rhs[4]),
f(arr[5], rhs[5]),
f(arr[6], rhs[6]),
f(arr[7], rhs[7]),
])
}
/// Broadcasts `val` into all eight lanes, usable in `const` contexts.
#[inline]
pub const fn const_splat(val: i16) -> Self {
// Reinterprets the 128-bit union through its `i16x8` field. Assumes
// `ConstUnionHack128bit` makes `i16a8` and `i16x8` layout-compatible —
// TODO confirm at the union's definition.
unsafe { ConstUnionHack128bit { i16a8: [val; 8] }.i16x8 }
}
/// Mirrors Intel's `_mm_set_epi16`: arguments are listed highest lane
/// first (`e7`) down to lowest (`e0`), so lane 0 of the result is `e0`.
#[inline]
#[allow(clippy::too_many_arguments)]
pub fn mm_set_epi16(
e7: i16,
e6: i16,
e5: i16,
e4: i16,
e3: i16,
e2: i16,
e1: i16,
e0: i16,
) -> Self {
#[cfg(target_feature = "sse2")]
unsafe {
let sse_val: __m128i = _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0);
ConstUnionHack128bit { __m128i: sse_val }.i16x8
}
// Scalar fallback: the reversed array order puts `e0` in lane 0,
// matching the SSE2 path above.
#[cfg(not(target_feature = "sse2"))]
Self::from([e0, e1, e2, e3, e4, e5, e6, e7])
}
/// Shuffles both 4-lane halves by the same pattern. `IMM8` packs four
/// 2-bit source indices (bits 0-1 select the source for lane 0, bits 2-3
/// for lane 1, ...); the pattern is applied to lanes 0-3 and again, with
/// a +4 offset, to lanes 4-7.
#[inline]
pub fn swizzle<const IMM8: i32>(self) -> i16x8 {
#[cfg(target_feature = "sse2")]
unsafe {
let mut sse_val: __m128i = ConstUnionHack128bit { i16x8: self }.__m128i;
// shufflehi permutes lanes 4-7, shufflelo lanes 0-3; together they
// apply the same IMM8 pattern to both halves.
sse_val = _mm_shufflehi_epi16::<IMM8>(sse_val);
sse_val = _mm_shufflelo_epi16::<IMM8>(sse_val);
ConstUnionHack128bit { __m128i: sse_val }.i16x8
}
#[cfg(not(target_feature = "sse2"))]
{
// Decode the four 2-bit lane indices from IMM8.
let indexs = [
IMM8 as usize & 0b11,
(IMM8 as usize >> 2) & 0b11,
(IMM8 as usize >> 4) & 0b11,
(IMM8 as usize >> 6) & 0b11,
];
let arr: &[i16; 8] = self.as_ref();
Self::from([
arr[indexs[0]],
arr[indexs[1]],
arr[indexs[2]],
arr[indexs[3]],
arr[indexs[0] + 4],
arr[indexs[1] + 4],
arr[indexs[2] + 4],
arr[indexs[3] + 4],
])
}
}
/// Shuffles only the low half (lanes 0-3) by `IMM8`; lanes 4-7 pass
/// through unchanged. Same IMM8 encoding as [`Self::swizzle`].
#[inline]
pub fn swizzle_lo<const IMM8: i32>(self: i16x8) -> i16x8 {
#[cfg(target_feature = "sse2")]
unsafe {
let mut sse_val: __m128i = ConstUnionHack128bit { i16x8: self }.__m128i;
sse_val = _mm_shufflelo_epi16::<IMM8>(sse_val);
ConstUnionHack128bit { __m128i: sse_val }.i16x8
}
#[cfg(not(target_feature = "sse2"))]
{
// Decode the four 2-bit lane indices from IMM8.
let indexs = [
IMM8 as usize & 0b11,
(IMM8 as usize >> 2) & 0b11,
(IMM8 as usize >> 4) & 0b11,
(IMM8 as usize >> 6) & 0b11,
];
let arr: &[i16; 8] = self.as_ref();
Self::from([
arr[indexs[0]],
arr[indexs[1]],
arr[indexs[2]],
arr[indexs[3]],
arr[4],
arr[5],
arr[6],
arr[7],
])
}
}
/// Shuffles only the high half (lanes 4-7) by `IMM8`; lanes 0-3 pass
/// through unchanged. Same IMM8 encoding as [`Self::swizzle`].
#[inline]
pub fn swizzle_hi<const IMM8: i32>(self: i16x8) -> i16x8 {
#[cfg(target_feature = "sse2")]
unsafe {
let mut sse_val: __m128i = ConstUnionHack128bit { i16x8: self }.__m128i;
sse_val = _mm_shufflehi_epi16::<IMM8>(sse_val);
ConstUnionHack128bit { __m128i: sse_val }.i16x8
}
#[cfg(not(target_feature = "sse2"))]
{
// Decode the four 2-bit lane indices from IMM8 (used with +4 offset).
let indexs = [
IMM8 as usize & 0b11,
(IMM8 as usize >> 2) & 0b11,
(IMM8 as usize >> 4) & 0b11,
(IMM8 as usize >> 6) & 0b11,
];
let arr: &[i16; 8] = self.as_ref();
Self::from([
arr[0],
arr[1],
arr[2],
arr[3],
arr[indexs[0] + 4],
arr[indexs[1] + 4],
arr[indexs[2] + 4],
arr[indexs[3] + 4],
])
}
}
}
impl From<[i16; 8]> for i16x8 {
    /// Builds a vector from an array of eight lanes, lane 0 first.
    // `#[inline]` added for consistency with `From<i16>` below; a trivial
    // conversion wrapper like this should be inlinable across crates.
    #[inline]
    fn from(value: [i16; 8]) -> Self {
        // SIMD configuration: hand the array to the `wide` backend.
        #[cfg(not(feature = "scalar-math"))]
        {
            i16x8(crate::wide::i16x8::from(value))
        }
        // Scalar configuration: the array itself is the storage.
        #[cfg(feature = "scalar-math")]
        {
            Self(value)
        }
    }
}
impl From<i16x8> for [i16; 8] {
    /// Extracts the eight lanes as a plain array, lane 0 first.
    // `#[inline]` added for consistency with the other conversion impls.
    #[inline]
    fn from(value: i16x8) -> Self {
        // `add_into!` converts the inner storage (array or `wide` type)
        // into a plain array — see the project's macros module.
        add_into!(value.0)
    }
}
// Common numeric constants from the project's `NumConstEx` trait, each
// splatted across all eight lanes.
impl NumConstEx for i16x8 {
const ZERO: Self = Self::const_splat(0_i16);
const ONE: Self = Self::const_splat(1_i16);
const TWO: Self = Self::const_splat(2_i16);
}
impl From<i16> for i16x8 {
    /// Broadcasts a single scalar into every one of the eight lanes.
    #[inline]
    fn from(val: i16) -> Self {
        let lanes = [val; 8];
        lanes.into()
    }
}
// Operator overloads for the SIMD (`wide`-backed) configuration. Neg, add,
// sub and mul forward directly to the inner SIMD type; div and rem fall back
// to lane-wise scalar code — presumably because the backend provides no
// integer division, TODO confirm against `crate::wide::i16x8`.
#[cfg(all(
feature = "std",
not(feature = "libm_force"),
not(feature = "scalar-math")
))]
mod impl_i16_ops {
use super::*;
impl_op_ex!(-|a: &i16x8| -> i16x8 { i16x8(-a.0) });
impl_op_ex!(+ |a: &i16x8, b: &i16x8| -> i16x8 { i16x8(a.0 + b.0) });
impl_op_ex!(-|a: &i16x8, b: &i16x8| -> i16x8 { i16x8(a.0 - b.0) });
impl_op_ex!(*|a: &i16x8, b: &i16x8| -> i16x8 { i16x8(a.0 * b.0) });
impl_op_ex!(/ |a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x / y) });
impl_op_ex!(% |a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x % y) });
impl_op_ex!(+= |a: &mut i16x8, b: &i16x8| { a.0 += b.0 });
impl_op_ex!(-= |a: &mut i16x8, b: &i16x8| { a.0 -= b.0 });
impl_op_ex!(/= |a: &mut i16x8, b: &i16x8| { *a = *a / *b });
impl_op_ex!(*= |a: &mut i16x8, b: &i16x8| { a.0 *= b.0 });
impl_op_ex!(%= |a: &mut i16x8, b: &i16x8| { *a = *a % *b });
}
// Operator overloads for the scalar/libm configurations: every operator is
// implemented lane-wise via `map`/`zip_map`, and the compound-assignment
// forms are defined in terms of the binary ones.
#[cfg(any(
feature = "libm_force",
feature = "scalar-math",
all(feature = "libm_fallback", not(feature = "std"))
))]
mod impl_i16_ops {
use super::*;
impl_op_ex!(-|a: &i16x8| -> i16x8 { i16x8::map(*a, |x| -x) });
impl_op_ex!(+ |a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x + y) });
impl_op_ex!(-|a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x - y) });
impl_op_ex!(*|a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x * y) });
impl_op_ex!(/ |a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x / y) });
impl_op_ex!(% |a: &i16x8, b: &i16x8| -> i16x8 { i16x8::zip_map(*a, *b, |x, y| x % y) });
impl_op_ex!(+= |a: &mut i16x8, b: &i16x8| { *a = *a + *b });
impl_op_ex!(-= |a: &mut i16x8, b: &i16x8| { *a = *a - *b });
impl_op_ex!(/= |a: &mut i16x8, b: &i16x8| { *a = *a / *b });
impl_op_ex!(*= |a: &mut i16x8, b: &i16x8| { *a = *a * *b });
impl_op_ex!(%= |a: &mut i16x8, b: &i16x8| { *a = *a % *b });
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for i16x8 {
    /// Formats the lanes as a parenthesized, comma-separated tuple,
    /// lane 0 first — e.g. `(1, 2, 3, 4, 5, 6, 7, 8)`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let &[e0, e1, e2, e3, e4, e5, e6, e7] = self.as_ref();
        write!(
            f,
            "({}, {}, {}, {}, {}, {}, {}, {})",
            e0, e1, e2, e3, e4, e5, e6, e7
        )
    }
}
// Remaining trait impls are generated by project macros (see the macros
// module): lane-wise PartialEq, shift operators taking the listed scalar
// integer types, bitwise ops, and Default.
impl_wide_partial_eq!(i16x8);
impl_wide_interge_shift_ops!(i16x8, i8, i16, i32, u8, u16, u32);
impl_wide_bit_ops!(i16x8);
// Marker trait: the required bit/shift operators are provided above.
impl IntegerBitOps for i16x8 {}
impl_default!(i16x8);
impl AsRef<[i16; 8]> for i16x8 {
/// Borrows the lanes as a plain array (lane 0 first), regardless of the
/// active storage backend.
#[inline]
fn as_ref(&self) -> &[i16; 8] {
// `as_array_ref!` reinterprets the inner storage as `&[i16; 8]` —
// presumably relying on the struct's 128-bit layout guarantees; see
// the project's macros module.
as_array_ref!(self.0)
}
}