use crate::Vec4;
#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::ops::*;
#[cfg(all(vec4_sse2, target_arch = "x86"))]
use core::arch::x86::*;
#[cfg(all(vec4_sse2, target_arch = "x86_64"))]
use core::arch::x86_64::*;
#[cfg(vec4_sse2)]
use core::{cmp::Ordering, hash};
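
/// A 4-dimensional vector mask.
///
/// This type is typically created by comparison methods on `Vec4`. Each lane
/// is all ones when the comparison was true and all zeroes otherwise.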
#[cfg(vec4_sse2)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Vec4Mask(pub(crate) __m128);
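
/// A 4-dimensional vector mask.
///
/// Scalar fallback used when SSE2 is unavailable or the `scalar-math`
/// feature is enabled; each lane is `0` or `!0`.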
#[cfg(vec4_f32)]
#[derive(Clone, Copy, Default, PartialEq, Eq, Ord, PartialOrd, Hash)]
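// Unless `scalar-math` opts out, keep the 16-byte alignment of the SIMD
// representation so the two layouts stay interchangeable.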
#[cfg_attr(not(feature = "scalar-math"), repr(align(16)))]
#[cfg_attr(not(target_arch = "spirv"), repr(C))]
#[cfg_attr(target_arch = "spirv", repr(simd))]
pub struct Vec4Mask(u32, u32, u32, u32);
#[cfg(vec4_sse2)]
impl Default for Vec4Mask {
#[inline]
fn default() -> Self {
unsafe { Self(_mm_setzero_ps()) }
}
}
#[cfg(vec4_sse2)]
impl PartialEq for Vec4Mask {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.as_ref().eq(other.as_ref())
}
}
#[cfg(vec4_sse2)]
impl Eq for Vec4Mask {}
#[cfg(vec4_sse2)]
impl Ord for Vec4Mask {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.as_ref().cmp(other.as_ref())
}
}
#[cfg(vec4_sse2)]
impl PartialOrd for Vec4Mask {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
#[cfg(vec4_sse2)]
impl hash::Hash for Vec4Mask {
#[inline]
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.as_ref().hash(state);
}
}
impl Vec4Mask {
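    /// Creates a new `Vec4Mask` from four booleans.
    ///
    /// A minimal doctest sketch of the lane encoding; it assumes `Vec4Mask`
    /// is re-exported from the crate root, as `glam` does:
    ///
    /// ```
    /// use glam::Vec4Mask;
    /// let mask = Vec4Mask::new(true, false, true, false);
    /// // Lanes are packed into the low four bits: x, y, z, w.
    /// assert_eq!(mask.bitmask(), 0b0101);
    /// ```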
#[inline]
pub fn new(x: bool, y: bool, z: bool, w: bool) -> Self {
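        // Map `false`/`true` to an all-zeroes/all-ones lane via `as usize`.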
const MASK: [u32; 2] = [0, 0xff_ff_ff_ff];
#[cfg(vec4_sse2)]
unsafe {
Self(_mm_set_ps(
f32::from_bits(MASK[w as usize]),
f32::from_bits(MASK[z as usize]),
f32::from_bits(MASK[y as usize]),
f32::from_bits(MASK[x as usize]),
))
}
#[cfg(vec4_f32)]
{
Self(
MASK[x as usize],
MASK[y as usize],
MASK[z as usize],
MASK[w as usize],
)
}
}
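
    /// Returns a bitmask with the lowest four bits set from the lanes of
    /// `self`: bit 0 from `x`, bit 1 from `y`, bit 2 from `z` and bit 3 from
    /// `w`. On SSE2 this is a single `_mm_movemask_ps`.
    ///
    /// A hedged sketch (assuming the `glam` crate-root re-export):
    ///
    /// ```
    /// use glam::Vec4Mask;
    /// assert_eq!(Vec4Mask::new(false, false, false, true).bitmask(), 0b1000);
    /// ```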
#[inline]
pub fn bitmask(self) -> u32 {
#[cfg(vec4_sse2)]
unsafe {
_mm_movemask_ps(self.0) as u32
}
#[cfg(vec4_f32)]
{
(self.0 & 0x1) | (self.1 & 0x1) << 1 | (self.2 & 0x1) << 2 | (self.3 & 0x1) << 3
}
}
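
    /// Returns `true` if any lane is set, i.e. the source comparison was
    /// true for at least one component.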
#[inline]
pub fn any(self) -> bool {
#[cfg(vec4_sse2)]
unsafe {
_mm_movemask_ps(self.0) != 0
}
#[cfg(vec4_f32)]
{
((self.0 | self.1 | self.2 | self.3) & 0x1) != 0
}
}
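
    /// Returns `true` if all lanes are set, i.e. the source comparison was
    /// true for every component.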
#[inline]
pub fn all(self) -> bool {
#[cfg(vec4_sse2)]
unsafe {
_mm_movemask_ps(self.0) == 0xf
}
#[cfg(vec4_f32)]
{
((self.0 & self.1 & self.2 & self.3) & 0x1) != 0
}
}
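
    /// Creates a new `Vec4` by taking each component from `if_true` where
    /// the corresponding lane is set and from `if_false` otherwise.
    ///
    /// A per-lane blend sketch (assuming the `glam` crate-root re-exports):
    ///
    /// ```
    /// use glam::{Vec4, Vec4Mask};
    /// let mask = Vec4Mask::new(true, false, true, false);
    /// let a = Vec4::new(1.0, 1.0, 1.0, 1.0);
    /// let b = Vec4::new(2.0, 2.0, 2.0, 2.0);
    /// assert_eq!(mask.select(a, b), Vec4::new(1.0, 2.0, 1.0, 2.0));
    /// ```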
#[inline]
pub fn select(self, if_true: Vec4, if_false: Vec4) -> Vec4 {
#[cfg(vec4_sse2)]
unsafe {
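            // Bitwise blend: (self & if_true) | (!self & if_false).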
Vec4(_mm_or_ps(
_mm_andnot_ps(self.0, if_false.0),
_mm_and_ps(if_true.0, self.0),
))
}
#[cfg(vec4_f32)]
{
Vec4 {
x: if self.0 != 0 { if_true.x } else { if_false.x },
y: if self.1 != 0 { if_true.y } else { if_false.y },
z: if self.2 != 0 { if_true.z } else { if_false.z },
w: if self.3 != 0 { if_true.w } else { if_false.w },
}
}
}
}
impl BitAnd for Vec4Mask {
type Output = Self;
#[inline]
fn bitand(self, other: Self) -> Self {
#[cfg(vec4_sse2)]
unsafe {
Self(_mm_and_ps(self.0, other.0))
}
#[cfg(vec4_f32)]
{
Self(
self.0 & other.0,
self.1 & other.1,
self.2 & other.2,
self.3 & other.3,
)
}
}
}
impl BitAndAssign for Vec4Mask {
#[inline]
fn bitand_assign(&mut self, other: Self) {
#[cfg(vec4_sse2)]
{
self.0 = unsafe { _mm_and_ps(self.0, other.0) };
}
#[cfg(vec4_f32)]
{
self.0 &= other.0;
self.1 &= other.1;
self.2 &= other.2;
self.3 &= other.3;
}
}
}
impl BitOr for Vec4Mask {
type Output = Self;
#[inline]
fn bitor(self, other: Self) -> Self {
#[cfg(vec4_sse2)]
unsafe {
Self(_mm_or_ps(self.0, other.0))
}
#[cfg(vec4_f32)]
{
Self(
self.0 | other.0,
self.1 | other.1,
self.2 | other.2,
self.3 | other.3,
)
}
}
}
impl BitOrAssign for Vec4Mask {
#[inline]
fn bitor_assign(&mut self, other: Self) {
#[cfg(vec4_sse2)]
{
self.0 = unsafe { _mm_or_ps(self.0, other.0) };
}
#[cfg(vec4_f32)]
{
self.0 |= other.0;
self.1 |= other.1;
self.2 |= other.2;
self.3 |= other.3;
}
}
}
impl Not for Vec4Mask {
type Output = Self;
#[inline]
fn not(self) -> Self {
#[cfg(vec4_sse2)]
unsafe {
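            // `_mm_andnot_ps(a, b)` computes `!a & b`; with `b` all ones
            // this yields the bitwise complement of `self`.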
Self(_mm_andnot_ps(
self.0,
_mm_set_ps1(f32::from_bits(0xff_ff_ff_ff)),
))
}
#[cfg(vec4_f32)]
{
Self(!self.0, !self.1, !self.2, !self.3)
}
}
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec4Mask {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(vec4_sse2)]
{
let arr = self.as_ref();
write!(
f,
"Vec4Mask({:#x}, {:#x}, {:#x}, {:#x})",
arr[0], arr[1], arr[2], arr[3]
)
}
#[cfg(vec4_f32)]
{
write!(
f,
"Vec4Mask({:#x}, {:#x}, {:#x}, {:#x})",
self.0, self.1, self.2, self.3
)
}
}
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec4Mask {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let arr = self.as_ref();
write!(
f,
"[{}, {}, {}, {}]",
arr[0] != 0,
arr[1] != 0,
arr[2] != 0,
arr[3] != 0
)
}
}
impl From<Vec4Mask> for [u32; 4] {
#[inline]
fn from(mask: Vec4Mask) -> Self {
*mask.as_ref()
}
}
#[cfg(vec4_sse2)]
impl From<Vec4Mask> for __m128 {
#[inline]
fn from(t: Vec4Mask) -> Self {
t.0
}
}
impl AsRef<[u32; 4]> for Vec4Mask {
#[inline]
fn as_ref(&self) -> &[u32; 4] {
unsafe { &*(self as *const Self as *const [u32; 4]) }
}
}