use crate::f32::ffi::{UnitVec4, Vec3A};
use crate::{BVec4A, Vec2, Vec3};
#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::ops::*;
use crate::nums::*;
use auto_ops_det::impl_op_ex;
use core::ops;
/// Zero-cost bit-cast between the SIMD-backed vector and the FFI-safe `Vec4`.
/// Reading one field after writing the other reinterprets the same 128 bits.
// SAFETY premise: both variants are four-`f32`, 128-bit layouts. `Vec4` is
// `repr(transparent)` over `super::C128`; the SIMD side is presumed to share
// that size/alignment — TODO confirm against `simd_alias`'s definition.
union UnionCastFfi {
simd: crate::f32::simd_alias::Vec4,
ffi: Vec4,
}
impl From<crate::f32::simd_alias::Vec4> for Vec4 {
#[inline]
fn from(simd: crate::f32::simd_alias::Vec4) -> Self {
unsafe { UnionCastFfi { simd }.ffi }
}
}
/// Converts the FFI-safe `Vec4` into the SIMD representation.
impl From<Vec4> for crate::f32::simd_alias::Vec4 {
#[inline]
fn from(ffi: Vec4) -> Self {
// SAFETY: inverse of the cast above — same 128-bit reinterpretation.
unsafe { UnionCastFfi { ffi }.simd }
}
}
/// Converts the raw 128-bit storage directly into the SIMD vector by first
/// wrapping it in the transparent `Vec4` newtype and reusing the union cast.
impl From<super::C128> for crate::f32::simd_alias::Vec4 {
#[inline]
fn from(t: super::C128) -> Self {
Self::from(Vec4(t))
}
}
/// Creates a 4D vector — free-function shorthand for [`Vec4::new`].
#[inline]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
Vec4::new(x, y, z, w)
}
/// An FFI-safe 4-dimensional `f32` vector.
///
/// `repr(transparent)` over the raw 128-bit storage `C128`, so it can be
/// bit-cast to/from the SIMD representation (see `UnionCastFfi`) and to
/// `[f32; 4]` (see `to_array`/`AsRef`). Component access (`.x` … `.w`) is
/// provided through the `Deref` impl further down.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) super::C128);
/// Inherent methods. Most operations delegate to the SIMD-backed
/// `crate::f32::simd_alias::Vec4` through the zero-cost union cast; the
/// remainder use per-component scalar helpers from `crate::nums`.
impl Vec4 {
/// All lanes zero.
pub const ZERO: Self = Self::splat(0.0_f32);
/// All lanes one.
pub const ONE: Self = Self::splat(1.0_f32);
/// All lanes negative one.
pub const NEG_ONE: Self = Self::splat(-1.0_f32);
/// All lanes `f32::NAN`.
pub const NAN: Self = Self::splat(f32::NAN);
/// Unit vector along the X axis.
pub const X: Self = Self::new(1.0_f32, 0.0_f32, 0.0_f32, 0.0_f32);
/// Unit vector along the Y axis.
pub const Y: Self = Self::new(0.0_f32, 1.0_f32, 0.0_f32, 0.0_f32);
/// Unit vector along the Z axis.
pub const Z: Self = Self::new(0.0_f32, 0.0_f32, 1.0_f32, 0.0_f32);
/// Unit vector along the W axis.
pub const W: Self = Self::new(0.0_f32, 0.0_f32, 0.0_f32, 1.0_f32);
/// Negated X axis unit vector.
pub const NEG_X: Self = Self::new(-1.0_f32, 0.0_f32, 0.0_f32, 0.0_f32);
/// Negated Y axis unit vector.
pub const NEG_Y: Self = Self::new(0.0_f32, -1.0_f32, 0.0_f32, 0.0_f32);
/// Negated Z axis unit vector.
pub const NEG_Z: Self = Self::new(0.0_f32, 0.0_f32, -1.0_f32, 0.0_f32);
/// Negated W axis unit vector.
pub const NEG_W: Self = Self::new(0.0_f32, 0.0_f32, 0.0_f32, -1.0_f32);
/// The four axis unit vectors, in `[X, Y, Z, W]` order.
pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
/// Creates a vector from its four components.
#[inline]
pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
Self(super::C128(x, y, z, w))
}
/// Creates a vector with all four lanes set to `v`.
#[inline]
pub const fn splat(v: f32) -> Self {
Self(super::C128(v, v, v, v))
}
/// Lane-wise select: for each lane, takes `if_true` where `mask` is set and
/// `if_false` otherwise. Delegates to the SIMD implementation.
#[inline]
pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
crate::f32::simd_alias::Vec4::select(mask.into(), if_true.into(), if_false.into()).into()
}
/// Creates a vector from `[x, y, z, w]`.
#[inline]
pub const fn from_array(a: [f32; 4]) -> Self {
Self::new(a[0], a[1], a[2], a[3])
}
/// Returns the components as `[x, y, z, w]`.
#[inline]
pub const fn to_array(&self) -> [f32; 4] {
// SAFETY: `Vec4` is `repr(transparent)` over `C128` (four f32s), so it
// has the same layout as `[f32; 4]`.
unsafe { *(self as *const Vec4 as *const [f32; 4]) }
}
/// Creates a vector from the first four elements of `slice`.
///
/// # Panics
/// Panics if `slice` has fewer than four elements (slice indexing).
#[inline]
pub const fn from_slice(slice: &[f32]) -> Self {
Self::new(slice[0], slice[1], slice[2], slice[3])
}
/// Writes the four components into `slice`. Delegates to the SIMD
/// implementation; presumably panics if `slice` is too short — TODO confirm.
#[inline]
pub fn write_to_slice(self, slice: &mut [f32]) {
crate::f32::simd_alias::Vec4::write_to_slice(self.into(), slice)
}
/// Drops the `w` component, returning `(x, y, z)` as a [`Vec3`].
#[inline]
pub fn truncate(self) -> Vec3 {
use crate::swizzles::Vec4Swizzles;
self.xyz()
}
/// Dot product of `self` and `rhs`.
// NOTE(review): `pub(crate)` while the rest of the API is `pub` — confirm
// this visibility is intentional (a public wrapper may exist elsewhere).
#[inline]
pub(crate) fn dot(self, rhs: Self) -> f32 {
crate::f32::simd_alias::Vec4::dot(self.into(), rhs.into())
}
/// Component-wise minimum of `self` and `rhs`.
#[inline]
pub fn min(self, rhs: Self) -> Self {
crate::f32::simd_alias::Vec4::min(self.into(), rhs.into()).into()
}
/// Component-wise maximum of `self` and `rhs`.
#[inline]
pub fn max(self, rhs: Self) -> Self {
crate::f32::simd_alias::Vec4::max(self.into(), rhs.into()).into()
}
/// Component-wise clamp of `self` into `[min, max]`.
///
/// # Panics
/// With the `glam_assert` feature (presumably — TODO confirm the macro's
/// gating), panics unless `min <= max` component-wise.
#[inline]
pub fn clamp(self, min: Self, max: Self) -> Self {
glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
self.max(min).min(max)
}
/// Smallest of the four components.
#[inline]
pub fn min_element(self) -> f32 {
crate::f32::simd_alias::Vec4::min_element(self.into())
}
/// Largest of the four components.
#[inline]
pub fn max_element(self) -> f32 {
crate::f32::simd_alias::Vec4::max_element(self.into())
}
/// Component-wise `==` comparison, as a lane mask.
#[inline]
pub fn cmpeq(self, rhs: Self) -> BVec4A {
crate::f32::simd_alias::Vec4::cmpeq(self.into(), rhs.into()).into()
}
/// Component-wise `!=` comparison, as a lane mask.
#[inline]
pub fn cmpne(self, rhs: Self) -> BVec4A {
crate::f32::simd_alias::Vec4::cmpne(self.into(), rhs.into()).into()
}
/// Component-wise `>=` comparison, as a lane mask.
#[inline]
pub fn cmpge(self, rhs: Self) -> BVec4A {
crate::f32::simd_alias::Vec4::cmpge(self.into(), rhs.into()).into()
}
/// Component-wise `>` comparison, as a lane mask.
#[inline]
pub fn cmpgt(self, rhs: Self) -> BVec4A {
crate::f32::simd_alias::Vec4::cmpgt(self.into(), rhs.into()).into()
}
/// Component-wise `<=` comparison, as a lane mask.
#[inline]
pub fn cmple(self, rhs: Self) -> BVec4A {
crate::f32::simd_alias::Vec4::cmple(self.into(), rhs.into()).into()
}
/// Component-wise `<` comparison, as a lane mask.
#[inline]
pub fn cmplt(self, rhs: Self) -> BVec4A {
crate::f32::simd_alias::Vec4::cmplt(self.into(), rhs.into()).into()
}
/// Component-wise absolute value.
#[inline]
pub fn abs(self) -> Self {
crate::f32::simd_alias::Vec4::abs(self.into()).into()
}
/// Component-wise signum. Exact semantics for `-0.0`/NaN are those of the
/// SIMD implementation — TODO confirm if they matter to callers.
#[inline]
pub fn signum(self) -> Self {
crate::f32::simd_alias::Vec4::signum(self.into()).into()
}
/// `true` iff all four components are finite (not NaN, not infinite).
#[inline]
pub fn is_finite(self) -> bool {
self.x.is_finite() && self.y.is_finite() && self.z.is_finite() && self.w.is_finite()
}
/// `true` if any component is NaN.
#[inline]
pub fn is_nan(self) -> bool {
self.is_nan_mask().any()
}
/// Lane mask of which components are NaN.
#[inline]
pub fn is_nan_mask(self) -> BVec4A {
crate::f32::simd_alias::Vec4::is_nan_mask(self.into()).into()
}
/// Component-wise round (tie-breaking per the SIMD implementation).
#[inline]
pub fn round(self) -> Self {
crate::f32::simd_alias::Vec4::round(self.into()).into()
}
/// Component-wise floor.
#[inline]
pub fn floor(self) -> Self {
crate::f32::simd_alias::Vec4::floor(self.into()).into()
}
/// Component-wise ceil.
#[inline]
pub fn ceil(self) -> Self {
crate::f32::simd_alias::Vec4::ceil(self.into()).into()
}
/// Component-wise fractional part, computed as `self - self.floor()`
/// (so each lane is in `[0, 1)` for finite inputs).
#[inline]
pub fn fract(self) -> Self {
self - self.floor()
}
/// Component-wise `e^x` via the scalar `expf` helper.
#[inline]
pub fn exp(self) -> Self {
Self::new(self.x.expf(), self.y.expf(), self.z.expf(), self.w.expf())
}
/// Raises each component to the power `n` via the scalar `powff` helper.
#[inline]
pub fn powf(self, n: f32) -> Self {
Self::new(
self.x.powff(n),
self.y.powff(n),
self.z.powff(n),
self.w.powff(n),
)
}
/// Component-wise reciprocal (`1.0 / x`, up to SIMD approximation — TODO
/// confirm whether the SIMD `recip` is exact or an estimate).
#[inline]
pub fn recip(self) -> Self {
crate::f32::simd_alias::Vec4::recip(self.into()).into()
}
/// Length (Euclidean norm) of the vector: `sqrt(self · self)`.
#[doc(alias = "magnitude")]
#[inline]
pub fn length(self) -> f32 {
self.dot(self).sqrtf()
}
/// Squared length — cheaper than [`Self::length`] (no square root).
#[doc(alias = "magnitude2")]
#[inline]
pub fn length_squared(self) -> f32 {
self.dot(self)
}
/// `1.0 / length()`. Infinite for the zero vector.
#[inline]
pub fn length_recip(self) -> f32 {
self.length().recip()
}
/// Euclidean distance between `self` and `rhs`.
#[inline]
pub fn distance(self, rhs: Self) -> f32 {
(self - rhs).length()
}
/// Squared Euclidean distance between `self` and `rhs`.
#[inline]
pub fn distance_squared(self, rhs: Self) -> f32 {
(self - rhs).length_squared()
}
/// Returns `self` scaled to unit length. Delegates to the SIMD
/// implementation; behavior on zero or non-finite input is whatever that
/// implementation does — use [`Self::try_normalize`] or
/// [`Self::normalize_or_zero`] for checked variants.
#[must_use]
#[inline]
pub fn normalize(self) -> Self {
crate::f32::simd_alias::Vec4::normalize(self.into()).into()
}
/// Returns the normalized vector together with the original length.
#[must_use]
#[inline]
pub fn normalize_and_length(self) -> (Self, f32) {
let (v, len) = crate::f32::simd_alias::Vec4::normalize_and_length(self.into());
(v.into(), len)
}
/// Normalizes and wraps the result in [`UnitVec4`] without re-checking
/// the invariant (`as_unit_vec4_unchecked`).
#[must_use]
#[inline]
pub fn normalize_to_unit(self) -> UnitVec4 {
self.normalize().as_unit_vec4_unchecked()
}
/// Like [`Self::normalize_to_unit`], also returning the original length.
#[must_use]
#[inline]
pub fn normalize_to_unit_and_length(self) -> (UnitVec4, f32) {
let res = self.normalize_and_length();
(res.0.as_unit_vec4_unchecked(), res.1)
}
/// Normalizes `self`, returning `None` when the length is non-finite or
/// not strictly greater than `min_len`.
#[must_use]
#[inline]
pub fn try_normalize(self, min_len: f32) -> Option<Self> {
let length = self.length();
if length.is_finite() && length > min_len {
Some(self * length.recip())
} else {
None
}
}
/// [`Self::try_normalize`] returning a [`UnitVec4`] on success.
#[must_use]
#[inline]
pub fn try_normalize_to_unit(self, min_len: f32) -> Option<UnitVec4> {
let length = self.length();
if length.is_finite() && length > min_len {
Some((self * length.recip()).as_unit_vec4_unchecked())
} else {
None
}
}
/// [`Self::try_normalize_to_unit`] that also returns the original length.
#[must_use]
#[inline]
pub fn try_normalize_to_unit_and_length(self, min_len: f32) -> Option<(UnitVec4, f32)> {
let length = self.length();
if length.is_finite() && length > min_len {
Some(((self * length.recip()).as_unit_vec4_unchecked(), length))
} else {
None
}
}
/// Normalizes `self`, or returns [`Self::ZERO`] when the reciprocal length
/// is not finite and positive (zero-length or non-finite input).
#[must_use]
#[inline]
pub fn normalize_or_zero(self) -> Self {
let rcp = self.length_recip();
if rcp.is_finite() && rcp > 0.0 {
self * rcp
} else {
Self::ZERO
}
}
/// `true` when the squared length is within `1e-4` of `1.0`.
#[inline]
pub fn is_normalized(self) -> bool {
(self.length_squared() - 1.0_f32).absf() <= 1e-4
}
/// Projection of `self` onto `rhs`: `rhs * (self·rhs) / (rhs·rhs)`.
/// Debug-asserts that `rhs` has finite, non-zero squared length.
#[must_use]
#[inline]
pub fn project_onto(self, rhs: Self) -> Self {
let other_len_sq_rcp = rhs.dot(rhs).recip();
glam_assert!(
other_len_sq_rcp.is_finite(),
"Trying to project onto infinite rhs = {:?}",
rhs
);
rhs * self.dot(rhs) * other_len_sq_rcp
}
/// Component of `self` perpendicular to `rhs` (self minus its projection).
#[must_use]
#[inline]
pub fn reject_from(self, rhs: Self) -> Self {
self - self.project_onto(rhs)
}
/// Projection onto an already-normalized direction — skips the length
/// division of [`Self::project_onto`].
#[must_use]
#[inline]
pub fn project_onto_normalized(self, rhs: UnitVec4) -> Self {
rhs.as_vec4() * self.dot(rhs.as_vec4())
}
/// Component of `self` perpendicular to the unit direction `rhs`.
#[must_use]
#[inline]
pub fn reject_from_normalized(self, rhs: UnitVec4) -> Self {
self - self.project_onto_normalized(rhs)
}
/// Linear interpolation: `self + (rhs - self) * s` (`s = 0` → `self`,
/// `s = 1` → `rhs`; `s` is not clamped).
#[doc(alias = "mix")]
#[inline]
pub fn lerp(self, rhs: Self, s: f32) -> Self {
self + ((rhs - self) * s)
}
/// `true` when every component of `self` is within `max_abs_diff` of the
/// corresponding component of `rhs`.
#[inline]
pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
}
/// Rescales `self` so its length lies in `[min, max]`; direction is kept.
/// Comparisons are done on squared lengths to defer the square root.
#[inline]
pub fn clamp_length(self, min: f32, max: f32) -> Self {
glam_assert!(min <= max);
let length_sq = self.length_squared();
if length_sq < min * min {
self * (length_sq.sqrtf().recip() * min)
} else if length_sq > max * max {
self * (length_sq.sqrtf().recip() * max)
} else {
self
}
}
/// Rescales `self` so its length is at most `max`.
#[inline]
pub fn clamp_length_max(self, max: f32) -> Self {
let length_sq = self.length_squared();
if length_sq > max * max {
self * (length_sq.sqrtf().recip() * max)
} else {
self
}
}
/// Rescales `self` so its length is at least `min`.
#[inline]
pub fn clamp_length_min(self, min: f32) -> Self {
let length_sq = self.length_squared();
if length_sq < min * min {
self * (length_sq.sqrtf().recip() * min)
} else {
self
}
}
/// Computes `self * a + b` per component. Uses the SIMD fused path when
/// compiled with the `fma` target feature; otherwise falls back to the
/// scalar `mul_addf` helper per lane.
#[inline]
pub fn mul_add(self, a: Self, b: Self) -> Self {
#[cfg(target_feature = "fma")]
{
Self::from(crate::f32::simd_alias::Vec4::mul_add(
self.into(),
a.into(),
b.into(),
))
}
#[cfg(not(target_feature = "fma"))]
Self::new(
self.x.mul_addf(a.x, b.x),
self.y.mul_addf(a.y, b.y),
self.z.mul_addf(a.z, b.z),
self.w.mul_addf(a.w, b.w),
)
}
/// Widens each component to `f64`.
#[inline]
pub fn as_dvec4(&self) -> crate::DVec4 {
crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
}
/// Casts each component to `i32` (Rust `as` float→int cast: truncates
/// toward zero, saturating on overflow, NaN → 0).
#[inline]
pub fn as_ivec4(&self) -> crate::IVec4 {
crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
}
/// Casts each component to `u32` (same `as` cast semantics as above;
/// negative values saturate to 0).
#[inline]
pub fn as_uvec4(&self) -> crate::UVec4 {
crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
}
}
impl Default for Vec4 {
#[inline]
fn default() -> Self {
Self::ZERO
}
}
/// Component-wise equality via the SIMD `cmpeq` mask. Note there is no `Eq`
/// impl; presumably any NaN lane makes `eq` return `false` (IEEE semantics of
/// the underlying comparison) — TODO confirm against `simd_alias::cmpeq`.
impl PartialEq for Vec4 {
#[inline]
fn eq(&self, rhs: &Self) -> bool {
self.cmpeq(*rhs).all()
}
}
// Arithmetic operators. Each `impl_op_ex!` invocation expands to all
// owned/borrowed operand combinations; the math itself runs on the SIMD type
// after a zero-cost cast. Groups below: `/`, `*`, `+`, `-`, `%`, each with
// Vec4∘Vec4, Vec4∘f32 (plus compound assignment) and f32∘Vec4 forms.
// Division.
impl_op_ex!(/ |a: &Vec4, b: &Vec4| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) / crate::f32::simd_alias::Vec4::from(b.0)).into()
});
impl_op_ex!(/= |a: &mut Vec4, b: &Vec4| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) / crate::f32::simd_alias::Vec4::from(b.0))
});
impl_op_ex!(/ |a: &Vec4, b: &f32| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) / b).into()
});
impl_op_ex!(/= |a: &mut Vec4, b: &f32| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) / b)
});
impl_op_ex!(/ |a: &f32, b: &Vec4| -> Vec4 {
(a / crate::f32::simd_alias::Vec4::from(b.0)).into()
});
// Multiplication.
impl_op_ex!(*|a: &Vec4, b: &Vec4| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) * crate::f32::simd_alias::Vec4::from(b.0)).into()
});
impl_op_ex!(*= |a: &mut Vec4, b: &Vec4| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) * crate::f32::simd_alias::Vec4::from(b.0))
});
impl_op_ex!(*|a: &Vec4, b: &f32| -> Vec4 { (crate::f32::simd_alias::Vec4::from(a.0) * b).into() });
impl_op_ex!(*= |a: &mut Vec4, b: &f32| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) * b)
});
impl_op_ex!(*|a: &f32, b: &Vec4| -> Vec4 { (a * crate::f32::simd_alias::Vec4::from(b.0)).into() });
// Addition. Note `Vec4 + f32` broadcasts the scalar to all lanes.
impl_op_ex!(+ |a: &Vec4, b: &Vec4| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) + crate::f32::simd_alias::Vec4::from(b.0)).into()
});
impl_op_ex!(+= |a: &mut Vec4, b: &Vec4| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) + crate::f32::simd_alias::Vec4::from(b.0))
});
impl_op_ex!(+ |a: &Vec4, b: &f32| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) + b).into()
});
impl_op_ex!(+= |a: &mut Vec4, b: &f32| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) + b)
});
impl_op_ex!(+ |a: &f32, b: &Vec4| -> Vec4 {
(a + crate::f32::simd_alias::Vec4::from(b.0)).into()
});
// Subtraction.
impl_op_ex!(-|a: &Vec4, b: &Vec4| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) - crate::f32::simd_alias::Vec4::from(b.0)).into()
});
impl_op_ex!(-= |a: &mut Vec4, b: &Vec4| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) - crate::f32::simd_alias::Vec4::from(b.0))
});
impl_op_ex!(-|a: &Vec4, b: &f32| -> Vec4 { (crate::f32::simd_alias::Vec4::from(a.0) - b).into() });
impl_op_ex!(-= |a: &mut Vec4, b: &f32| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) - b)
});
impl_op_ex!(-|a: &f32, b: &Vec4| -> Vec4 { (a - crate::f32::simd_alias::Vec4::from(b.0)).into() });
// Remainder (component-wise `%`, f32 semantics).
impl_op_ex!(% |a: &Vec4, b: &Vec4| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) % crate::f32::simd_alias::Vec4::from(b.0)).into()
});
impl_op_ex!(%= |a: &mut Vec4, b: &Vec4| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) % crate::f32::simd_alias::Vec4::from(b.0))
});
impl_op_ex!(% |a: &Vec4, b: &f32| -> Vec4 {
(crate::f32::simd_alias::Vec4::from(a.0) % b).into()
});
impl_op_ex!(%= |a: &mut Vec4, b: &f32| {
*a = Vec4::from(crate::f32::simd_alias::Vec4::from(a.0) % b)
});
impl_op_ex!(% |a: &f32, b: &Vec4| -> Vec4 {
(a % crate::f32::simd_alias::Vec4::from(b.0)).into()
});
/// Sums an iterator of borrowed vectors component-wise, starting at zero.
impl<'a> Sum<&'a Self> for Vec4 {
#[inline]
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = &'a Self>,
{
let mut acc = Self::ZERO;
for v in iter {
acc = acc + *v;
}
acc
}
}
/// Multiplies an iterator of borrowed vectors component-wise, starting at one.
impl<'a> Product<&'a Self> for Vec4 {
#[inline]
fn product<I>(iter: I) -> Self
where
I: Iterator<Item = &'a Self>,
{
let mut acc = Self::ONE;
for v in iter {
acc = acc * *v;
}
acc
}
}
/// Component-wise negation, delegated to the SIMD implementation.
impl Neg for Vec4 {
type Output = Self;
#[inline]
fn neg(self) -> Self {
crate::f32::simd_alias::Vec4::neg(self.into()).into()
}
}
/// Read-only component access: `0 → x`, `1 → y`, `2 → z`, `3 → w`.
///
/// # Panics
/// Panics for any index greater than 3.
impl Index<usize> for Vec4 {
type Output = f32;
#[inline]
fn index(&self, index: usize) -> &Self::Output {
// Field access goes through the `Deref` impl below.
match index {
0 => &self.x,
1 => &self.y,
2 => &self.z,
3 => &self.w,
_ => panic!("index out of bounds"),
}
}
}
/// Mutable component access; same index mapping and panic behavior as `Index`.
impl IndexMut<usize> for Vec4 {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
// Field access goes through the `DerefMut` impl below.
match index {
0 => &mut self.x,
1 => &mut self.y,
2 => &mut self.z,
3 => &mut self.w,
_ => panic!("index out of bounds"),
}
}
}
/// Human-readable form: `[x, y, z, w]`. Not available on SPIR-V targets
/// (no `core::fmt` there).
#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec4 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
}
}
/// Debug form: `Vec4(x, y, z, w)` via `debug_tuple`. Not available on
/// SPIR-V targets.
#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec4 {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_tuple(stringify!(Vec4))
.field(&self.x)
.field(&self.y)
.field(&self.z)
.field(&self.w)
.finish()
}
}
impl From<[f32; 4]> for Vec4 {
#[inline]
fn from(a: [f32; 4]) -> Self {
crate::f32::simd_alias::Vec4::from(a).into()
}
}
impl From<Vec4> for [f32; 4] {
#[inline]
fn from(v: Vec4) -> Self {
crate::f32::simd_alias::Vec4::from(v).into()
}
}
impl From<(f32, f32, f32, f32)> for Vec4 {
#[inline]
fn from(t: (f32, f32, f32, f32)) -> Self {
Self::new(t.0, t.1, t.2, t.3)
}
}
impl From<Vec4> for (f32, f32, f32, f32) {
#[inline]
fn from(v: Vec4) -> Self {
crate::f32::simd_alias::Vec4::from(v).into()
}
}
/// Builds a vector from a [`Vec3A`] and a `w` component, via `Vec3A::extend`.
impl From<(Vec3A, f32)> for Vec4 {
#[inline]
fn from((v, w): (Vec3A, f32)) -> Self {
v.extend(w)
}
}
impl From<(f32, Vec3A)> for Vec4 {
#[inline]
fn from((x, v): (f32, Vec3A)) -> Self {
Self::new(x, v.x, v.y, v.z)
}
}
impl From<(Vec3, f32)> for Vec4 {
#[inline]
fn from((v, w): (Vec3, f32)) -> Self {
Self::new(v.x, v.y, v.z, w)
}
}
impl From<(f32, Vec3)> for Vec4 {
#[inline]
fn from((x, v): (f32, Vec3)) -> Self {
Self::new(x, v.x, v.y, v.z)
}
}
impl From<(Vec2, f32, f32)> for Vec4 {
#[inline]
fn from((v, z, w): (Vec2, f32, f32)) -> Self {
Self::new(v.x, v.y, z, w)
}
}
impl From<(Vec2, Vec2)> for Vec4 {
#[inline]
fn from((v, u): (Vec2, Vec2)) -> Self {
Self::new(v.x, v.y, u.x, u.y)
}
}
/// Provides named field access (`.x`, `.y`, `.z`, `.w`) by reinterpreting
/// `Vec4` as the generic field-struct `crate::deref::Vec4<f32>`.
impl Deref for Vec4 {
type Target = crate::deref::Vec4<f32>;
#[inline]
fn deref(&self) -> &Self::Target {
// SAFETY: assumes `crate::deref::Vec4<f32>` has the same layout as
// four consecutive `f32`s, matching `repr(transparent)` `Vec4` —
// presumably guaranteed by that type's definition; TODO confirm.
unsafe { &*(self as *const Self).cast() }
}
}
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for Vec4 {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: same layout-compatibility argument as `Deref::deref`.
unsafe { &mut *(self as *mut Self).cast() }
}
}
/// Borrows the vector as a `[f32; 4]`. Not available on SPIR-V targets.
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
#[inline]
fn as_ref(&self) -> &[f32; 4] {
// SAFETY: `Vec4` is `repr(transparent)` over `C128` (four f32s),
// so it is layout-compatible with `[f32; 4]`.
unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
}
}
/// Mutably borrows the vector as a `[f32; 4]`. Not available on SPIR-V targets.
#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
#[inline]
fn as_mut(&mut self) -> &mut [f32; 4] {
// SAFETY: same layout-compatibility argument as `AsRef`.
unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
}
}