use crate::f32::simd_alias::{Mat4, Quat, UnitVec3A, UnitVec4, Vec3A, Vec4};
use crate::{
euler::EulerRot::{self, *},
neon, FloatEx, Mat3, UnitDQuat, UnitVec2, UnitVec3, Vec3,
};
use crate::nums::*;
use core::arch::aarch64::*;
use auto_ops_det::{impl_op_ex, impl_op_ex_commutative};
#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::Product;
use core::ops::{self, Deref, Neg};
/// Bit-cast helper: reinterprets four `f32` components as the SIMD-backed
/// `UnitQuat`. A union is used (rather than a NEON load intrinsic) so the
/// cast stays usable in `const` constructors.
union UnionCast {
    a: [f32; 4],
    v: UnitQuat,
}
/// Bit-cast helper between `Quat` and `UnitQuat`. `UnitQuat` is
/// `#[repr(transparent)]` over `float32x4_t`; `Quat` is presumed to share
/// that 16-byte layout (TODO confirm at its declaration), making the cast a
/// bit-level no-op.
union VecUnionCast {
    v: Quat,
    uv: UnitQuat,
}
impl Quat {
    /// Reinterprets this quaternion as a `UnitQuat` **without** checking that
    /// it is normalized. The caller must guarantee unit length, or code that
    /// relies on the `UnitQuat` invariant (e.g. `inverse` == `conjugate`)
    /// will produce wrong results.
    #[inline]
    pub fn as_unit_quat_unchecked(self) -> UnitQuat {
        // SAFETY: plain bit reinterpretation between two wrappers of the same
        // 16-byte SIMD payload.
        unsafe { VecUnionCast { v: self }.uv }
    }
}
impl UnitQuat {
    /// Converts to a plain `Quat`, dropping the unit-length guarantee (the
    /// value itself is unchanged).
    #[inline]
    pub fn as_quat(self) -> Quat {
        // SAFETY: plain bit reinterpretation between two wrappers of the same
        // 16-byte SIMD payload.
        unsafe { VecUnionCast { uv: self }.v }
    }
}
/// A rotation quaternion stored as a NEON `float32x4_t` in `x, y, z, w` lane
/// order, maintained (by convention) at unit length.
///
/// Operations that cannot preserve the unit-length invariant (addition,
/// scalar multiply/divide) return a plain `Quat` instead; constructors either
/// debug-assert normalization via `glam_assert!` or carry `_unchecked` in
/// their name.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct UnitQuat(pub(crate) float32x4_t);
impl UnitQuat {
    /// The identity rotation `(x: 0, y: 0, z: 0, w: 1)`.
    pub const IDENTITY: Self = Self::from_xyzw_unchecked(0.0_f32, 0.0_f32, 0.0_f32, 1.0_f32);

    /// Builds a quaternion from raw components **without** verifying unit
    /// length; the caller is responsible for the normalization invariant.
    #[inline]
    pub const fn from_xyzw_unchecked(x: f32, y: f32, z: f32, w: f32) -> Self {
        // SAFETY: reinterprets [f32; 4] as the 16-byte SIMD payload; the union
        // cast (instead of a NEON load) keeps this callable in const context.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Like [`Self::from_xyzw_unchecked`], taking an `[x, y, z, w]` array.
    #[inline]
    pub const fn from_array_unchecked(a: [f32; 4]) -> Self {
        Self::from_xyzw_unchecked(a[0], a[1], a[2], a[3])
    }

    /// Reinterprets a unit 4D vector as a quaternion (same lane order).
    /// Debug-asserts normalization when the glam-assert features are on.
    #[inline]
    pub fn from_unit_vec4(v: UnitVec4) -> Self {
        let q: Self = Self(v.0);
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Reads `x, y, z, w` from the first four elements of `slice`.
    /// Panics if `slice.len() < 4`; does not verify normalization.
    #[inline]
    pub fn from_slice_unchecked(slice: &[f32]) -> Self {
        Self::from_xyzw_unchecked(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes `x, y, z, w` into the first four elements of `slice`.
    /// Panics if `slice.len() < 4`.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[0] = self.x;
        slice[1] = self.y;
        slice[2] = self.z;
        slice[3] = self.w;
    }

    /// Rotation of `angle` radians about the already-normalized `axis`.
    /// Delegates to `Quat::from_axis_angle`, then debug-asserts the result.
    #[inline]
    pub fn from_axis_angle(axis: UnitVec3, angle: f32) -> Self {
        // cfg_attr silences clippy::let_and_return when glam_assert! compiles
        // to nothing (builds without the assert features).
        #[cfg_attr(
            not(any(
                all(debug_assertions, feature = "debug-glam-assert"),
                feature = "glam-assert"
            )),
            allow(clippy::let_and_return)
        )]
        let q = Quat::from_axis_angle(axis, angle).as_unit_quat_unchecked();
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Rotation from an axis-angle vector encoded as `axis * angle`
    /// (rotation-vector / exponential-map form).
    #[inline]
    pub fn from_scaled_axis(v: Vec3) -> Self {
        #[cfg_attr(
            not(any(
                all(debug_assertions, feature = "debug-glam-assert"),
                feature = "glam-assert"
            )),
            allow(clippy::let_and_return)
        )]
        let q = Quat::from_scaled_axis(v).as_unit_quat_unchecked();
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Rotation of `angle` radians about the X axis: half-angle sine in the
    /// x lane, cosine in w.
    #[inline]
    pub fn from_rotation_x(angle: f32) -> Self {
        glam_assert!(angle.is_finite(), "angle {:?} is not finite.", angle);
        let (s, c) = (angle * 0.5_f32).sin_cosf();
        Self::from_xyzw_unchecked(s, 0.0_f32, 0.0_f32, c)
    }

    /// Rotation of `angle` radians about the Y axis.
    #[inline]
    pub fn from_rotation_y(angle: f32) -> Self {
        glam_assert!(angle.is_finite(), "angle {:?} is not finite.", angle);
        let (s, c) = (angle * 0.5_f32).sin_cosf();
        Self::from_xyzw_unchecked(0.0_f32, s, 0.0_f32, c)
    }

    /// Rotation of `angle` radians about the Z axis.
    #[inline]
    pub fn from_rotation_z(angle: f32) -> Self {
        glam_assert!(angle.is_finite(), "angle {:?} is not finite.", angle);
        let (s, c) = (angle * 0.5_f32).sin_cosf();
        Self::from_xyzw_unchecked(0.0_f32, 0.0_f32, s, c)
    }

    /// Rotation from Euler angles `a`, `b`, `c` (radians) applied in the
    /// order given by `euler`. Each match arm is the closed-form product of
    /// the three axis rotations, expanded in half-angle sines/cosines; the
    /// result is renormalized to absorb rounding error.
    #[inline]
    pub fn from_euler(euler: EulerRot, a: f32, b: f32, c: f32) -> Self {
        let (sa, ca) = (a * 0.5_f32).sin_cosf();
        let (sb, cb) = (b * 0.5_f32).sin_cosf();
        let (sc, cc) = (c * 0.5_f32).sin_cosf();
        // First six arms: Tait-Bryan orders (three distinct axes).
        // Last six arms: proper Euler orders (first and last axis equal).
        match euler {
            ZYX => Self::from_xyzw_unchecked(
                ca * cb * sc - cc * sa * sb,
                ca * cc * sb + cb * sa * sc,
                cb * cc * sa - ca * sb * sc,
                ca * cb * cc + sa * sb * sc,
            ),
            ZXY => Self::from_xyzw_unchecked(
                ca * cc * sb - cb * sa * sc,
                cc * sa * sb + ca * cb * sc,
                cb * cc * sa + ca * sb * sc,
                ca * cb * cc - sa * sb * sc,
            ),
            YXZ => Self::from_xyzw_unchecked(
                ca * cc * sb + cb * sa * sc,
                cb * cc * sa - ca * sb * sc,
                ca * cb * sc - cc * sa * sb,
                ca * cb * cc + sa * sb * sc,
            ),
            YZX => Self::from_xyzw_unchecked(
                cc * sa * sb + ca * cb * sc,
                cb * cc * sa + ca * sb * sc,
                ca * cc * sb - cb * sa * sc,
                ca * cb * cc - sa * sb * sc,
            ),
            XYZ => Self::from_xyzw_unchecked(
                cb * cc * sa + ca * sb * sc,
                ca * cc * sb - cb * sa * sc,
                cc * sa * sb + ca * cb * sc,
                ca * cb * cc - sa * sb * sc,
            ),
            XZY => Self::from_xyzw_unchecked(
                cb * cc * sa - ca * sb * sc,
                ca * cb * sc - cc * sa * sb,
                ca * cc * sb + cb * sa * sc,
                ca * cb * cc + sa * sb * sc,
            ),
            ZYZ => Self::from_xyzw_unchecked(
                ca * sb * sc - cc * sa * sb,
                ca * cc * sb + sa * sb * sc,
                cb * cc * sa + ca * cb * sc,
                ca * cb * cc - cb * sa * sc,
            ),
            ZXZ => Self::from_xyzw_unchecked(
                ca * cc * sb + sa * sb * sc,
                cc * sa * sb - ca * sb * sc,
                cb * cc * sa + ca * cb * sc,
                ca * cb * cc - cb * sa * sc,
            ),
            YXY => Self::from_xyzw_unchecked(
                ca * cc * sb + sa * sb * sc,
                cb * cc * sa + ca * cb * sc,
                ca * sb * sc - cc * sa * sb,
                ca * cb * cc - cb * sa * sc,
            ),
            YZY => Self::from_xyzw_unchecked(
                cc * sa * sb - ca * sb * sc,
                cb * cc * sa + ca * cb * sc,
                ca * cc * sb + sa * sb * sc,
                ca * cb * cc - cb * sa * sc,
            ),
            XYX => Self::from_xyzw_unchecked(
                cb * cc * sa + ca * cb * sc,
                ca * cc * sb + sa * sb * sc,
                cc * sa * sb - ca * sb * sc,
                ca * cb * cc - cb * sa * sc,
            ),
            XZX => Self::from_xyzw_unchecked(
                cb * cc * sa + ca * cb * sc,
                ca * sb * sc - cc * sa * sb,
                ca * cc * sb + sa * sb * sc,
                ca * cb * cc - cb * sa * sc,
            ),
        }
        .renormalize()
    }

    /// [`Self::from_euler`] with the crate's default rotation order.
    #[inline]
    pub fn from_euler_default(a: f32, b: f32, c: f32) -> Self {
        Self::from_euler(EulerRot::default(), a, b, c)
    }

    /// Rotation from three basis vectors (presumed an orthonormal,
    /// right-handed rotation basis — enforced only via the debug assert).
    #[inline]
    pub(crate) fn from_rotation_axes(x_axis: Vec3, y_axis: Vec3, z_axis: Vec3) -> Self {
        #[cfg_attr(
            not(any(
                all(debug_assertions, feature = "debug-glam-assert"),
                feature = "glam-assert"
            )),
            allow(clippy::let_and_return)
        )]
        let q = Quat::from_rotation_axes(x_axis, y_axis, z_axis).as_unit_quat_unchecked();
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Rotation from a 3x3 rotation matrix (columns taken as the basis).
    #[inline]
    pub fn from_mat3(mat: &Mat3) -> Self {
        Self::from_rotation_axes(mat.x_axis, mat.y_axis, mat.z_axis)
    }

    /// Rotation from the upper 3x3 of a 4x4 matrix (w components dropped).
    #[inline]
    pub fn from_mat4(mat: &Mat4) -> Self {
        Self::from_rotation_axes(
            mat.x_axis.truncate(),
            mat.y_axis.truncate(),
            mat.z_axis.truncate(),
        )
    }

    /// Rotation from the 3x3 part of an affine transform.
    /// NOTE(review): assumes the affine's matrix3 is a pure rotation (no
    /// scale/shear) — nothing here removes scale; confirm caller contract.
    #[inline]
    pub fn from_affine3(a: &crate::Affine3A) -> Self {
        #[allow(clippy::useless_conversion)]
        Self::from_rotation_axes(
            a.matrix3.x_axis.into(),
            a.matrix3.y_axis.into(),
            a.matrix3.z_axis.into(),
        )
    }

    /// The shortest rotation carrying unit vector `from` onto `to`.
    #[inline]
    pub fn from_rotation_arc(from: UnitVec3, to: UnitVec3) -> Self {
        #[cfg_attr(
            not(any(
                all(debug_assertions, feature = "debug-glam-assert"),
                feature = "glam-assert"
            )),
            allow(clippy::let_and_return)
        )]
        let q = Quat::from_rotation_arc(from, to).as_unit_quat_unchecked();
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Like [`Self::from_rotation_arc`], but treats antiparallel vectors as
    /// already aligned (per the delegated `Quat` implementation).
    #[inline]
    pub fn from_rotation_arc_colinear(from: UnitVec3, to: UnitVec3) -> Self {
        #[cfg_attr(
            not(any(
                all(debug_assertions, feature = "debug-glam-assert"),
                feature = "glam-assert"
            )),
            allow(clippy::let_and_return)
        )]
        let q = Quat::from_rotation_arc_colinear(from, to).as_unit_quat_unchecked();
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Rotation in the XY plane carrying 2D unit vector `from` onto `to`.
    #[inline]
    pub fn from_rotation_arc_2d(from: UnitVec2, to: UnitVec2) -> Self {
        #[cfg_attr(
            not(any(
                all(debug_assertions, feature = "debug-glam-assert"),
                feature = "glam-assert"
            )),
            allow(clippy::let_and_return)
        )]
        let q = Quat::from_rotation_arc_2d(from, to).as_unit_quat_unchecked();
        glam_assert!(q.is_normalized(), "{:?} is not normalized.", q);
        q
    }

    /// Decomposes into a rotation axis and an angle in radians.
    /// Near the identity (vector part ~0) the axis is arbitrary; `X` is
    /// returned with angle 0.
    #[inline]
    pub fn to_axis_angle(self) -> (UnitVec3, f32) {
        const EPSILON: f64 = 1.0e-8;
        const EPSILON_SQUARED: f64 = EPSILON * EPSILON;
        let w = self.w;
        let v3 = self.xyz();
        let sin_theta_abs = v3.length();
        // NOTE(review): this compares the *length* against EPSILON squared
        // (1e-16), not against EPSILON, and not length_squared against
        // EPSILON_SQUARED — verify the intended threshold; upstream glam
        // compares length_squared > EPSILON * EPSILON.
        let angle_valid = sin_theta_abs > EPSILON_SQUARED as f32;
        // Computed unconditionally (branch-friendly select below); may be
        // inf/NaN when the length is zero, but is then discarded.
        let axis0 = v3 * sin_theta_abs.recip();
        let axis1 = Vec3::X;
        let a = if angle_valid { axis0 } else { axis1 };
        let angle = if angle_valid {
            // signum(w) folds the two hemispheres so the returned angle has a
            // consistent sign relative to the axis.
            (sin_theta_abs * w.signumf()).atan2f(w.absf()) * 2.0_f32
        } else {
            0.0_f32
        };
        (a.as_unit_vec3_unchecked(), angle)
    }

    /// The rotation-vector (exponential map) form: `axis * angle`.
    #[inline]
    pub fn to_scaled_axis(self) -> Vec3 {
        let (axis, angle) = self.to_axis_angle();
        axis * angle
    }

    /// Extracts Euler angles for the given order, returned as
    /// `(first, second, third)` applied angles.
    /// The structure (proper/Tait-Bryan split, SQRT_2 pre-rotation,
    /// Levi-Civita sign) appears to follow the general direct method of
    /// Bernardes & Viollet (2022) — TODO confirm against the cited paper.
    #[inline]
    pub fn to_euler(self, euler: EulerRot) -> (f32, f32, f32) {
        const PI: f32 = core::f32::consts::PI;
        #[allow(non_snake_case)]
        let TWO_PI: f32 = PI + PI;
        #[allow(clippy::excessive_precision)]
        const SQRT_2: f32 = 1.41421356237309504880168872420969808;
        let not_proper = euler.not_proper();
        let (i, j, k) = euler.map_sequence();
        let levi_civita_sig = euler.levi_civita_sig();
        // Components indexed as [w, x, y, z] so the axis indices i, j, k
        // (1-based) select directly.
        let q = [self.w, self.x, self.y, self.z];
        // Tait-Bryan orders are mapped to an equivalent proper-Euler problem
        // by a fixed pre-rotation (the 1/sqrt(2) combinations).
        let (a, b, c, d) = if not_proper {
            (
                (q[0] - q[j]) / SQRT_2,
                (q[k] * levi_civita_sig + q[i]) / SQRT_2,
                (q[0] + q[j]) / SQRT_2,
                (q[k] * levi_civita_sig - q[i]) / SQRT_2,
            )
        } else {
            (q[0], q[i], q[j], q[k] * levi_civita_sig)
        };
        let cos_theta_2 = 2.0_f32 * (a * a + b * b) - 1.0_f32;
        let theta_positive = b.atan2f(a);
        let theta_negative = -d.atan2f(c);
        let mut theta_1: f32;
        let mut theta_2: f32;
        let mut theta_3: f32;
        // Gimbal-lock handling: at the singularities only the sum/difference
        // of the outer angles is observable; fold it all into theta_3.
        if cos_theta_2 + f32::EPSILON >= 1.0 {
            theta_1 = 0.0;
            theta_2 = 0.0;
            theta_3 = 2.0 * theta_positive;
        } else if cos_theta_2 - f32::EPSILON <= -1.0 {
            theta_1 = 0.0;
            theta_2 = PI;
            theta_3 = -2.0 * theta_negative;
        } else {
            theta_1 = theta_positive + theta_negative;
            theta_2 = cos_theta_2.acosf();
            theta_3 = theta_positive - theta_negative;
        };
        if not_proper {
            // Undo the Tait-Bryan mapping: sign-correct the third angle and
            // shift the middle angle back by pi/2.
            theta_3 *= levi_civita_sig;
            theta_2 -= PI / 2.0_f32;
        }
        // Wrap the outer angles into (-PI, PI].
        theta_1 %= TWO_PI;
        theta_3 %= TWO_PI;
        if theta_1.absf() >= PI {
            theta_1 -= theta_1.signumf() * TWO_PI;
        }
        if theta_3.absf() >= PI {
            theta_3 -= theta_3.signumf() * TWO_PI;
        }
        (theta_3, theta_2, theta_1)
    }

    /// [`Self::to_euler`] with the crate's default rotation order.
    #[inline]
    pub fn to_euler_default(self) -> (f32, f32, f32) {
        self.to_euler(EulerRot::default())
    }

    /// Components as `[x, y, z, w]`.
    #[inline]
    pub fn to_array(&self) -> [f32; 4] {
        [self.x, self.y, self.z, self.w]
    }

    /// The vector (imaginary) part `(x, y, z)`.
    #[inline]
    pub fn xyz(self) -> Vec3 {
        Vec3::new(self.x, self.y, self.z)
    }

    /// Negates the vector part, leaving w. For a unit quaternion this is
    /// also the inverse rotation.
    #[must_use]
    #[inline]
    pub fn conjugate(self) -> Self {
        const SIGN: float32x4_t = neon::float32x4_from_f32x4([-1.0, -1.0, -1.0, 1.0]);
        Self(unsafe { vmulq_f32(self.0, SIGN) })
    }

    /// The inverse rotation. Because `self` is unit-length, this is simply
    /// the conjugate (no division by the norm is needed).
    #[must_use]
    #[inline]
    pub fn inverse(self) -> Self {
        self.conjugate()
    }

    /// Four-component dot product; 1 for identical orientations, -1 for the
    /// antipodal representation of the same rotation.
    #[inline]
    pub fn dot(self, rhs: Self) -> f32 {
        Vec4::from(self).dot(Vec4::from(rhs))
    }

    /// Whether the four components form a unit-length vector (within the
    /// tolerance used by `Vec4::is_normalized`).
    #[inline]
    pub fn is_normalized(self) -> bool {
        Vec4::from(self).is_normalized()
    }

    /// Re-normalizes to counter accumulated floating-point drift.
    #[must_use]
    #[inline]
    pub fn renormalize(self) -> Self {
        Self::from_unit_vec4(Vec4::from(self).normalize_to_unit())
    }

    /// True when the rotation angle is below a small fixed threshold
    /// (~0.0028 rad, ~0.16 degrees), treating q and -q alike via `abs(w)`.
    #[inline]
    pub fn is_near_identity(self) -> bool {
        let threshold_angle = 0.002_847_144_6;
        let positive_w_angle = self.w.absf().acos_approx() * 2.0_f32;
        positive_w_angle < threshold_angle
    }

    /// Angle in radians between the two rotations, ignoring double-cover
    /// sign (always in `[0, PI]`-ish range via `abs` of the dot).
    #[inline]
    pub fn angle_between(self, rhs: Self) -> f32 {
        self.dot(rhs).absf().acos_approx() * 2.0_f32
    }

    /// Component-wise approximate equality with absolute tolerance.
    #[inline]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        Vec4::from(self).abs_diff_eq(Vec4::from(rhs), max_abs_diff)
    }

    /// Normalized linear interpolation (nlerp) from `self` to `end` by `s`.
    /// The sign of the dot product is folded into `end` so interpolation
    /// takes the shorter arc; the result is normalized before returning.
    #[inline]
    #[doc(alias = "mix")]
    pub fn lerp(self, end: Self, s: f32) -> Self {
        const NEG_ZERO: float32x4_t = neon::float32x4_from_f32x4([-0.0; 4]);
        let start = self.0;
        let end = end.0;
        unsafe {
            let dot = crate::neon::dot4_into_float32x4(start, end);
            // bias holds the sign bit of dot in every lane; XOR-ing it into
            // `end` flips end's sign when dot < 0 (shortest-path selection).
            let bias = vandq_u32(vreinterpretq_u32_f32(dot), vreinterpretq_u32_f32(NEG_ZERO));
            // start + (sign-corrected end - start) * s
            let interpolated = vaddq_f32(
                vmulq_f32(
                    vsubq_f32(
                        vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(end), bias)),
                        start,
                    ),
                    vdupq_n_f32(s),
                ),
                start,
            );
            Quat(interpolated).normalize_to_unit()
        }
    }

    /// Spherical linear interpolation from `self` to `end` by `s`.
    /// Falls back to [`Self::lerp`] when the rotations are nearly identical
    /// (dot above `DOT_THRESHOLD`), where the slerp denominator degenerates.
    #[inline]
    pub fn slerp(self, mut end: Self, s: f32) -> Self {
        const DOT_THRESHOLD: f32 = 0.9995;
        let mut dot = self.dot(end);
        // Flip to the antipodal representation when needed so interpolation
        // follows the shorter arc.
        if dot < 0.0 {
            end = -end;
            dot = -dot;
        }
        if dot > DOT_THRESHOLD {
            self.lerp(end, s)
        } else {
            let theta = dot.acos_approx();
            let x = 1.0 - s;
            let y = s;
            let z = 1.0;
            unsafe {
                // One SIMD sine evaluates sin((1-s)*theta), sin(s*theta) and
                // sin(theta) together (lanes 0, 1, 2).
                let tmp = vmulq_f32(vdupq_n_f32(theta), neon::float32x4_from_f32x4([x, y, z, z]));
                let tmp = neon::float32x4_sin(tmp);
                let scale1 = vgetq_lane_f32(tmp, 0);
                let scale2 = vgetq_lane_f32(tmp, 1);
                let theta_sin = vgetq_lane_f32(tmp, 2);
                // Classic slerp weights: (sin((1-s)t)*a + sin(s*t)*b) / sin(t)
                Self(vdivq_f32(
                    vaddq_f32(vmulq_n_f32(self.0, scale1), vmulq_n_f32(end.0, scale2)),
                    vdupq_n_f32(theta_sin),
                ))
            }
        }
    }

    /// Rotates a `Vec3` (routed through the aligned SIMD path).
    #[inline]
    pub fn mul_vec3(self, rhs: Vec3) -> Vec3 {
        self.mul_vec3a(rhs.into()).into()
    }

    /// Quaternion product via NEON lane shuffles and sign masks.
    /// NOTE(review): operands are loaded as q1 = rhs, q2 = self — presumably
    /// producing the product for `self * rhs` under this crate's convention;
    /// confirm against the scalar `Quat::mul_quat`.
    #[inline]
    pub fn mul_unit_quat(self, rhs: Self) -> Self {
        unsafe {
            // Sign patterns applied to the wzyx / zwxy / yxwz shuffles of q1.
            const CONTROL_WZYX: float32x4_t = neon::float32x4_from_f32x4([1., -1., 1., -1.]);
            const CONTROL_ZWXY: float32x4_t = neon::float32x4_from_f32x4([1., 1., -1., -1.]);
            const CONTROL_YXWZ: float32x4_t = neon::float32x4_from_f32x4([-1., 1., 1., -1.]);
            let q1 = rhs.0;
            let q2 = self.0;
            let q2l = vget_low_f32(q2);
            let q2h = vget_high_f32(q2);
            // Broadcast each scalar component of q2 across a full vector.
            let q2x = vdupq_lane_f32(q2l, 0);
            let q2y = vdupq_lane_f32(q2l, 1);
            let q2z = vdupq_lane_f32(q2h, 0);
            // Start with q2.w * q1 (the w term has no sign flips).
            let v_result = vmulq_lane_f32(q1, q2h, 1);
            // v_temp_0 = q1 with 64-bit pairs reversed: (y, x, w, z).
            let v_temp_0 = vrev64q_f32(q1);
            // v_temp_1 = halves swapped: (w, z, y, x).
            let v_temp_1 = vcombine_f32(vget_high_f32(v_temp_0), vget_low_f32(v_temp_0));
            let q2x = vmulq_f32(q2x, v_temp_1);
            // Accumulate the x, y, z cross-terms with their sign patterns.
            let v_result = vmlaq_f32(v_result, q2x, CONTROL_WZYX);
            // v_temp_2 = (z, w, x, y).
            let v_temp_2 = vrev64q_f32(v_temp_1);
            let q2y = vmulq_f32(q2y, v_temp_2);
            let v_result = vmlaq_f32(v_result, q2y, CONTROL_ZWXY);
            let q2z = vmulq_f32(q2z, v_temp_0);
            Self(vmlaq_f32(v_result, q2z, CONTROL_YXWZ))
        }
    }

    /// Rotates an aligned 3D vector using the expanded sandwich-product
    /// identity: v' = v*(w^2 - |u|^2) + 2*u*(u.v) + 2*w*(u x v),
    /// where u = (x, y, z) and w is the scalar part.
    #[inline]
    pub fn mul_vec3a(self, rhs: Vec3A) -> Vec3A {
        unsafe {
            const TWO: float32x4_t = neon::float32x4_from_f32x4([2.0; 4]);
            const SIGN: float32x4_t = neon::float32x4_from_f32x4([-1.0, -1.0, -1.0, 1.0]);
            let u = self.0;
            let v = rhs.0;
            // a = v * (w^2 - x^2 - y^2 - z^2): horizontal sum of u*u with the
            // xyz lanes negated.
            let a = vmulq_n_f32(v, vaddvq_f32(vmulq_f32(vmulq_f32(u, u), SIGN)));
            // b = u * (2 * dot(u.xyz, v))
            let b = vmulq_f32(u, vmulq_f32(neon::dot3_into_float32x4(u, v), TWO));
            // c = (u.xyz x v) * (2 * w)   (w broadcast from lane 3)
            let c = vmulq_f32(Vec3A(u).cross(rhs).into(), vmulq_laneq_f32(TWO, u, 3));
            Vec3A(vaddq_f32(vaddq_f32(a, b), c))
        }
    }

    /// Widens each component to f64, producing the double-precision type.
    #[inline]
    pub fn as_f64(self) -> UnitDQuat {
        UnitDQuat::from_xyzw_unchecked(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for UnitQuat {
    /// Renders as a tuple struct, e.g. `UnitQuat(0.0, 0.0, 0.0, 1.0)`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (x, y, z, w) = (self.x, self.y, self.z, self.w);
        let mut builder = fmt.debug_tuple(stringify!(UnitQuat));
        builder.field(&x).field(&y).field(&z).field(&w);
        builder.finish()
    }
}
#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for UnitQuat {
    /// Renders as `[x, y, z, w]` using `Display` formatting for each lane.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let [x, y, z, w] = self.to_array();
        write!(fmt, "[{}, {}, {}, {}]", x, y, z, w)
    }
}
// Operations that cannot preserve unit length return a plain `Quat`.
impl_op_ex!(+ |a: &UnitQuat, b: &UnitQuat| -> Quat{
    Quat::from_vec4(Vec4::from(a) + Vec4::from(b))
});
impl_op_ex!(-|a: &UnitQuat, b: &UnitQuat| -> Quat {
    Quat::from_vec4(Vec4::from(a) - Vec4::from(b))
});
impl_op_ex_commutative!(*|a: &UnitQuat, b: &f32| -> Quat { Quat::from_vec4(Vec4::from(a) * b) });
impl_op_ex!(/ |a: &UnitQuat, b: &f32| -> Quat{
    Quat::from_vec4(Vec4::from(a) / b)
});
// Composition of two unit rotations stays (approximately) unit, so it keeps
// the `UnitQuat` type; note the product is NOT renormalized here.
impl_op_ex!(*|a: &UnitQuat, b: &UnitQuat| -> UnitQuat { a.mul_unit_quat(*b) });
impl_op_ex!(*= |a: &mut UnitQuat, b: &UnitQuat| {
    *a = a.mul_unit_quat(*b)
});
// Mixed products with a plain `Quat` go through the scalar `Quat` path and
// yield a plain `Quat`.
impl_op_ex!(*|a: &UnitQuat, b: &Quat| -> Quat { a.as_quat().mul_quat(*b) });
impl_op_ex!(*|a: &Quat, b: &UnitQuat| -> Quat { a.mul_quat(b.as_quat()) });
impl_op_ex!(*= |a: &mut Quat, b: &UnitQuat| {
    *a = a.mul_quat(b.as_quat())
});
// Rotating a vector; unit-vector inputs stay unit (rotation preserves length).
impl_op_ex!(*|a: &UnitQuat, b: &Vec3| -> Vec3 { a.mul_vec3(*b) });
impl_op_ex!(*|a: &UnitQuat, b: &UnitVec3| -> UnitVec3 {
    a.mul_vec3(b.as_vec3()).as_unit_vec3_unchecked()
});
impl_op_ex!(*|a: &UnitQuat, b: &Vec3A| -> Vec3A { a.mul_vec3a(*b) });
impl_op_ex!(*|a: &UnitQuat, b: &UnitVec3A| -> UnitVec3A {
    (a * b.as_vec3a()).as_unit_vec3a_unchecked()
});
impl Neg for UnitQuat {
type Output = Self;
#[inline]
fn neg(self) -> Self {
(self * -1.0_f32).as_unit_quat_unchecked()
}
}
impl Default for UnitQuat {
#[inline]
fn default() -> Self {
Self::IDENTITY
}
}
impl PartialEq for UnitQuat {
    /// Exact component-wise equality of the four lanes (no tolerance; use
    /// `abs_diff_eq` for approximate comparison).
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        let lhs_v = Vec4::from(*self);
        let rhs_v = Vec4::from(*rhs);
        lhs_v == rhs_v
    }
}
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for UnitQuat {
    /// Borrows the four lanes as an `[x, y, z, w]` array without copying.
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `UnitQuat` is `#[repr(transparent)]` over `float32x4_t`,
        // which is 16 bytes like `[f32; 4]` and at least as strictly aligned,
        // so the pointer cast yields a valid reference.
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}
impl<'a> Product<&'a Self> for UnitQuat {
    /// Composes every rotation in the iterator, left to right, starting from
    /// the identity.
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        let mut acc = Self::IDENTITY;
        for q in iter {
            acc = acc * *q;
        }
        acc
    }
}
impl From<UnitQuat> for Vec4 {
#[inline]
fn from(q: UnitQuat) -> Self {
Self(q.0)
}
}
impl From<UnitQuat> for UnitVec4 {
#[inline]
fn from(q: UnitQuat) -> Self {
Vec4::from(q).as_unit_vec4_unchecked()
}
}
impl From<UnitQuat> for (f32, f32, f32, f32) {
    /// Unpacks into an `(x, y, z, w)` tuple via the `Vec4` conversion.
    #[inline]
    fn from(q: UnitQuat) -> Self {
        let v = Vec4::from(q);
        v.into()
    }
}
impl From<UnitQuat> for [f32; 4] {
#[inline]
fn from(q: UnitQuat) -> Self {
Vec4::from(q).into()
}
}
impl From<&UnitQuat> for Vec4 {
#[inline]
fn from(q: &UnitQuat) -> Self {
Self(q.0)
}
}
impl From<&UnitQuat> for UnitVec4 {
#[inline]
fn from(q: &UnitQuat) -> Self {
Vec4::from(q).as_unit_vec4_unchecked()
}
}
impl From<&UnitQuat> for (f32, f32, f32, f32) {
    /// By-reference variant; unpacks into `(x, y, z, w)`.
    #[inline]
    fn from(q: &UnitQuat) -> Self {
        let v = Vec4::from(q);
        v.into()
    }
}
impl From<&UnitQuat> for [f32; 4] {
#[inline]
fn from(q: &UnitQuat) -> Self {
Vec4::from(q).into()
}
}
impl From<UnitQuat> for float32x4_t {
    /// Exposes the raw NEON register backing the quaternion.
    #[inline]
    fn from(q: UnitQuat) -> Self {
        let UnitQuat(raw) = q;
        raw
    }
}
impl Deref for UnitQuat {
    type Target = crate::deref::Vec4<f32>;
    /// Provides `.x`/`.y`/`.z`/`.w` field access on the SIMD-backed type by
    /// reinterpreting the payload as a plain four-field struct.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: relies on `crate::deref::Vec4<f32>` being layout-compatible
        // with the 16-byte `float32x4_t` payload — presumably guaranteed by a
        // matching repr at its declaration; confirm there if touching layout.
        unsafe { &*(self as *const Self).cast() }
    }
}