1use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
11#[cfg(feature = "zerocopy")]
12use zerocopy_derive::*;
13
// Helper for const-constructing a `Vec3A` from four scalars.
//
// `float32x4_t` cannot be built element-wise in a `const fn`, so we punch
// through via a `#[repr(C)]` union: `a` and `v` share the same 16 bytes,
// and reading `v` after writing `a` reinterprets the array as the SIMD
// register. Sound because both fields are plain 128-bit POD data.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}
19
/// Creates a 3-dimensional SIMD-aligned vector.
///
/// Free-function shorthand for [`Vec3A::new`].
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
26
/// A 3-dimensional vector backed by a 128-bit NEON register.
///
/// 16-byte aligned; the fourth SIMD lane is unused padding and its value
/// must never affect results (operations either ignore it or mask it out).
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) float32x4_t);
44
45impl Vec3A {
46 pub const ZERO: Self = Self::splat(0.0);
48
49 pub const ONE: Self = Self::splat(1.0);
51
52 pub const NEG_ONE: Self = Self::splat(-1.0);
54
55 pub const MIN: Self = Self::splat(f32::MIN);
57
58 pub const MAX: Self = Self::splat(f32::MAX);
60
61 pub const NAN: Self = Self::splat(f32::NAN);
63
64 pub const INFINITY: Self = Self::splat(f32::INFINITY);
66
67 pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
69
70 pub const X: Self = Self::new(1.0, 0.0, 0.0);
72
73 pub const Y: Self = Self::new(0.0, 1.0, 0.0);
75
76 pub const Z: Self = Self::new(0.0, 0.0, 1.0);
78
79 pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);
81
82 pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);
84
85 pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);
87
88 pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];
90
91 pub const USES_CORE_SIMD: bool = false;
93 pub const USES_NEON: bool = true;
95 pub const USES_SCALAR_MATH: bool = false;
97 pub const USES_SSE2: bool = false;
99 pub const USES_WASM_SIMD: bool = false;
101 #[deprecated(since = "0.31.0", note = "Renamed to USES_WASM_SIMD")]
102 pub const USES_WASM32_SIMD: bool = false;
103
    /// Creates a new vector.
    ///
    /// The unused fourth lane is filled with a copy of `z` rather than an
    /// arbitrary value, so the padding lane never holds garbage (e.g. a
    /// signalling NaN) that lane-wise ops could observe.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }
110
111 #[inline]
113 #[must_use]
114 pub const fn splat(v: f32) -> Self {
115 unsafe { UnionCast { a: [v; 4] }.v }
116 }
117
118 #[inline]
120 #[must_use]
121 pub fn map<F>(self, f: F) -> Self
122 where
123 F: Fn(f32) -> f32,
124 {
125 Self::new(f(self.x), f(self.y), f(self.z))
126 }
127
    /// Creates a vector from the elements in `if_true` and `if_false`,
    /// selecting which to use for each element of `self`.
    ///
    /// `vbslq_f32` blends per *bit*: mask lanes are all-ones or all-zeros
    /// (as produced by the `cmp*` methods), so this acts as a per-lane
    /// select — true lanes take `if_true`, false lanes take `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }
138
139 #[inline]
141 #[must_use]
142 pub const fn from_array(a: [f32; 3]) -> Self {
143 Self::new(a[0], a[1], a[2])
144 }
145
146 #[inline]
148 #[must_use]
149 pub const fn to_array(&self) -> [f32; 3] {
150 unsafe { *(self as *const Self as *const [f32; 3]) }
151 }
152
153 #[inline]
159 #[must_use]
160 pub const fn from_slice(slice: &[f32]) -> Self {
161 assert!(slice.len() >= 3);
162 Self::new(slice[0], slice[1], slice[2])
163 }
164
165 #[inline]
171 pub fn write_to_slice(self, slice: &mut [f32]) {
172 slice[..3].copy_from_slice(&self.to_array());
173 }
174
175 #[inline]
179 #[must_use]
180 pub fn from_vec4(v: Vec4) -> Self {
181 Self(v.0)
182 }
183
184 #[inline]
186 #[must_use]
187 pub fn extend(self, w: f32) -> Vec4 {
188 Vec4::new(self.x, self.y, self.z, w)
189 }
190
191 #[inline]
195 #[must_use]
196 pub fn truncate(self) -> Vec2 {
197 use crate::swizzles::Vec3Swizzles;
198 self.xy()
199 }
200
201 #[inline]
207 #[must_use]
208 pub fn from_homogeneous(v: Vec4) -> Self {
209 glam_assert!(v.w != 0.0);
210 Self::from_vec4(v) / v.w
211 }
212
213 #[inline]
215 #[must_use]
216 pub fn to_homogeneous(self) -> Vec4 {
217 self.extend(1.0)
218 }
219
220 #[inline]
222 #[must_use]
223 pub fn to_vec3(self) -> Vec3 {
224 Vec3::from(self)
225 }
226
227 #[inline]
229 #[must_use]
230 pub fn with_x(mut self, x: f32) -> Self {
231 self.x = x;
232 self
233 }
234
235 #[inline]
237 #[must_use]
238 pub fn with_y(mut self, y: f32) -> Self {
239 self.y = y;
240 self
241 }
242
243 #[inline]
245 #[must_use]
246 pub fn with_z(mut self, z: f32) -> Self {
247 self.z = z;
248 self
249 }
250
251 #[inline]
253 #[must_use]
254 pub fn dot(self, rhs: Self) -> f32 {
255 (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
257 }
258
259 #[inline]
261 #[must_use]
262 pub fn dot_into_vec(self, rhs: Self) -> Self {
263 Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
264 }
265
    /// Computes the cross product of `self` and `rhs`.
    ///
    /// Classic formulation: `lhs.yzx * rhs.zxy - lhs.zxy * rhs.yzx`.
    /// NEON has no general shuffle, so each rotated operand is built with
    /// a `vextq_f32` rotation followed by one lane patch (`vsetq_lane_f32`);
    /// the w lane ends up with garbage, which callers of `cross` must not
    /// observe (all 3-component ops here ignore lane 3).
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            let lhs = self.0;
            let rhs = rhs.0;
            // Rotate left by 1: [y, z, w, x]; rotate right by 1: [w, x, y, z].
            let lhs_yzwx = vextq_f32(lhs, lhs, 1);
            let rhs_wxyz = vextq_f32(rhs, rhs, 3);

            // Patch one lane of each rotation to get the needed permutations.
            let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
            let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);

            // lhs.yzx * rhs.zxy
            let part_a = vmulq_f32(lhs_yzx, rhs_zxy);

            let lhs_wxyz = vextq_f32(lhs, lhs, 3);
            let rhs_yzwx = vextq_f32(rhs, rhs, 1);
            let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
            let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);

            // part_a - lhs.zxy * rhs.yzx (fused multiply-subtract).
            let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
            Self(result)
        }
    }
294
295 #[inline]
302 #[must_use]
303 pub fn min(self, rhs: Self) -> Self {
304 Self(unsafe { vminq_f32(self.0, rhs.0) })
305 }
306
307 #[inline]
314 #[must_use]
315 pub fn max(self, rhs: Self) -> Self {
316 Self(unsafe { vmaxq_f32(self.0, rhs.0) })
317 }
318
319 #[inline]
330 #[must_use]
331 pub fn clamp(self, min: Self, max: Self) -> Self {
332 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
333 self.max(min).min(max)
334 }
335
336 #[inline]
343 #[must_use]
344 pub fn min_element(self) -> f32 {
345 self.x.min(self.y.min(self.z))
346 }
347
348 #[inline]
355 #[must_use]
356 pub fn max_element(self) -> f32 {
357 self.x.max(self.y.max(self.z))
358 }
359
360 #[doc(alias = "argmin")]
362 #[inline]
363 #[must_use]
364 pub fn min_position(self) -> usize {
365 let mut min = self.x;
366 let mut index = 0;
367 if self.y < min {
368 min = self.y;
369 index = 1;
370 }
371 if self.z < min {
372 index = 2;
373 }
374 index
375 }
376
377 #[doc(alias = "argmax")]
379 #[inline]
380 #[must_use]
381 pub fn max_position(self) -> usize {
382 let mut max = self.x;
383 let mut index = 0;
384 if self.y > max {
385 max = self.y;
386 index = 1;
387 }
388 if self.z > max {
389 index = 2;
390 }
391 index
392 }
393
    /// Returns the sum of all elements of `self`.
    ///
    /// The padding lane is forced to 0.0 before the horizontal add
    /// (`vaddvq_f32` sums all four lanes) so it cannot pollute the result.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
    }
402
    /// Returns the product of all elements of `self`.
    ///
    /// Computed as scalar `x * y * z` via lane-indexed multiplies; lane 3
    /// (padding) is never read.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            vmuls_laneq_f32(s, self.0, 2)
        }
    }
414
415 #[inline]
421 #[must_use]
422 pub fn cmpeq(self, rhs: Self) -> BVec3A {
423 BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
424 }
425
426 #[inline]
432 #[must_use]
433 pub fn cmpne(self, rhs: Self) -> BVec3A {
434 BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
435 }
436
437 #[inline]
443 #[must_use]
444 pub fn cmpge(self, rhs: Self) -> BVec3A {
445 BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
446 }
447
448 #[inline]
454 #[must_use]
455 pub fn cmpgt(self, rhs: Self) -> BVec3A {
456 BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
457 }
458
459 #[inline]
465 #[must_use]
466 pub fn cmple(self, rhs: Self) -> BVec3A {
467 BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
468 }
469
470 #[inline]
476 #[must_use]
477 pub fn cmplt(self, rhs: Self) -> BVec3A {
478 BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
479 }
480
481 #[inline]
483 #[must_use]
484 pub fn abs(self) -> Self {
485 Self(unsafe { vabsq_f32(self.0) })
486 }
487
    /// Returns a vector with elements representing the sign of `self`:
    /// `1.0` for positive (including `+0.0`), `-1.0` for negative
    /// (including `-0.0`), and the element itself where it is NaN.
    ///
    /// Bit trick: `self & bits(-1.0)` keeps the sign bit (bits(-1.0) is
    /// bits(1.0) plus the sign bit), then `| bits(1.0)` forces the
    /// magnitude to exactly 1.0 — yielding ±1.0 with `self`'s sign.
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes pass through unchanged.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
508
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    ///
    /// `mask` (`-0.0` splat) isolates the sign bit: result =
    /// `(rhs & sign_bit) | (self & !sign_bit)` per lane.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }
524
    /// Returns a bitmask with the lowest 3 bits set to the sign bits from
    /// the elements of `self`: bit 0 = x, bit 1 = y, bit 2 = z.
    ///
    /// A negative element (including `-0.0`) results in a `1` bit; a NaN
    /// contributes its raw sign bit.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            // Mask out everything but the IEEE-754 sign bit of each lane.
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            // Shift each sign bit down to bit 0, then pack x|y|z.
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            x | y << 1 | z << 2
        }
    }
545
546 #[inline]
549 #[must_use]
550 pub fn is_finite(self) -> bool {
551 self.is_finite_mask().all()
552 }
553
554 #[inline]
558 #[must_use]
559 pub fn is_finite_mask(self) -> BVec3A {
560 BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
561 }
562
563 #[inline]
565 #[must_use]
566 pub fn is_nan(self) -> bool {
567 self.is_nan_mask().any()
568 }
569
570 #[inline]
574 #[must_use]
575 pub fn is_nan_mask(self) -> BVec3A {
576 BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
577 }
578
579 #[doc(alias = "magnitude")]
581 #[inline]
582 #[must_use]
583 pub fn length(self) -> f32 {
584 math::sqrt(self.dot(self))
585 }
586
587 #[doc(alias = "magnitude2")]
591 #[inline]
592 #[must_use]
593 pub fn length_squared(self) -> f32 {
594 self.dot(self)
595 }
596
597 #[inline]
601 #[must_use]
602 pub fn length_recip(self) -> f32 {
603 self.length().recip()
604 }
605
606 #[inline]
608 #[must_use]
609 pub fn distance(self, rhs: Self) -> f32 {
610 (self - rhs).length()
611 }
612
613 #[inline]
615 #[must_use]
616 pub fn distance_squared(self, rhs: Self) -> f32 {
617 (self - rhs).length_squared()
618 }
619
620 #[inline]
622 #[must_use]
623 pub fn div_euclid(self, rhs: Self) -> Self {
624 Self::new(
625 math::div_euclid(self.x, rhs.x),
626 math::div_euclid(self.y, rhs.y),
627 math::div_euclid(self.z, rhs.z),
628 )
629 }
630
631 #[inline]
635 #[must_use]
636 pub fn rem_euclid(self, rhs: Self) -> Self {
637 Self::new(
638 math::rem_euclid(self.x, rhs.x),
639 math::rem_euclid(self.y, rhs.y),
640 math::rem_euclid(self.z, rhs.z),
641 )
642 }
643
644 #[inline]
654 #[must_use]
655 pub fn normalize(self) -> Self {
656 #[allow(clippy::let_and_return)]
657 let normalized = self.mul(self.length_recip());
658 glam_assert!(normalized.is_finite());
659 normalized
660 }
661
662 #[inline]
669 #[must_use]
670 pub fn try_normalize(self) -> Option<Self> {
671 let rcp = self.length_recip();
672 if rcp.is_finite() && rcp > 0.0 {
673 Some(self * rcp)
674 } else {
675 None
676 }
677 }
678
679 #[inline]
687 #[must_use]
688 pub fn normalize_or(self, fallback: Self) -> Self {
689 let rcp = self.length_recip();
690 if rcp.is_finite() && rcp > 0.0 {
691 self * rcp
692 } else {
693 fallback
694 }
695 }
696
697 #[inline]
704 #[must_use]
705 pub fn normalize_or_zero(self) -> Self {
706 self.normalize_or(Self::ZERO)
707 }
708
709 #[inline]
713 #[must_use]
714 pub fn normalize_and_length(self) -> (Self, f32) {
715 let length = self.length();
716 let rcp = 1.0 / length;
717 if rcp.is_finite() && rcp > 0.0 {
718 (self * rcp, length)
719 } else {
720 (Self::X, 0.0)
721 }
722 }
723
724 #[inline]
728 #[must_use]
729 pub fn is_normalized(self) -> bool {
730 math::abs(self.length_squared() - 1.0) <= 2e-4
731 }
732
733 #[inline]
741 #[must_use]
742 pub fn project_onto(self, rhs: Self) -> Self {
743 let other_len_sq_rcp = rhs.dot(rhs).recip();
744 glam_assert!(other_len_sq_rcp.is_finite());
745 rhs * self.dot(rhs) * other_len_sq_rcp
746 }
747
748 #[doc(alias("plane"))]
759 #[inline]
760 #[must_use]
761 pub fn reject_from(self, rhs: Self) -> Self {
762 self - self.project_onto(rhs)
763 }
764
765 #[inline]
773 #[must_use]
774 pub fn project_onto_normalized(self, rhs: Self) -> Self {
775 glam_assert!(rhs.is_normalized());
776 rhs * self.dot(rhs)
777 }
778
779 #[doc(alias("plane"))]
790 #[inline]
791 #[must_use]
792 pub fn reject_from_normalized(self, rhs: Self) -> Self {
793 self - self.project_onto_normalized(rhs)
794 }
795
796 #[inline]
799 #[must_use]
800 pub fn round(self) -> Self {
801 Self(unsafe { vrndnq_f32(self.0) })
802 }
803
804 #[inline]
807 #[must_use]
808 pub fn floor(self) -> Self {
809 Self(unsafe { vrndmq_f32(self.0) })
810 }
811
812 #[inline]
815 #[must_use]
816 pub fn ceil(self) -> Self {
817 Self(unsafe { vrndpq_f32(self.0) })
818 }
819
820 #[inline]
823 #[must_use]
824 pub fn trunc(self) -> Self {
825 Self(unsafe { vrndq_f32(self.0) })
826 }
827
828 #[inline]
832 #[must_use]
833 pub fn step(self, rhs: Self) -> Self {
834 Self::select(rhs.cmplt(self), Self::ZERO, Self::ONE)
835 }
836
837 #[inline]
839 #[must_use]
840 pub fn saturate(self) -> Self {
841 self.clamp(Self::ZERO, Self::ONE)
842 }
843
844 #[inline]
851 #[must_use]
852 pub fn fract(self) -> Self {
853 self - self.trunc()
854 }
855
856 #[inline]
863 #[must_use]
864 pub fn fract_gl(self) -> Self {
865 self - self.floor()
866 }
867
868 #[inline]
871 #[must_use]
872 pub fn exp(self) -> Self {
873 Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
874 }
875
876 #[inline]
878 #[must_use]
879 pub fn exp2(self) -> Self {
880 Self::new(math::exp2(self.x), math::exp2(self.y), math::exp2(self.z))
881 }
882
883 #[inline]
886 #[must_use]
887 pub fn ln(self) -> Self {
888 Self::new(math::ln(self.x), math::ln(self.y), math::ln(self.z))
889 }
890
891 #[inline]
894 #[must_use]
895 pub fn log2(self) -> Self {
896 Self::new(math::log2(self.x), math::log2(self.y), math::log2(self.z))
897 }
898
899 #[inline]
901 #[must_use]
902 pub fn powf(self, n: f32) -> Self {
903 Self::new(
904 math::powf(self.x, n),
905 math::powf(self.y, n),
906 math::powf(self.z, n),
907 )
908 }
909
910 #[inline]
913 #[must_use]
914 pub fn sqrt(self) -> Self {
915 Self::new(math::sqrt(self.x), math::sqrt(self.y), math::sqrt(self.z))
916 }
917
918 #[inline]
920 #[must_use]
921 pub fn cos(self) -> Self {
922 Self::new(math::cos(self.x), math::cos(self.y), math::cos(self.z))
923 }
924
925 #[inline]
927 #[must_use]
928 pub fn sin(self) -> Self {
929 Self::new(math::sin(self.x), math::sin(self.y), math::sin(self.z))
930 }
931
932 #[inline]
934 #[must_use]
935 pub fn sin_cos(self) -> (Self, Self) {
936 let (sin_x, cos_x) = math::sin_cos(self.x);
937 let (sin_y, cos_y) = math::sin_cos(self.y);
938 let (sin_z, cos_z) = math::sin_cos(self.z);
939
940 (
941 Self::new(sin_x, sin_y, sin_z),
942 Self::new(cos_x, cos_y, cos_z),
943 )
944 }
945
946 #[inline]
948 #[must_use]
949 pub fn recip(self) -> Self {
950 Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
951 }
952
953 #[doc(alias = "mix")]
959 #[inline]
960 #[must_use]
961 pub fn lerp(self, rhs: Self, s: f32) -> Self {
962 self * (1.0 - s) + rhs * s
963 }
964
965 #[inline]
970 #[must_use]
971 pub fn move_towards(self, rhs: Self, d: f32) -> Self {
972 let a = rhs - self;
973 let len = a.length();
974 if len <= d || len <= 1e-4 {
975 return rhs;
976 }
977 self + a / len * d
978 }
979
980 #[inline]
986 pub fn midpoint(self, rhs: Self) -> Self {
987 (self + rhs) * 0.5
988 }
989
990 #[inline]
1000 #[must_use]
1001 pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
1002 self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
1003 }
1004
1005 #[inline]
1011 #[must_use]
1012 pub fn clamp_length(self, min: f32, max: f32) -> Self {
1013 glam_assert!(0.0 <= min);
1014 glam_assert!(min <= max);
1015 let length_sq = self.length_squared();
1016 if length_sq < min * min {
1017 min * (self / math::sqrt(length_sq))
1018 } else if length_sq > max * max {
1019 max * (self / math::sqrt(length_sq))
1020 } else {
1021 self
1022 }
1023 }
1024
1025 #[inline]
1031 #[must_use]
1032 pub fn clamp_length_max(self, max: f32) -> Self {
1033 glam_assert!(0.0 <= max);
1034 let length_sq = self.length_squared();
1035 if length_sq > max * max {
1036 max * (self / math::sqrt(length_sq))
1037 } else {
1038 self
1039 }
1040 }
1041
1042 #[inline]
1048 #[must_use]
1049 pub fn clamp_length_min(self, min: f32) -> Self {
1050 glam_assert!(0.0 <= min);
1051 let length_sq = self.length_squared();
1052 if length_sq < min * min {
1053 min * (self / math::sqrt(length_sq))
1054 } else {
1055 self
1056 }
1057 }
1058
1059 #[inline]
1067 #[must_use]
1068 pub fn mul_add(self, a: Self, b: Self) -> Self {
1069 Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
1070 }
1071
1072 #[inline]
1081 #[must_use]
1082 pub fn reflect(self, normal: Self) -> Self {
1083 glam_assert!(normal.is_normalized());
1084 self - 2.0 * self.dot(normal) * normal
1085 }
1086
1087 #[inline]
1097 #[must_use]
1098 pub fn refract(self, normal: Self, eta: f32) -> Self {
1099 glam_assert!(self.is_normalized());
1100 glam_assert!(normal.is_normalized());
1101 let n_dot_i = normal.dot(self);
1102 let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
1103 if k >= 0.0 {
1104 eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
1105 } else {
1106 Self::ZERO
1107 }
1108 }
1109
1110 #[inline]
1114 #[must_use]
1115 pub fn angle_between(self, rhs: Self) -> f32 {
1116 math::acos_approx(
1117 self.dot(rhs)
1118 .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
1119 )
1120 }
1121
1122 #[inline]
1124 #[must_use]
1125 pub fn rotate_x(self, angle: f32) -> Self {
1126 let (sina, cosa) = math::sin_cos(angle);
1127 Self::new(
1128 self.x,
1129 self.y * cosa - self.z * sina,
1130 self.y * sina + self.z * cosa,
1131 )
1132 }
1133
1134 #[inline]
1136 #[must_use]
1137 pub fn rotate_y(self, angle: f32) -> Self {
1138 let (sina, cosa) = math::sin_cos(angle);
1139 Self::new(
1140 self.x * cosa + self.z * sina,
1141 self.y,
1142 self.x * -sina + self.z * cosa,
1143 )
1144 }
1145
1146 #[inline]
1148 #[must_use]
1149 pub fn rotate_z(self, angle: f32) -> Self {
1150 let (sina, cosa) = math::sin_cos(angle);
1151 Self::new(
1152 self.x * cosa - self.y * sina,
1153 self.x * sina + self.y * cosa,
1154 self.z,
1155 )
1156 }
1157
1158 #[inline]
1166 #[must_use]
1167 pub fn rotate_axis(self, axis: Self, angle: f32) -> Self {
1168 Quat::from_axis_angle(axis.into(), angle) * self
1169 }
1170
1171 #[inline]
1177 #[must_use]
1178 pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
1179 let angle_between = self.angle_between(rhs);
1180 let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
1182 let axis = self
1183 .cross(rhs)
1184 .try_normalize()
1185 .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
1186 Quat::from_axis_angle(axis.into(), angle) * self
1187 }
1188
1189 #[inline]
1196 #[must_use]
1197 pub fn any_orthogonal_vector(self) -> Self {
1198 if math::abs(self.x) > math::abs(self.y) {
1200 Self::new(-self.z, 0.0, self.x) } else {
1202 Self::new(0.0, self.z, -self.y) }
1204 }
1205
1206 #[inline]
1214 #[must_use]
1215 pub fn any_orthonormal_vector(self) -> Self {
1216 glam_assert!(self.is_normalized());
1217 let sign = math::signum(self.z);
1219 let a = -1.0 / (sign + self.z);
1220 let b = self.x * self.y * a;
1221 Self::new(b, sign + self.y * self.y * a, -self.y)
1222 }
1223
1224 #[inline]
1231 #[must_use]
1232 pub fn any_orthonormal_pair(self) -> (Self, Self) {
1233 glam_assert!(self.is_normalized());
1234 let sign = math::signum(self.z);
1236 let a = -1.0 / (sign + self.z);
1237 let b = self.x * self.y * a;
1238 (
1239 Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
1240 Self::new(b, sign + self.y * self.y * a, -self.y),
1241 )
1242 }
1243
1244 #[inline]
1250 #[must_use]
1251 pub fn slerp(self, rhs: Self, s: f32) -> Self {
1252 let self_length = self.length();
1253 let rhs_length = rhs.length();
1254 let dot = self.dot(rhs) / (self_length * rhs_length);
1256 if math::abs(dot) < 1.0 - 3e-7 {
1258 let theta = math::acos_approx(dot);
1260 let sin_theta = math::sin(theta);
1262 let t1 = math::sin(theta * (1. - s));
1263 let t2 = math::sin(theta * s);
1264
1265 let result_length = self_length.lerp(rhs_length, s);
1267 return (self * (result_length / self_length) * t1
1269 + rhs * (result_length / rhs_length) * t2)
1270 * sin_theta.recip();
1271 }
1272 if dot < 0.0 {
1273 let axis = self.any_orthogonal_vector().normalize().into();
1277 let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
1278 let result_length = self_length.lerp(rhs_length, s);
1280 rotation * self * (result_length / self_length)
1281 } else {
1282 self.lerp(rhs, s)
1284 }
1285 }
1286
1287 #[inline]
1289 #[must_use]
1290 pub fn as_dvec3(self) -> crate::DVec3 {
1291 crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
1292 }
1293
1294 #[inline]
1296 #[must_use]
1297 pub fn as_i8vec3(self) -> crate::I8Vec3 {
1298 crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
1299 }
1300
1301 #[inline]
1303 #[must_use]
1304 pub fn as_u8vec3(self) -> crate::U8Vec3 {
1305 crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
1306 }
1307
1308 #[inline]
1310 #[must_use]
1311 pub fn as_i16vec3(self) -> crate::I16Vec3 {
1312 crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
1313 }
1314
1315 #[inline]
1317 #[must_use]
1318 pub fn as_u16vec3(self) -> crate::U16Vec3 {
1319 crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
1320 }
1321
1322 #[inline]
1324 #[must_use]
1325 pub fn as_ivec3(self) -> crate::IVec3 {
1326 crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
1327 }
1328
1329 #[inline]
1331 #[must_use]
1332 pub fn as_uvec3(self) -> crate::UVec3 {
1333 crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
1334 }
1335
1336 #[inline]
1338 #[must_use]
1339 pub fn as_i64vec3(self) -> crate::I64Vec3 {
1340 crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
1341 }
1342
1343 #[inline]
1345 #[must_use]
1346 pub fn as_u64vec3(self) -> crate::U64Vec3 {
1347 crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
1348 }
1349
1350 #[inline]
1352 #[must_use]
1353 pub fn as_isizevec3(self) -> crate::ISizeVec3 {
1354 crate::ISizeVec3::new(self.x as isize, self.y as isize, self.z as isize)
1355 }
1356
1357 #[inline]
1359 #[must_use]
1360 pub fn as_usizevec3(self) -> crate::USizeVec3 {
1361 crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
1362 }
1363}
1364
// The default value is the zero vector.
impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1371
1372impl PartialEq for Vec3A {
1373 #[inline]
1374 fn eq(&self, rhs: &Self) -> bool {
1375 self.cmpeq(*rhs).all()
1376 }
1377}
1378
1379impl Div for Vec3A {
1380 type Output = Self;
1381 #[inline]
1382 fn div(self, rhs: Self) -> Self {
1383 Self(unsafe { vdivq_f32(self.0, rhs.0) })
1384 }
1385}
1386
1387impl Div<&Self> for Vec3A {
1388 type Output = Self;
1389 #[inline]
1390 fn div(self, rhs: &Self) -> Self {
1391 self.div(*rhs)
1392 }
1393}
1394
1395impl Div<&Vec3A> for &Vec3A {
1396 type Output = Vec3A;
1397 #[inline]
1398 fn div(self, rhs: &Vec3A) -> Vec3A {
1399 (*self).div(*rhs)
1400 }
1401}
1402
1403impl Div<Vec3A> for &Vec3A {
1404 type Output = Vec3A;
1405 #[inline]
1406 fn div(self, rhs: Vec3A) -> Vec3A {
1407 (*self).div(rhs)
1408 }
1409}
1410
1411impl DivAssign for Vec3A {
1412 #[inline]
1413 fn div_assign(&mut self, rhs: Self) {
1414 self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
1415 }
1416}
1417
1418impl DivAssign<&Self> for Vec3A {
1419 #[inline]
1420 fn div_assign(&mut self, rhs: &Self) {
1421 self.div_assign(*rhs);
1422 }
1423}
1424
1425impl Div<f32> for Vec3A {
1426 type Output = Self;
1427 #[inline]
1428 fn div(self, rhs: f32) -> Self {
1429 Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
1430 }
1431}
1432
1433impl Div<&f32> for Vec3A {
1434 type Output = Self;
1435 #[inline]
1436 fn div(self, rhs: &f32) -> Self {
1437 self.div(*rhs)
1438 }
1439}
1440
1441impl Div<&f32> for &Vec3A {
1442 type Output = Vec3A;
1443 #[inline]
1444 fn div(self, rhs: &f32) -> Vec3A {
1445 (*self).div(*rhs)
1446 }
1447}
1448
1449impl Div<f32> for &Vec3A {
1450 type Output = Vec3A;
1451 #[inline]
1452 fn div(self, rhs: f32) -> Vec3A {
1453 (*self).div(rhs)
1454 }
1455}
1456
1457impl DivAssign<f32> for Vec3A {
1458 #[inline]
1459 fn div_assign(&mut self, rhs: f32) {
1460 self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
1461 }
1462}
1463
1464impl DivAssign<&f32> for Vec3A {
1465 #[inline]
1466 fn div_assign(&mut self, rhs: &f32) {
1467 self.div_assign(*rhs);
1468 }
1469}
1470
1471impl Div<Vec3A> for f32 {
1472 type Output = Vec3A;
1473 #[inline]
1474 fn div(self, rhs: Vec3A) -> Vec3A {
1475 Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
1476 }
1477}
1478
1479impl Div<&Vec3A> for f32 {
1480 type Output = Vec3A;
1481 #[inline]
1482 fn div(self, rhs: &Vec3A) -> Vec3A {
1483 self.div(*rhs)
1484 }
1485}
1486
1487impl Div<&Vec3A> for &f32 {
1488 type Output = Vec3A;
1489 #[inline]
1490 fn div(self, rhs: &Vec3A) -> Vec3A {
1491 (*self).div(*rhs)
1492 }
1493}
1494
1495impl Div<Vec3A> for &f32 {
1496 type Output = Vec3A;
1497 #[inline]
1498 fn div(self, rhs: Vec3A) -> Vec3A {
1499 (*self).div(rhs)
1500 }
1501}
1502
1503impl Mul for Vec3A {
1504 type Output = Self;
1505 #[inline]
1506 fn mul(self, rhs: Self) -> Self {
1507 Self(unsafe { vmulq_f32(self.0, rhs.0) })
1508 }
1509}
1510
1511impl Mul<&Self> for Vec3A {
1512 type Output = Self;
1513 #[inline]
1514 fn mul(self, rhs: &Self) -> Self {
1515 self.mul(*rhs)
1516 }
1517}
1518
1519impl Mul<&Vec3A> for &Vec3A {
1520 type Output = Vec3A;
1521 #[inline]
1522 fn mul(self, rhs: &Vec3A) -> Vec3A {
1523 (*self).mul(*rhs)
1524 }
1525}
1526
1527impl Mul<Vec3A> for &Vec3A {
1528 type Output = Vec3A;
1529 #[inline]
1530 fn mul(self, rhs: Vec3A) -> Vec3A {
1531 (*self).mul(rhs)
1532 }
1533}
1534
1535impl MulAssign for Vec3A {
1536 #[inline]
1537 fn mul_assign(&mut self, rhs: Self) {
1538 self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
1539 }
1540}
1541
1542impl MulAssign<&Self> for Vec3A {
1543 #[inline]
1544 fn mul_assign(&mut self, rhs: &Self) {
1545 self.mul_assign(*rhs);
1546 }
1547}
1548
1549impl Mul<f32> for Vec3A {
1550 type Output = Self;
1551 #[inline]
1552 fn mul(self, rhs: f32) -> Self {
1553 Self(unsafe { vmulq_n_f32(self.0, rhs) })
1554 }
1555}
1556
1557impl Mul<&f32> for Vec3A {
1558 type Output = Self;
1559 #[inline]
1560 fn mul(self, rhs: &f32) -> Self {
1561 self.mul(*rhs)
1562 }
1563}
1564
1565impl Mul<&f32> for &Vec3A {
1566 type Output = Vec3A;
1567 #[inline]
1568 fn mul(self, rhs: &f32) -> Vec3A {
1569 (*self).mul(*rhs)
1570 }
1571}
1572
1573impl Mul<f32> for &Vec3A {
1574 type Output = Vec3A;
1575 #[inline]
1576 fn mul(self, rhs: f32) -> Vec3A {
1577 (*self).mul(rhs)
1578 }
1579}
1580
1581impl MulAssign<f32> for Vec3A {
1582 #[inline]
1583 fn mul_assign(&mut self, rhs: f32) {
1584 self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
1585 }
1586}
1587
1588impl MulAssign<&f32> for Vec3A {
1589 #[inline]
1590 fn mul_assign(&mut self, rhs: &f32) {
1591 self.mul_assign(*rhs);
1592 }
1593}
1594
1595impl Mul<Vec3A> for f32 {
1596 type Output = Vec3A;
1597 #[inline]
1598 fn mul(self, rhs: Vec3A) -> Vec3A {
1599 Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
1600 }
1601}
1602
1603impl Mul<&Vec3A> for f32 {
1604 type Output = Vec3A;
1605 #[inline]
1606 fn mul(self, rhs: &Vec3A) -> Vec3A {
1607 self.mul(*rhs)
1608 }
1609}
1610
1611impl Mul<&Vec3A> for &f32 {
1612 type Output = Vec3A;
1613 #[inline]
1614 fn mul(self, rhs: &Vec3A) -> Vec3A {
1615 (*self).mul(*rhs)
1616 }
1617}
1618
1619impl Mul<Vec3A> for &f32 {
1620 type Output = Vec3A;
1621 #[inline]
1622 fn mul(self, rhs: Vec3A) -> Vec3A {
1623 (*self).mul(rhs)
1624 }
1625}
1626
1627impl Add for Vec3A {
1628 type Output = Self;
1629 #[inline]
1630 fn add(self, rhs: Self) -> Self {
1631 Self(unsafe { vaddq_f32(self.0, rhs.0) })
1632 }
1633}
1634
1635impl Add<&Self> for Vec3A {
1636 type Output = Self;
1637 #[inline]
1638 fn add(self, rhs: &Self) -> Self {
1639 self.add(*rhs)
1640 }
1641}
1642
1643impl Add<&Vec3A> for &Vec3A {
1644 type Output = Vec3A;
1645 #[inline]
1646 fn add(self, rhs: &Vec3A) -> Vec3A {
1647 (*self).add(*rhs)
1648 }
1649}
1650
1651impl Add<Vec3A> for &Vec3A {
1652 type Output = Vec3A;
1653 #[inline]
1654 fn add(self, rhs: Vec3A) -> Vec3A {
1655 (*self).add(rhs)
1656 }
1657}
1658
1659impl AddAssign for Vec3A {
1660 #[inline]
1661 fn add_assign(&mut self, rhs: Self) {
1662 self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
1663 }
1664}
1665
1666impl AddAssign<&Self> for Vec3A {
1667 #[inline]
1668 fn add_assign(&mut self, rhs: &Self) {
1669 self.add_assign(*rhs);
1670 }
1671}
1672
1673impl Add<f32> for Vec3A {
1674 type Output = Self;
1675 #[inline]
1676 fn add(self, rhs: f32) -> Self {
1677 Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
1678 }
1679}
1680
1681impl Add<&f32> for Vec3A {
1682 type Output = Self;
1683 #[inline]
1684 fn add(self, rhs: &f32) -> Self {
1685 self.add(*rhs)
1686 }
1687}
1688
1689impl Add<&f32> for &Vec3A {
1690 type Output = Vec3A;
1691 #[inline]
1692 fn add(self, rhs: &f32) -> Vec3A {
1693 (*self).add(*rhs)
1694 }
1695}
1696
1697impl Add<f32> for &Vec3A {
1698 type Output = Vec3A;
1699 #[inline]
1700 fn add(self, rhs: f32) -> Vec3A {
1701 (*self).add(rhs)
1702 }
1703}
1704
1705impl AddAssign<f32> for Vec3A {
1706 #[inline]
1707 fn add_assign(&mut self, rhs: f32) {
1708 self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
1709 }
1710}
1711
1712impl AddAssign<&f32> for Vec3A {
1713 #[inline]
1714 fn add_assign(&mut self, rhs: &f32) {
1715 self.add_assign(*rhs);
1716 }
1717}
1718
1719impl Add<Vec3A> for f32 {
1720 type Output = Vec3A;
1721 #[inline]
1722 fn add(self, rhs: Vec3A) -> Vec3A {
1723 Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
1724 }
1725}
1726
1727impl Add<&Vec3A> for f32 {
1728 type Output = Vec3A;
1729 #[inline]
1730 fn add(self, rhs: &Vec3A) -> Vec3A {
1731 self.add(*rhs)
1732 }
1733}
1734
1735impl Add<&Vec3A> for &f32 {
1736 type Output = Vec3A;
1737 #[inline]
1738 fn add(self, rhs: &Vec3A) -> Vec3A {
1739 (*self).add(*rhs)
1740 }
1741}
1742
1743impl Add<Vec3A> for &f32 {
1744 type Output = Vec3A;
1745 #[inline]
1746 fn add(self, rhs: Vec3A) -> Vec3A {
1747 (*self).add(rhs)
1748 }
1749}
1750
1751impl Sub for Vec3A {
1752 type Output = Self;
1753 #[inline]
1754 fn sub(self, rhs: Self) -> Self {
1755 Self(unsafe { vsubq_f32(self.0, rhs.0) })
1756 }
1757}
1758
1759impl Sub<&Self> for Vec3A {
1760 type Output = Self;
1761 #[inline]
1762 fn sub(self, rhs: &Self) -> Self {
1763 self.sub(*rhs)
1764 }
1765}
1766
1767impl Sub<&Vec3A> for &Vec3A {
1768 type Output = Vec3A;
1769 #[inline]
1770 fn sub(self, rhs: &Vec3A) -> Vec3A {
1771 (*self).sub(*rhs)
1772 }
1773}
1774
1775impl Sub<Vec3A> for &Vec3A {
1776 type Output = Vec3A;
1777 #[inline]
1778 fn sub(self, rhs: Vec3A) -> Vec3A {
1779 (*self).sub(rhs)
1780 }
1781}
1782
1783impl SubAssign for Vec3A {
1784 #[inline]
1785 fn sub_assign(&mut self, rhs: Self) {
1786 self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
1787 }
1788}
1789
1790impl SubAssign<&Self> for Vec3A {
1791 #[inline]
1792 fn sub_assign(&mut self, rhs: &Self) {
1793 self.sub_assign(*rhs);
1794 }
1795}
1796
1797impl Sub<f32> for Vec3A {
1798 type Output = Self;
1799 #[inline]
1800 fn sub(self, rhs: f32) -> Self {
1801 Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
1802 }
1803}
1804
1805impl Sub<&f32> for Vec3A {
1806 type Output = Self;
1807 #[inline]
1808 fn sub(self, rhs: &f32) -> Self {
1809 self.sub(*rhs)
1810 }
1811}
1812
1813impl Sub<&f32> for &Vec3A {
1814 type Output = Vec3A;
1815 #[inline]
1816 fn sub(self, rhs: &f32) -> Vec3A {
1817 (*self).sub(*rhs)
1818 }
1819}
1820
1821impl Sub<f32> for &Vec3A {
1822 type Output = Vec3A;
1823 #[inline]
1824 fn sub(self, rhs: f32) -> Vec3A {
1825 (*self).sub(rhs)
1826 }
1827}
1828
1829impl SubAssign<f32> for Vec3A {
1830 #[inline]
1831 fn sub_assign(&mut self, rhs: f32) {
1832 self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
1833 }
1834}
1835
1836impl SubAssign<&f32> for Vec3A {
1837 #[inline]
1838 fn sub_assign(&mut self, rhs: &f32) {
1839 self.sub_assign(*rhs);
1840 }
1841}
1842
1843impl Sub<Vec3A> for f32 {
1844 type Output = Vec3A;
1845 #[inline]
1846 fn sub(self, rhs: Vec3A) -> Vec3A {
1847 Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
1848 }
1849}
1850
1851impl Sub<&Vec3A> for f32 {
1852 type Output = Vec3A;
1853 #[inline]
1854 fn sub(self, rhs: &Vec3A) -> Vec3A {
1855 self.sub(*rhs)
1856 }
1857}
1858
1859impl Sub<&Vec3A> for &f32 {
1860 type Output = Vec3A;
1861 #[inline]
1862 fn sub(self, rhs: &Vec3A) -> Vec3A {
1863 (*self).sub(*rhs)
1864 }
1865}
1866
1867impl Sub<Vec3A> for &f32 {
1868 type Output = Vec3A;
1869 #[inline]
1870 fn sub(self, rhs: Vec3A) -> Vec3A {
1871 (*self).sub(rhs)
1872 }
1873}
1874
1875impl Rem for Vec3A {
1876 type Output = Self;
1877 #[inline]
1878 fn rem(self, rhs: Self) -> Self {
1879 unsafe {
1880 let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
1881 Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
1882 }
1883 }
1884}
1885
1886impl Rem<&Self> for Vec3A {
1887 type Output = Self;
1888 #[inline]
1889 fn rem(self, rhs: &Self) -> Self {
1890 self.rem(*rhs)
1891 }
1892}
1893
1894impl Rem<&Vec3A> for &Vec3A {
1895 type Output = Vec3A;
1896 #[inline]
1897 fn rem(self, rhs: &Vec3A) -> Vec3A {
1898 (*self).rem(*rhs)
1899 }
1900}
1901
1902impl Rem<Vec3A> for &Vec3A {
1903 type Output = Vec3A;
1904 #[inline]
1905 fn rem(self, rhs: Vec3A) -> Vec3A {
1906 (*self).rem(rhs)
1907 }
1908}
1909
1910impl RemAssign for Vec3A {
1911 #[inline]
1912 fn rem_assign(&mut self, rhs: Self) {
1913 *self = self.rem(rhs);
1914 }
1915}
1916
1917impl RemAssign<&Self> for Vec3A {
1918 #[inline]
1919 fn rem_assign(&mut self, rhs: &Self) {
1920 self.rem_assign(*rhs);
1921 }
1922}
1923
1924impl Rem<f32> for Vec3A {
1925 type Output = Self;
1926 #[inline]
1927 fn rem(self, rhs: f32) -> Self {
1928 self.rem(Self::splat(rhs))
1929 }
1930}
1931
1932impl Rem<&f32> for Vec3A {
1933 type Output = Self;
1934 #[inline]
1935 fn rem(self, rhs: &f32) -> Self {
1936 self.rem(*rhs)
1937 }
1938}
1939
1940impl Rem<&f32> for &Vec3A {
1941 type Output = Vec3A;
1942 #[inline]
1943 fn rem(self, rhs: &f32) -> Vec3A {
1944 (*self).rem(*rhs)
1945 }
1946}
1947
1948impl Rem<f32> for &Vec3A {
1949 type Output = Vec3A;
1950 #[inline]
1951 fn rem(self, rhs: f32) -> Vec3A {
1952 (*self).rem(rhs)
1953 }
1954}
1955
1956impl RemAssign<f32> for Vec3A {
1957 #[inline]
1958 fn rem_assign(&mut self, rhs: f32) {
1959 *self = self.rem(Self::splat(rhs));
1960 }
1961}
1962
1963impl RemAssign<&f32> for Vec3A {
1964 #[inline]
1965 fn rem_assign(&mut self, rhs: &f32) {
1966 self.rem_assign(*rhs);
1967 }
1968}
1969
1970impl Rem<Vec3A> for f32 {
1971 type Output = Vec3A;
1972 #[inline]
1973 fn rem(self, rhs: Vec3A) -> Vec3A {
1974 Vec3A::splat(self).rem(rhs)
1975 }
1976}
1977
1978impl Rem<&Vec3A> for f32 {
1979 type Output = Vec3A;
1980 #[inline]
1981 fn rem(self, rhs: &Vec3A) -> Vec3A {
1982 self.rem(*rhs)
1983 }
1984}
1985
1986impl Rem<&Vec3A> for &f32 {
1987 type Output = Vec3A;
1988 #[inline]
1989 fn rem(self, rhs: &Vec3A) -> Vec3A {
1990 (*self).rem(*rhs)
1991 }
1992}
1993
1994impl Rem<Vec3A> for &f32 {
1995 type Output = Vec3A;
1996 #[inline]
1997 fn rem(self, rhs: Vec3A) -> Vec3A {
1998 (*self).rem(rhs)
1999 }
2000}
2001
impl AsRef<[f32; 3]> for Vec3A {
    /// Borrows the first three lanes as a `[f32; 3]`.
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over a 16-byte `float32x4_t`
        // whose leading lanes are x, y, z, so its first 12 bytes form a valid,
        // suitably aligned `[f32; 3]` for the lifetime of the borrow.
        unsafe { &*(self as *const Self as *const [f32; 3]) }
    }
}

impl AsMut<[f32; 3]> for Vec3A {
    /// Mutably borrows the first three lanes as a `[f32; 3]`.
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        // SAFETY: same layout argument as `AsRef`; `&mut self` guarantees
        // exclusive access, and writes land in the x, y, z lanes.
        unsafe { &mut *(self as *mut Self as *mut [f32; 3]) }
    }
}
2015
2016impl Sum for Vec3A {
2017 #[inline]
2018 fn sum<I>(iter: I) -> Self
2019 where
2020 I: Iterator<Item = Self>,
2021 {
2022 iter.fold(Self::ZERO, Self::add)
2023 }
2024}
2025
2026impl<'a> Sum<&'a Self> for Vec3A {
2027 #[inline]
2028 fn sum<I>(iter: I) -> Self
2029 where
2030 I: Iterator<Item = &'a Self>,
2031 {
2032 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
2033 }
2034}
2035
2036impl Product for Vec3A {
2037 #[inline]
2038 fn product<I>(iter: I) -> Self
2039 where
2040 I: Iterator<Item = Self>,
2041 {
2042 iter.fold(Self::ONE, Self::mul)
2043 }
2044}
2045
2046impl<'a> Product<&'a Self> for Vec3A {
2047 #[inline]
2048 fn product<I>(iter: I) -> Self
2049 where
2050 I: Iterator<Item = &'a Self>,
2051 {
2052 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
2053 }
2054}
2055
2056impl Neg for Vec3A {
2057 type Output = Self;
2058 #[inline]
2059 fn neg(self) -> Self {
2060 Self(unsafe { vnegq_f32(self.0) })
2061 }
2062}
2063
2064impl Neg for &Vec3A {
2065 type Output = Vec3A;
2066 #[inline]
2067 fn neg(self) -> Vec3A {
2068 (*self).neg()
2069 }
2070}
2071
2072impl Index<usize> for Vec3A {
2073 type Output = f32;
2074 #[inline]
2075 fn index(&self, index: usize) -> &Self::Output {
2076 match index {
2077 0 => &self.x,
2078 1 => &self.y,
2079 2 => &self.z,
2080 _ => panic!("index out of bounds"),
2081 }
2082 }
2083}
2084
2085impl IndexMut<usize> for Vec3A {
2086 #[inline]
2087 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
2088 match index {
2089 0 => &mut self.x,
2090 1 => &mut self.y,
2091 2 => &mut self.z,
2092 _ => panic!("index out of bounds"),
2093 }
2094 }
2095}
2096
2097impl fmt::Display for Vec3A {
2098 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2099 if let Some(p) = f.precision() {
2100 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
2101 } else {
2102 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
2103 }
2104 }
2105}
2106
2107impl fmt::Debug for Vec3A {
2108 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2109 fmt.debug_tuple(stringify!(Vec3A))
2110 .field(&self.x)
2111 .field(&self.y)
2112 .field(&self.z)
2113 .finish()
2114 }
2115}
2116
impl From<Vec3A> for float32x4_t {
    /// Exposes the underlying NEON register; zero-cost unwrap of the
    /// `#[repr(transparent)]` newtype.
    #[inline(always)]
    fn from(t: Vec3A) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec3A {
    /// Wraps a raw NEON register; zero-cost. The caller is responsible for the
    /// lane contents (the fourth lane is normally a copy of z — see `Vec3A::new`).
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}
2130
2131impl From<[f32; 3]> for Vec3A {
2132 #[inline]
2133 fn from(a: [f32; 3]) -> Self {
2134 Self::new(a[0], a[1], a[2])
2135 }
2136}
2137
impl From<Vec3A> for [f32; 3] {
    /// Extracts the x, y, z lanes into an array via a single NEON store.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // `vst1q_f32` writes all four lanes (16 bytes), so the scratch slot must be
        // 16-byte aligned and at least 16 bytes, even though only x, y, z are kept.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<[f32; 3]>` provides 16-byte alignment and (presumably
            // via alignment padding) room for the full 16-byte store — TODO confirm
            // against `crate::align16`. After the store, all bytes of the inner
            // `[f32; 3]` are initialized, so `assume_init` is sound.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2150
2151impl From<(f32, f32, f32)> for Vec3A {
2152 #[inline]
2153 fn from(t: (f32, f32, f32)) -> Self {
2154 Self::new(t.0, t.1, t.2)
2155 }
2156}
2157
2158impl From<Vec3A> for (f32, f32, f32) {
2159 #[inline]
2160 fn from(v: Vec3A) -> Self {
2161 (v.x, v.y, v.z)
2162 }
2163}
2164
2165impl From<Vec3> for Vec3A {
2166 #[inline]
2167 fn from(v: Vec3) -> Self {
2168 Self::new(v.x, v.y, v.z)
2169 }
2170}
2171
impl From<Vec3A> for Vec3 {
    /// Narrows the SIMD-aligned `Vec3A` into a plain `Vec3` via a single NEON store.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // `vst1q_f32` writes all four lanes (16 bytes), so the scratch slot must be
        // 16-byte aligned and at least 16 bytes, even though only x, y, z are kept.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<Vec3>` provides 16-byte alignment and (presumably via
            // alignment padding) room for the full 16-byte store — TODO confirm
            // against `crate::align16`. After the store, all fields of the inner
            // `Vec3` are initialized, so `assume_init` is sound.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2184
2185impl From<(Vec2, f32)> for Vec3A {
2186 #[inline]
2187 fn from((v, z): (Vec2, f32)) -> Self {
2188 Self::new(v.x, v.y, z)
2189 }
2190}
2191
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    /// Gives field access (`.x`, `.y`, `.z`) by reinterpreting the SIMD storage.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over `float32x4_t`; the target
        // is assumed to be a plain x/y/z struct overlaying the first three lanes —
        // layout compatibility is the responsibility of `crate::deref::Vec3<f32>`.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec3A {
    /// Mutable counterpart of `Deref`; writes land in the x, y, z lanes.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `Deref`; `&mut self` guarantees
        // exclusive access.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2206
2207impl From<BVec3> for Vec3A {
2208 #[inline]
2209 fn from(v: BVec3) -> Self {
2210 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
2211 }
2212}
2213
2214impl From<BVec3A> for Vec3A {
2215 #[inline]
2216 fn from(v: BVec3A) -> Self {
2217 let bool_array: [bool; 3] = v.into();
2218 Self::new(
2219 f32::from(bool_array[0]),
2220 f32::from(bool_array[1]),
2221 f32::from(bool_array[2]),
2222 )
2223 }
2224}