#[allow(unused)] use std::mem;

use crate::*;

#[cfg(_XM_NO_INTRINSICS_)]
macro_rules! XMISNAN {
    ($x:expr) => {
        $x.is_nan()
    }
}

#[cfg(_XM_NO_INTRINSICS_)]
macro_rules! XMISINF {
    ($x:expr) => {
        $x.is_infinite()
    }
}

#[cfg(_XM_SSE_INTRINSICS_)]
macro_rules! XM3UNPACK3INTO4 {
    ($l1:expr, $l2:expr, $l3:expr) => {
        let V3: XMVECTOR = _mm_shuffle_ps($l2, $l3, _MM_SHUFFLE(0, 0, 3, 2));
        let mut V2: XMVECTOR = _mm_shuffle_ps($l2, $l1, _MM_SHUFFLE(3, 3, 1, 0));
        V2 = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(1, 1, 0, 2));
        let V4: XMVECTOR = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128($l3), 32 / 8));
        (V2, V3, V4)
    }
}

#[cfg(_XM_SSE_INTRINSICS_)]
macro_rules! XM3PACK4INTO3 {
    ($V1:expr, $V2:expr, $V3:expr, $V4:expr) => {
        let v2x: XMVECTOR = _mm_shuffle_ps($V2, $V3, _MM_SHUFFLE(1, 0, 2, 1));
        $V2 = _mm_shuffle_ps($V2, $V1, _MM_SHUFFLE(2, 2, 0, 0));
        $V1 = _mm_shuffle_ps($V1, $V2, _MM_SHUFFLE(0, 2, 1, 0));
        $V3 = _mm_shuffle_ps($V3, $V4, _MM_SHUFFLE(0, 0, 2, 2));
        $V3 = _mm_shuffle_ps($V3, $V4, _MM_SHUFFLE(2, 1, 2, 0));
        (v2x, $V1, $V3)
    }
}

#[inline]
pub fn XMVectorZero() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult = XMVECTORF32 { f: [0.0, 0.0, 0.0, 0.0] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_setzero_ps();
    }
}

#[inline]
pub fn XMVectorSet(
    x: f32,
    y: f32,
    z: f32,
    w: f32,
) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult = XMVECTORF32 { f: [x, y, z, w] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_set_ps(w, z, y, x);
    }
}
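
// Added sanity check (not part of the original source): XMVectorSet stores its
// arguments in (x, y, z, w) lane order, which is why the SSE path reverses the
// argument order for _mm_set_ps. A minimal sketch using the getters below.
#[test]
fn test_XMVectorSet_lane_order() {
    let v = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    assert_eq!(1.0, XMVectorGetX(v));
    assert_eq!(2.0, XMVectorGetY(v));
    assert_eq!(3.0, XMVectorGetZ(v));
    assert_eq!(4.0, XMVectorGetW(v));
}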

#[inline]
pub fn XMVectorSetInt(
    x: u32,
    y: u32,
    z: u32,
    w: u32,
) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult = XMVECTORU32 { u: [x, y, z, w] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_set_epi32(w as i32, z as i32, y as i32, x as i32);
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorReplicate(Value: f32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = Value;
        vResult.f[1] = Value;
        vResult.f[2] = Value;
        vResult.f[3] = Value;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_set_ps1(Value);
    }
}
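
// Added sanity check (not part of the original source): XMVectorReplicate
// broadcasts a single scalar into all four lanes.
#[test]
fn test_XMVectorReplicate() {
    let v = XMVectorReplicate(5.0);
    for i in 0..4 {
        assert_eq!(5.0, XMVectorGetByIndex(v, i));
    }
}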

#[inline]
pub fn XMVectorReplicatePtr(pValue: &f32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        let Value = *pValue;
        vResult.f[0] = Value;
        vResult.f[1] = Value;
        vResult.f[2] = Value;
        vResult.f[3] = Value;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_AVX_INTRINSICS_)]
    unsafe {
        return _mm_broadcast_ss(pValue);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_AVX_INTRINSICS_)))]
    unsafe {
        return _mm_load_ps1(pValue);
    }
}

#[inline]
pub fn XMVectorReplicateInt(Value: u32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORU32 = crate::undefined();
        vResult.u[0] = Value;
        vResult.u[1] = Value;
        vResult.u[2] = Value;
        vResult.u[3] = Value;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_set1_epi32(Value as i32);
        return _mm_castsi128_ps(vTemp);
    }
}

#[inline]
pub fn XMVectorTrueInt() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult = XMVECTORU32 { u: [0xFFFFFFFFu32, 0xFFFFFFFFu32, 0xFFFFFFFFu32, 0xFFFFFFFFu32] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_set1_epi32(-1);
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorFalseInt() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult = XMVECTORF32 { f: [0.0, 0.0, 0.0, 0.0] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_setzero_ps();
    }
}

#[inline]
pub fn XMVectorSplatX(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = V.vector4_f32[0];
        vResult.f[1] = V.vector4_f32[0];
        vResult.f[2] = V.vector4_f32[0];
        vResult.f[3] = V.vector4_f32[0];
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_AVX2_INTRINSICS_, _XM_FAVOR_INTEL_))]
    unsafe {
        return _mm_broadcastss_ps(V);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(all(_XM_AVX2_INTRINSICS_, _XM_FAVOR_INTEL_))))]
    unsafe {
        return XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 0, 0, 0));
    }
}

#[inline]
pub fn XMVectorSplatY(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = V.vector4_f32[1];
        vResult.f[1] = V.vector4_f32[1];
        vResult.f[2] = V.vector4_f32[1];
        vResult.f[3] = V.vector4_f32[1];
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1));
    }
}

#[inline]
pub fn XMVectorSplatZ(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = V.vector4_f32[2];
        vResult.f[1] = V.vector4_f32[2];
        vResult.f[2] = V.vector4_f32[2];
        vResult.f[3] = V.vector4_f32[2];
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 2, 2, 2));
    }
}

#[inline]
pub fn XMVectorSplatW(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = V.vector4_f32[3];
        vResult.f[1] = V.vector4_f32[3];
        vResult.f[2] = V.vector4_f32[3];
        vResult.f[3] = V.vector4_f32[3];
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 3, 3, 3));
    }
}

#[inline]
pub fn XMVectorSplatOne() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = 1.0;
        vResult.f[1] = 1.0;
        vResult.f[2] = 1.0;
        vResult.f[3] = 1.0;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return g_XMOne.v;
    }
}

#[inline]
pub fn XMVectorSplatInfinity() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORU32 = crate::undefined();
        vResult.u[0] = 0x7F800000;
        vResult.u[1] = 0x7F800000;
        vResult.u[2] = 0x7F800000;
        vResult.u[3] = 0x7F800000;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return g_XMInfinity.v;
    }
}

#[inline]
pub fn XMVectorSplatQNaN() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORU32 = crate::undefined();
        vResult.u[0] = 0x7FC00000;
        vResult.u[1] = 0x7FC00000;
        vResult.u[2] = 0x7FC00000;
        vResult.u[3] = 0x7FC00000;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return g_XMQNaN.v;
    }
}

#[inline]
pub fn XMVectorSplatEpsilon() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORU32 = crate::undefined();
        vResult.u[0] = 0x34000000;
        vResult.u[1] = 0x34000000;
        vResult.u[2] = 0x34000000;
        vResult.u[3] = 0x34000000;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return g_XMEpsilon.v;
    }
}

#[inline]
pub fn XMVectorSplatSignMask() -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTORU32 = crate::undefined();
        vResult.u[0] = 0x80000000u32;
        vResult.u[1] = 0x80000000u32;
        vResult.u[2] = 0x80000000u32;
        vResult.u[3] = 0x80000000u32;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_set1_epi32(0x80000000u32 as i32);
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorGetByIndex(V: XMVECTOR, i: usize) -> f32 {
    debug_assert!(i < 4);

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_f32[i];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut U: XMVECTORF32 = crate::undefined();
        U.v = V;
        return U.f[i];
    }
}

#[inline]
pub fn XMVectorGetX(V: XMVECTOR) -> f32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_f32[0];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cvtss_f32(V);
    }
}

#[inline]
pub fn XMVectorGetY(V: XMVECTOR) -> f32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_f32[1];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1));
        return _mm_cvtss_f32(vTemp);
    }
}

#[inline]
pub fn XMVectorGetZ(V: XMVECTOR) -> f32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_f32[2];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 2, 2, 2));
        return _mm_cvtss_f32(vTemp);
    }
}

#[inline]
pub fn XMVectorGetW(V: XMVECTOR) -> f32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_f32[3];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 3, 3, 3));
        return _mm_cvtss_f32(vTemp);
    }
}

#[inline]
pub fn XMVectorGetXPtr(
    x: &mut f32,
    V: FXMVECTOR,
)
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        *x = V.vector4_f32[0];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        _mm_store_ss(x, V);
    }
}

#[inline]
pub fn XMVectorGetYPtr(
    y: &mut f32,
    V: FXMVECTOR,
)
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        *y = V.vector4_f32[1];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        *mem::transmute::<_, *mut i32>(y) = _mm_extract_ps(V, 1);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1));
        _mm_store_ss(y, vResult);
    }
}

#[inline]
pub fn XMVectorGetZPtr(
    z: &mut f32,
    V: FXMVECTOR,
)
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        *z = V.vector4_f32[2];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        *mem::transmute::<_, *mut i32>(z) = _mm_extract_ps(V, 2);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 2, 2, 2));
        _mm_store_ss(z, vResult);
    }
}

#[inline]
pub fn XMVectorGetWPtr(
    w: &mut f32,
    V: FXMVECTOR,
)
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        *w = V.vector4_f32[3];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        *mem::transmute::<_, *mut i32>(w) = _mm_extract_ps(V, 3);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 3, 3, 3));
        _mm_store_ss(w, vResult);
    }
}

#[inline]
pub fn XMVectorGetIntByIndex(V: XMVECTOR, i: usize) -> u32 {
    debug_assert!(i < 4);

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_u32[i];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut U: XMVECTORU32 = crate::undefined();
        U.v = V;
        return U.u[i];
    }
}

#[inline]
pub fn XMVectorGetIntX(V: XMVECTOR) -> u32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_u32[0];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return (_mm_cvtsi128_si32(_mm_castps_si128(V))) as u32;
    }
}

#[inline]
pub fn XMVectorGetIntY(V: XMVECTOR) -> u32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_u32[1];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let V1: __m128i = _mm_castps_si128(V);
        return (_mm_extract_epi32(V1, 1)) as u32;
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let vResulti: __m128i = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(1, 1, 1, 1));
        return (_mm_cvtsi128_si32(vResulti)) as u32;
    }
}

#[inline]
pub fn XMVectorGetIntZ(V: XMVECTOR) -> u32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_u32[2];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let V1: __m128i = _mm_castps_si128(V);
        return (_mm_extract_epi32(V1, 2)) as u32;
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let vResulti: __m128i = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(2, 2, 2, 2));
        return (_mm_cvtsi128_si32(vResulti)) as u32;
    }
}

#[inline]
pub fn XMVectorGetIntW(V: XMVECTOR) -> u32 {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return V.vector4_u32[3];
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let V1: __m128i = _mm_castps_si128(V);
        return (_mm_extract_epi32(V1, 3)) as u32;
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let vResulti: __m128i = _mm_shuffle_epi32(_mm_castps_si128(V), _MM_SHUFFLE(3, 3, 3, 3));
        return (_mm_cvtsi128_si32(vResulti)) as u32;
    }
}

#[inline]
pub fn XMVectorSetByIndex(V: XMVECTOR, f: f32, i: usize) -> XMVECTOR {
    debug_assert!(i < 4);

    unsafe {
        let mut U: XMVECTORF32 = crate::undefined();
        U.v = V;
        U.f[i] = f;
        return U.v;
    }
}
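
// Added sanity check (not part of the original source): XMVectorSetByIndex
// replaces a single lane and leaves the others untouched.
#[test]
fn test_XMVectorSetByIndex() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSetByIndex(a, 9.0, 2);
    assert_eq!(1.0, XMVectorGetX(b));
    assert_eq!(2.0, XMVectorGetY(b));
    assert_eq!(9.0, XMVectorGetZ(b));
    assert_eq!(4.0, XMVectorGetW(b));
}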

#[inline]
pub fn XMVectorSetX(V: XMVECTOR, x: f32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORF32 {
            f: [
                x,
                V.vector4_f32[1],
                V.vector4_f32[2],
                V.vector4_f32[3]
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_set_ss(x);
        vResult = _mm_move_ss(V, vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVectorSetY(V: XMVECTOR, y: f32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORF32 {
            f: [
                V.vector4_f32[0],
                y,
                V.vector4_f32[2],
                V.vector4_f32[3]
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_set_ss(y);
        vResult = _mm_insert_ps(V, vResult, 0x10);
        return vResult;
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 2, 0, 1));
        let vTemp: XMVECTOR = _mm_set_ss(y);
        vResult = _mm_move_ss(vResult, vTemp);
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(3, 2, 0, 1));
        return vResult;
    }
}

#[inline]
pub fn XMVectorSetZ(V: XMVECTOR, z: f32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORF32 {
            f: [
                V.vector4_f32[0],
                V.vector4_f32[1],
                z,
                V.vector4_f32[3]
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_set_ss(z);
        vResult = _mm_insert_ps(V, vResult, 0x20);
        return vResult;
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 0, 1, 2));
        let vTemp: XMVECTOR = _mm_set_ss(z);
        vResult = _mm_move_ss(vResult, vTemp);
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(3, 0, 1, 2));
        return vResult;
    }
}

#[inline]
pub fn XMVectorSetW(V: XMVECTOR, w: f32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORF32 {
            f: [
                V.vector4_f32[0],
                V.vector4_f32[1],
                V.vector4_f32[2],
                w,
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_set_ss(w);
        vResult = _mm_insert_ps(V, vResult, 0x30);
        return vResult;
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 2, 1, 3));
        let vTemp: XMVECTOR = _mm_set_ss(w);
        vResult = _mm_move_ss(vResult, vTemp);
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(0, 2, 1, 3));
        return vResult;
    }
}
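
// Added sanity check (not part of the original source): each of the single-lane
// setters above should only modify its own lane.
#[test]
fn test_XMVectorSetXYZW() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSetW(XMVectorSetZ(XMVectorSetY(XMVectorSetX(a, 5.0), 6.0), 7.0), 8.0);
    assert_eq!(5.0, XMVectorGetX(b));
    assert_eq!(6.0, XMVectorGetY(b));
    assert_eq!(7.0, XMVectorGetZ(b));
    assert_eq!(8.0, XMVectorGetW(b));
}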

#[inline]
pub fn XMVectorSetIntByIndex(V: XMVECTOR, x: u32, i: usize) -> XMVECTOR {
    debug_assert!(i < 4);

    unsafe {
        let mut U: XMVECTORU32 = crate::undefined();
        U.v = V;
        U.u[i] = x;
        return U.v;
    }
}

#[inline]
pub fn XMVectorSetIntX(V: XMVECTOR, x: u32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORU32 {
            u: [
                x,
                V.vector4_u32[1],
                V.vector4_u32[2],
                V.vector4_u32[3]
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cvtsi32_si128(x as i32);
        let vResult: XMVECTOR = _mm_move_ss(V, _mm_castsi128_ps(vTemp));
        return vResult;
    }
}

#[inline]
pub fn XMVectorSetIntY(V: XMVECTOR, y: u32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORU32 {
            u: [
                V.vector4_u32[0],
                y,
                V.vector4_u32[2],
                V.vector4_u32[3]
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vResult: __m128i = _mm_castps_si128(V);
        vResult = _mm_insert_epi32(vResult, y as i32, 1);
        return _mm_castsi128_ps(vResult);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 2, 0, 1));
        let vTemp: __m128i = _mm_cvtsi32_si128(y as i32);
        vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp));
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(3, 2, 0, 1));
        return vResult;
    }
}

#[inline]
pub fn XMVectorSetIntZ(V: XMVECTOR, z: u32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORU32 {
            u: [
                V.vector4_u32[0],
                V.vector4_u32[1],
                z,
                V.vector4_u32[3]
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vResult: __m128i = _mm_castps_si128(V);
        vResult = _mm_insert_epi32(vResult, z as i32, 2);
        return _mm_castsi128_ps(vResult);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 0, 1, 2));
        let vTemp: __m128i = _mm_cvtsi32_si128(z as i32);
        vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp));
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(3, 0, 1, 2));
        return vResult;
    }
}

#[inline]
pub fn XMVectorSetIntW(V: XMVECTOR, w: u32) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORU32 {
            u: [
                V.vector4_u32[0],
                V.vector4_u32[1],
                V.vector4_u32[2],
                w,
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vResult: __m128i = _mm_castps_si128(V);
        vResult = _mm_insert_epi32(vResult, w as i32, 3);
        return _mm_castsi128_ps(vResult);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 2, 1, 3));
        let vTemp: __m128i = _mm_cvtsi32_si128(w as i32);
        vResult = _mm_move_ss(vResult, _mm_castsi128_ps(vTemp));
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(0, 2, 1, 3));
        return vResult;
    }
}

#[inline]
pub fn XMVectorSwizzle(
    V: FXMVECTOR,
    E0: u32,
    E1: u32,
    E2: u32,
    E3: u32
) -> XMVECTOR
{
    debug_assert!((E0 < 4) && (E1 < 4) && (E2 < 4) && (E3 < 4));

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let U = XMVECTORF32 {
            f: [
                V.vector4_f32[E0 as usize],
                V.vector4_f32[E1 as usize],
                V.vector4_f32[E2 as usize],
                V.vector4_f32[E3 as usize],
            ]
        };
        return U.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_AVX_INTRINSICS_)]
    unsafe {
        let elem: [i32; 4] = [ E0 as i32, E1 as i32, E2 as i32, E3 as i32 ];
        let vControl: __m128i = _mm_loadu_si128(mem::transmute(&elem[0]));
        return _mm_permutevar_ps(V, vControl);
    }

    #[cfg(all(not(_XM_AVX_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let aPtr = mem::transmute::<_, *const u32>(&V);

        let mut Result: XMVECTOR = crate::undefined();

        let pWork = mem::transmute::<_, *mut u32>(&mut Result);

        idx!(mut pWork[0]) = idx!(aPtr[E0]);
        idx!(mut pWork[1]) = idx!(aPtr[E1]);
        idx!(mut pWork[2]) = idx!(aPtr[E2]);
        idx!(mut pWork[3]) = idx!(aPtr[E3]);

        return Result;
    }
}

#[test]
fn test_XMVectorSwizzle() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSwizzle(a, 3, 2, 1, 0);
    let c = XMVectorSet(4.0, 3.0, 2.0, 1.0);

    assert_eq!(XMVectorGetX(b), XMVectorGetX(c));
    assert_eq!(XMVectorGetY(b), XMVectorGetY(c));
    assert_eq!(XMVectorGetZ(b), XMVectorGetZ(c));
    assert_eq!(XMVectorGetW(b), XMVectorGetW(c));
}

#[inline]
pub fn XMVectorPermute(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    PermuteX: u32,
    PermuteY: u32,
    PermuteZ: u32,
    PermuteW: u32
) -> XMVECTOR
{
    debug_assert!(PermuteX <= 7 && PermuteY <= 7 && PermuteZ <= 7 && PermuteW <= 7);

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_AVX_INTRINSICS_)]
    unsafe {
        const three: XMVECTORU32 = XMVECTORU32 { u: [ 3, 3, 3, 3 ] };

        let elem: Align16<[u32; 4]> = Align16([PermuteX, PermuteY, PermuteZ, PermuteW]);
        let mut vControl: __m128i = _mm_load_si128(mem::transmute::<_, *const __m128i>(&elem[0]));

        let vSelect: __m128i = _mm_cmpgt_epi32(vControl, three.m128i());
        vControl = _mm_castps_si128(_mm_and_ps(_mm_castsi128_ps(vControl), three.v));

        let shuffled1: __m128 = _mm_permutevar_ps(V1, vControl);
        let shuffled2: __m128 = _mm_permutevar_ps(V2, vControl);

        let masked1: __m128 = _mm_andnot_ps(_mm_castsi128_ps(vSelect), shuffled1);
        let masked2: __m128 = _mm_and_ps(_mm_castsi128_ps(vSelect), shuffled2);

        return _mm_or_ps(masked1, masked2);
    }

    #[cfg(all(not(_XM_ARM_NEON_INTRINSICS_), not(_XM_AVX_INTRINSICS_)))]
    unsafe {
        let aPtr: &[*const u32; 2] = &[
            mem::transmute(&V1),
            mem::transmute(&V2),
        ];

        let mut Result: XMVECTOR = crate::undefined();

        let pWork = mem::transmute::<_, *mut u32>(&mut Result);

        let i0: u32 = PermuteX & 3;
        let vi0: u32 = PermuteX >> 2;
        idx!(mut pWork[0]) = idx!(aPtr[vi0][i0]);

        let i1: u32 = PermuteY & 3;
        let vi1: u32 = PermuteY >> 2;
        idx!(mut pWork[1]) = idx!(aPtr[vi1][i1]);

        let i2: u32 = PermuteZ & 3;
        let vi2: u32 = PermuteZ >> 2;
        idx!(mut pWork[2]) = idx!(aPtr[vi2][i2]);

        let i3: u32 = PermuteW & 3;
        let vi3: u32 = PermuteW >> 2;
        idx!(mut pWork[3]) = idx!(aPtr[vi3][i3]);

        return Result;
    }
}

#[test]
fn test_XMVectorPermute() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSet(5.0, 6.0, 7.0, 8.0);

    let c = XMVectorPermute(a, b, 0, 2, 4, 6);
    let d = XMVectorSet(1.0, 3.0, 5.0, 7.0);

    assert_eq!(XMVectorGetX(c), XMVectorGetX(d));
    assert_eq!(XMVectorGetY(c), XMVectorGetY(d));
    assert_eq!(XMVectorGetZ(c), XMVectorGetZ(d));
    assert_eq!(XMVectorGetW(c), XMVectorGetW(d));

    let e = XMVectorPermute(a, b, 1, 3, 5, 7);
    let f = XMVectorSet(2.0, 4.0, 6.0, 8.0);

    assert_eq!(XMVectorGetX(e), XMVectorGetX(f));
    assert_eq!(XMVectorGetY(e), XMVectorGetY(f));
    assert_eq!(XMVectorGetZ(e), XMVectorGetZ(f));
    assert_eq!(XMVectorGetW(e), XMVectorGetW(f));
}

#[inline]
pub fn XMVectorSelectControl(
    VectorIndex0: u32,
    VectorIndex1: u32,
    VectorIndex2: u32,
    VectorIndex3: u32,
) -> XMVECTOR
{
    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vTemp: __m128i = _mm_set_epi32(VectorIndex3 as i32, VectorIndex2 as i32, VectorIndex1 as i32, VectorIndex0 as i32);
        vTemp = _mm_cmpgt_epi32(vTemp, g_XMZero.m128i());
        return _mm_castsi128_ps(vTemp);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(not(any(_XM_SSE_INTRINSICS_, _XM_ARM_NEON_INTRINSICS_)))]
    unsafe {
        let mut ControlVector: XMVECTOR = crate::undefined();
        let ControlElement: [u32; 2] = [
            XM_SELECT_0,
            XM_SELECT_1,
        ];

        ControlVector.vector4_u32[0] = ControlElement[VectorIndex0 as usize];
        ControlVector.vector4_u32[1] = ControlElement[VectorIndex1 as usize];
        ControlVector.vector4_u32[2] = ControlElement[VectorIndex2 as usize];
        ControlVector.vector4_u32[3] = ControlElement[VectorIndex3 as usize];

        return ControlVector;
    }
}

#[inline]
pub fn XMVectorSelect(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    Control: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                (V1.vector4_u32[0] & !Control.vector4_u32[0]) | (V2.vector4_u32[0] & Control.vector4_u32[0]),
                (V1.vector4_u32[1] & !Control.vector4_u32[1]) | (V2.vector4_u32[1] & Control.vector4_u32[1]),
                (V1.vector4_u32[2] & !Control.vector4_u32[2]) | (V2.vector4_u32[2] & Control.vector4_u32[2]),
                (V1.vector4_u32[3] & !Control.vector4_u32[3]) | (V2.vector4_u32[3] & Control.vector4_u32[3]),
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp1: XMVECTOR = _mm_andnot_ps(Control, V1);
        let vTemp2: XMVECTOR = _mm_and_ps(V2, Control);
        return _mm_or_ps(vTemp1, vTemp2);
    }
}
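
// Added sanity check (not part of the original source): a control vector built
// with XMVectorSelectControl picks lanes from V2 where the index is 1 and from
// V1 where it is 0.
#[test]
fn test_XMVectorSelect() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSet(5.0, 6.0, 7.0, 8.0);
    let control = XMVectorSelectControl(0, 1, 0, 1);
    let r = XMVectorSelect(a, b, control);
    assert_eq!(1.0, XMVectorGetX(r));
    assert_eq!(6.0, XMVectorGetY(r));
    assert_eq!(3.0, XMVectorGetZ(r));
    assert_eq!(8.0, XMVectorGetW(r));
}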

#[inline]
pub fn XMVectorMergeXY(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                V1.vector4_u32[0],
                V2.vector4_u32[0],
                V1.vector4_u32[1],
                V2.vector4_u32[1],
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_unpacklo_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorMergeZW(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                V1.vector4_u32[2],
                V2.vector4_u32[2],
                V1.vector4_u32[3],
                V2.vector4_u32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_unpackhi_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorShiftLeft(V1: FXMVECTOR, V2: FXMVECTOR, Elements: u32) -> XMVECTOR {
    debug_assert!(Elements < 4);
    return XMVectorPermute(V1, V2, Elements, ((Elements) + 1), ((Elements) + 2), ((Elements) + 3));
}

#[inline]
pub fn XMVectorRotateLeft(V: FXMVECTOR, Elements: u32) -> XMVECTOR {
    debug_assert!(Elements < 4);
    return XMVectorSwizzle(V, Elements & 3, (Elements + 1) & 3, (Elements + 2) & 3, (Elements + 3) & 3);
}

#[inline]
pub fn XMVectorRotateRight(V: FXMVECTOR, Elements: u32) -> XMVECTOR {
    debug_assert!(Elements < 4);
    return XMVectorSwizzle(V, (4 - (Elements)) & 3, (5 - (Elements)) & 3, (6 - (Elements)) & 3, (7 - (Elements)) & 3);
}
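
// Added sanity check (not part of the original source): XMVectorShiftLeft pulls
// replacement lanes from V2, while the rotate variants wrap around within V.
#[test]
fn test_XMVectorShiftRotate() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSet(5.0, 6.0, 7.0, 8.0);

    let s = XMVectorShiftLeft(a, b, 1);
    assert_eq!(2.0, XMVectorGetX(s));
    assert_eq!(5.0, XMVectorGetW(s));

    let l = XMVectorRotateLeft(a, 1);
    assert_eq!(2.0, XMVectorGetX(l));
    assert_eq!(1.0, XMVectorGetW(l));

    let r = XMVectorRotateRight(a, 1);
    assert_eq!(4.0, XMVectorGetX(r));
    assert_eq!(3.0, XMVectorGetW(r));
}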

#[inline]
pub fn XMVectorInsert(
    VD: FXMVECTOR,
    VS: FXMVECTOR,
    VSLeftRotateElements: u32,
    Select0: u32,
    Select1: u32,
    Select2: u32,
    Select3: u32,
) -> XMVECTOR {
    let Control: XMVECTOR = XMVectorSelectControl(Select0 & 1, Select1 & 1, Select2 & 1, Select3 & 1);
    return XMVectorSelect(VD, XMVectorRotateLeft(VS, VSLeftRotateElements), Control);
}
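
// Added sanity check (not part of the original source): with no rotation,
// XMVectorInsert behaves like a per-lane select of VS over VD.
#[test]
fn test_XMVectorInsert() {
    let vd = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let vs = XMVectorSet(5.0, 6.0, 7.0, 8.0);
    let r = XMVectorInsert(vd, vs, 0, 0, 1, 0, 1);
    assert_eq!(1.0, XMVectorGetX(r));
    assert_eq!(6.0, XMVectorGetY(r));
    assert_eq!(3.0, XMVectorGetZ(r));
    assert_eq!(8.0, XMVectorGetW(r));
}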

#[inline]
pub fn XMVectorEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_f32[0] == V2.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[1] == V2.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[2] == V2.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[3] == V2.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmpeq_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorEqualR(pCR: &mut u32, V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let ux = if V1.vector4_f32[0] == V2.vector4_f32[0] { 0xFFFFFFFFu32 } else { 0 };
        let uy = if V1.vector4_f32[1] == V2.vector4_f32[1] { 0xFFFFFFFFu32 } else { 0 };
        let uz = if V1.vector4_f32[2] == V2.vector4_f32[2] { 0xFFFFFFFFu32 } else { 0 };
        let uw = if V1.vector4_f32[3] == V2.vector4_f32[3] { 0xFFFFFFFFu32 } else { 0 };
        let mut CR = 0;
        if ubool(ux & uy & uz & uw) {
            CR = XM_CRMASK_CR6TRUE;
        } else if !ubool(ux | uy | uz | uw) {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;

        let Control = XMVECTORU32 { u: [ ux, uy, uz, uw ]};
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        let mut CR = 0;
        let iTest: i32 = _mm_movemask_ps(vTemp);
        if (iTest == 0xf)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;
        return vTemp;
    }
}

#[test]
fn test_XMVectorEqualR() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let mut cr = 0;
    let r = XMVectorEqualR(&mut cr, a, b);
    assert!(XMComparisonAllTrue(cr));
    assert_eq!([true, true, true, true], [XMVectorGetX(r).is_nan(), XMVectorGetY(r).is_nan(), XMVectorGetZ(r).is_nan(), XMVectorGetW(r).is_nan()]);

    let a = XMVectorSet(0.0, 0.0, 0.0, 0.0);
    let b = XMVectorSplatOne();
    let r = XMVectorEqualR(&mut cr, a, b);
    assert!(XMComparisonAllFalse(cr));
    assert_eq!([false, false, false, false], [XMVectorGetX(r).is_nan(), XMVectorGetY(r).is_nan(), XMVectorGetZ(r).is_nan(), XMVectorGetW(r).is_nan()]);

    let a = XMVectorSet(1.0, 0.0, 1.0, 0.0);
    let b = XMVectorSplatOne();
    let r = XMVectorEqualR(&mut cr, a, b);
    assert!(XMComparisonAnyFalse(cr));
    assert!(XMComparisonAnyTrue(cr));
    assert_eq!([true, false, true, false], [XMVectorGetX(r).is_nan(), XMVectorGetY(r).is_nan(), XMVectorGetZ(r).is_nan(), XMVectorGetW(r).is_nan()]);
}

#[inline]
pub fn XMVectorEqualInt(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_u32[0] == V2.vector4_u32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_u32[1] == V2.vector4_u32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_u32[2] == V2.vector4_u32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_u32[3] == V2.vector4_u32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorEqualIntR(pCR: &mut u32, V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let Control: XMVECTOR = XMVectorEqualInt(V1, V2);

        *pCR = 0;
        if (XMVector4EqualInt(Control, XMVectorTrueInt()))
        {
            *pCR |= XM_CRMASK_CR6TRUE;
        }
        else if (XMVector4EqualInt(Control, XMVectorFalseInt()))
        {
            *pCR |= XM_CRMASK_CR6FALSE;
        }
        return Control;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        let iTemp: i32 = _mm_movemask_ps(_mm_castsi128_ps(V));
        let mut CR: u32 = 0;
        if (iTemp == 0x0F)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTemp)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorNearEqual(V1: FXMVECTOR, V2: FXMVECTOR, Epsilon: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let fDeltax: f32 = V1.vector4_f32[0] - V2.vector4_f32[0];
        let fDeltay: f32 = V1.vector4_f32[1] - V2.vector4_f32[1];
        let fDeltaz: f32 = V1.vector4_f32[2] - V2.vector4_f32[2];
        let fDeltaw: f32 = V1.vector4_f32[3] - V2.vector4_f32[3];

        let fDeltax = fabsf(fDeltax);
        let fDeltay = fabsf(fDeltay);
        let fDeltaz = fabsf(fDeltaz);
        let fDeltaw = fabsf(fDeltaw);

        let Control = XMVECTORU32 {
            u: [
                if fDeltax <= Epsilon.vector4_f32[0] { 0xFFFFFFFFu32 } else { 0 },
                if fDeltay <= Epsilon.vector4_f32[1] { 0xFFFFFFFFu32 } else { 0 },
                if fDeltaz <= Epsilon.vector4_f32[2] { 0xFFFFFFFFu32 } else { 0 },
                if fDeltaw <= Epsilon.vector4_f32[3] { 0xFFFFFFFFu32 } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vDelta: XMVECTOR = _mm_sub_ps(V1, V2);
        let mut vTemp: XMVECTOR = _mm_setzero_ps();
        vTemp = _mm_sub_ps(vTemp, vDelta);
        vTemp = _mm_max_ps(vTemp, vDelta);
        vTemp = _mm_cmple_ps(vTemp, Epsilon);
        return vTemp;
    }
}
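
// Added sanity check (not part of the original source): lanes within Epsilon
// compare as all-ones masks, which read back as NaN when viewed as f32,
// mirroring the style of test_XMVectorEqualR above.
#[test]
fn test_XMVectorNearEqual() {
    let a = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let b = XMVectorSet(1.05, 2.5, 3.05, 4.5);
    let eps = XMVectorReplicate(0.1);
    let r = XMVectorNearEqual(a, b, eps);
    assert_eq!(
        [true, false, true, false],
        [XMVectorGetX(r).is_nan(), XMVectorGetY(r).is_nan(), XMVectorGetZ(r).is_nan(), XMVectorGetW(r).is_nan()]
    );
}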

#[inline]
pub fn XMVectorNotEqual(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_f32[0] != V2.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[1] != V2.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[2] != V2.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[3] != V2.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmpneq_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorNotEqualInt(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_u32[0] != V2.vector4_u32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_u32[1] != V2.vector4_u32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_u32[2] != V2.vector4_u32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_u32[3] != V2.vector4_u32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return _mm_xor_ps(_mm_castsi128_ps(V), g_XMNegOneMask.v);
    }
}

#[inline]
pub fn XMVectorGreater(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_f32[0] > V2.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[1] > V2.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[2] > V2.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[3] > V2.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmpgt_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorGreaterR(pCR: &mut u32, V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let ux = if V1.vector4_f32[0] > V2.vector4_f32[0] { 0xFFFFFFFFu32 } else { 0 };
        let uy = if V1.vector4_f32[1] > V2.vector4_f32[1] { 0xFFFFFFFFu32 } else { 0 };
        let uz = if V1.vector4_f32[2] > V2.vector4_f32[2] { 0xFFFFFFFFu32 } else { 0 };
        let uw = if V1.vector4_f32[3] > V2.vector4_f32[3] { 0xFFFFFFFFu32 } else { 0 };
        let mut CR = 0;
        if ubool(ux & uy & uz & uw) {
            CR = XM_CRMASK_CR6TRUE;
        } else if !ubool(ux | uy | uz | uw) {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;

        let Control = XMVECTORU32 { u: [ ux, uy, uz, uw ]};
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        let mut CR = 0;
        let iTest: i32 = _mm_movemask_ps(vTemp);
        if (iTest == 0xf)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;
        return vTemp;
    }
}

#[inline]
pub fn XMVectorGreaterOrEqual(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_f32[0] >= V2.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[1] >= V2.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[2] >= V2.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[3] >= V2.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmpge_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorGreaterOrEqualR(pCR: &mut u32, V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let ux = if V1.vector4_f32[0] >= V2.vector4_f32[0] { 0xFFFFFFFFu32 } else { 0 };
        let uy = if V1.vector4_f32[1] >= V2.vector4_f32[1] { 0xFFFFFFFFu32 } else { 0 };
        let uz = if V1.vector4_f32[2] >= V2.vector4_f32[2] { 0xFFFFFFFFu32 } else { 0 };
        let uw = if V1.vector4_f32[3] >= V2.vector4_f32[3] { 0xFFFFFFFFu32 } else { 0 };
        let mut CR = 0;
        if ubool(ux & uy & uz & uw) {
            CR = XM_CRMASK_CR6TRUE;
        } else if !ubool(ux | uy | uz | uw) {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;

        let Control = XMVECTORU32 { u: [ ux, uy, uz, uw ]};
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        let mut CR = 0;
        let iTest: i32 = _mm_movemask_ps(vTemp);
        if (iTest == 0xf)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        *pCR = CR;
        return vTemp;
    }
}

#[inline]
pub fn XMVectorLess(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_f32[0] < V2.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[1] < V2.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[2] < V2.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[3] < V2.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmplt_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorLessOrEqual(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V1.vector4_f32[0] <= V2.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[1] <= V2.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[2] <= V2.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V1.vector4_f32[3] <= V2.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmple_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorInBounds(V: FXMVECTOR, Bounds: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0] { 0xFFFFFFFF } else { 0 },
                if V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1] { 0xFFFFFFFF } else { 0 },
                if V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2] { 0xFFFFFFFF } else { 0 },
                if V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3] { 0xFFFFFFFF } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vTemp1: XMVECTOR = _mm_cmple_ps(V, Bounds);
        let mut vTemp2: XMVECTOR = _mm_mul_ps(Bounds, g_XMNegativeOne.v);
        vTemp2 = _mm_cmple_ps(vTemp2, V);
        vTemp1 = _mm_and_ps(vTemp1, vTemp2);
        return vTemp1;
    }
}

#[inline]
pub fn XMVectorInBoundsR(pCR: &mut u32, V: FXMVECTOR, Bounds: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let ux = if V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0] { 0xFFFFFFFF } else { 0 };
        let uy = if V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1] { 0xFFFFFFFF } else { 0 };
        let uz = if V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2] { 0xFFFFFFFF } else { 0 };
        let uw = if V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3] { 0xFFFFFFFF } else { 0 };

        let mut CR = 0;
        if ubool(ux & uy & uz & uw) {
            CR = XM_CRMASK_CR6BOUNDS;
        }
        *pCR = CR;

        let Control = XMVECTORU32 { u: [ ux, uy, uz, uw ]};
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vTemp1: XMVECTOR = _mm_cmple_ps(V, Bounds);
        let mut vTemp2: XMVECTOR = _mm_mul_ps(Bounds, g_XMNegativeOne.v);
        vTemp2 = _mm_cmple_ps(vTemp2, V);
        vTemp1 = _mm_and_ps(vTemp1, vTemp2);

        let mut CR: u32 = 0;
        if (_mm_movemask_ps(vTemp1) == 0xf)
        {
            CR = XM_CRMASK_CR6BOUNDS;
        }
        *pCR = CR;
        return vTemp1;
    }
}

#[inline]
pub fn XMVectorIsNaN(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if XMISNAN!(V.vector4_f32[0]) { 0xFFFFFFFFu32 } else { 0 },
                if XMISNAN!(V.vector4_f32[1]) { 0xFFFFFFFFu32 } else { 0 },
                if XMISNAN!(V.vector4_f32[2]) { 0xFFFFFFFFu32 } else { 0 },
                if XMISNAN!(V.vector4_f32[3]) { 0xFFFFFFFFu32 } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_cmpneq_ps(V, V);
    }
}

#[inline]
pub fn XMVectorIsInfinite(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Control = XMVECTORU32 {
            u: [
                if XMISINF!(V.vector4_f32[0]) { 0xFFFFFFFFu32 } else { 0 },
                if XMISINF!(V.vector4_f32[1]) { 0xFFFFFFFFu32 } else { 0 },
                if XMISINF!(V.vector4_f32[2]) { 0xFFFFFFFFu32 } else { 0 },
                if XMISINF!(V.vector4_f32[3]) { 0xFFFFFFFFu32 } else { 0 },
            ]
        };
        return Control.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vTemp: __m128 = _mm_and_ps(V, g_XMAbsMask.v);
        vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity.v);
        return vTemp;
    }
}

#[inline]
pub fn XMVectorMin(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                if V1.vector4_f32[0] < V2.vector4_f32[0] { V1.vector4_f32[0] } else { V2.vector4_f32[0] },
                if V1.vector4_f32[1] < V2.vector4_f32[1] { V1.vector4_f32[1] } else { V2.vector4_f32[1] },
                if V1.vector4_f32[2] < V2.vector4_f32[2] { V1.vector4_f32[2] } else { V2.vector4_f32[2] },
                if V1.vector4_f32[3] < V2.vector4_f32[3] { V1.vector4_f32[3] } else { V2.vector4_f32[3] },
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_min_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorMax(V1: FXMVECTOR, V2: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                if V1.vector4_f32[0] > V2.vector4_f32[0] { V1.vector4_f32[0] } else { V2.vector4_f32[0] },
                if V1.vector4_f32[1] > V2.vector4_f32[1] { V1.vector4_f32[1] } else { V2.vector4_f32[1] },
                if V1.vector4_f32[2] > V2.vector4_f32[2] { V1.vector4_f32[2] } else { V2.vector4_f32[2] },
                if V1.vector4_f32[3] > V2.vector4_f32[3] { V1.vector4_f32[3] } else { V2.vector4_f32[3] },
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_max_ps(V1, V2);
    }
}
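
// Added sanity check (not part of the original source): XMVectorMin and
// XMVectorMax operate per lane, not on the vector as a whole.
#[test]
fn test_XMVectorMinMax() {
    let a = XMVectorSet(1.0, 6.0, 3.0, 8.0);
    let b = XMVectorSet(5.0, 2.0, 7.0, 4.0);

    let lo = XMVectorMin(a, b);
    let hi = XMVectorMax(a, b);

    assert_eq!(1.0, XMVectorGetX(lo));
    assert_eq!(2.0, XMVectorGetY(lo));
    assert_eq!(5.0, XMVectorGetX(hi));
    assert_eq!(6.0, XMVectorGetY(hi));
}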

#[inline]
#[cfg(_XM_NO_INTRINSICS_)]
fn round_to_nearest(mut x: f32) -> f32 {
    let i = floorf(x);
    x -= i;
    if (x < 0.5) {
        return i;
    }
    if (x > 0.5) {
        return i + 1.0;
    }

    let (_, int_part) = modff(i / 2.0);
    if ((2.0 * int_part) == i)
    {
        return i;
    }

    return i + 1.0;
}

#[test]
#[cfg(_XM_NO_INTRINSICS_)]
fn test_round_to_nearest() {
    assert_eq!(24.0, round_to_nearest(23.5));
    assert_eq!(24.0, round_to_nearest(24.5));

    assert_eq!(-24.0, round_to_nearest(-23.5));
    assert_eq!(-24.0, round_to_nearest(-24.5));
}

#[inline]
pub fn XMVectorRound(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                round_to_nearest(V.vector4_f32[0]),
                round_to_nearest(V.vector4_f32[1]),
                round_to_nearest(V.vector4_f32[2]),
                round_to_nearest(V.vector4_f32[3]),
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        return _mm_round_ps(V, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        // Adding and then subtracting 2^23 (g_XMNoFraction) forces the FPU to
        // round away the fractional bits; the sign is carried into the magic
        // constant so negative inputs round symmetrically.
        let sign: __m128 = _mm_and_ps(V, g_XMNegativeZero.v);
        let sMagic: __m128 = _mm_or_ps(g_XMNoFraction.v, sign);
        let mut R1: __m128 = _mm_add_ps(V, sMagic);
        R1 = _mm_sub_ps(R1, sMagic);
        // Inputs with |V| >= 2^23 already have no fractional part and pass
        // through unchanged.
        let mut R2: __m128 = _mm_and_ps(V, g_XMAbsMask.v);
        let mask: __m128 = _mm_cmple_ps(R2, g_XMNoFraction.v);
        R2 = _mm_andnot_ps(mask, V);
        R1 = _mm_and_ps(R1, mask);
        let vResult: XMVECTOR = _mm_xor_ps(R1, R2);
        return vResult;
    }
}
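
// Added sanity check (not part of the original source): values away from the
// .5 midpoint round toward the nearest integer on every code path (midpoints
// use round-half-to-even, covered by test_round_to_nearest above).
#[test]
fn test_XMVectorRound() {
    let v = XMVectorRound(XMVectorSet(23.4, 23.6, -23.4, -23.6));
    assert_eq!(23.0, XMVectorGetX(v));
    assert_eq!(24.0, XMVectorGetY(v));
    assert_eq!(-23.0, XMVectorGetZ(v));
    assert_eq!(-24.0, XMVectorGetW(v));
}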

#[inline]
pub fn XMVectorTruncate(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut Result: XMVECTOR = crate::undefined();

        for i in 0..4 {
            if (XMISNAN!(V.vector4_f32[i]))
            {
                Result.vector4_u32[i] = 0x7FC00000;
            }
            else if (fabsf(V.vector4_f32[i]) < 8388608.0)
            {
                Result.vector4_f32[i] = (V.vector4_f32[i] as i32) as f32;
            }
            else
            {
                Result.vector4_f32[i] = V.vector4_f32[i];
            }
        }

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        return _mm_round_ps(V, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vTest: __m128i = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask.m128i());
        vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction.m128i());
        let vInt: __m128i = _mm_cvttps_epi32(V);
        let mut vResult: XMVECTOR = _mm_cvtepi32_ps(vInt);
        vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest));
        vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V));
        vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest));
        return vResult;
    }
}
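
// Added sanity check (not part of the original source): truncation rounds
// toward zero for both signs.
#[test]
fn test_XMVectorTruncate() {
    let v = XMVectorTruncate(XMVectorSet(1.9, -1.9, 0.5, -0.5));
    assert_eq!(1.0, XMVectorGetX(v));
    assert_eq!(-1.0, XMVectorGetY(v));
    assert_eq!(0.0, XMVectorGetZ(v));
    assert_eq!(0.0, XMVectorGetW(v));
}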

#[inline]
pub fn XMVectorFloor(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                floorf(V.vector4_f32[0]),
                floorf(V.vector4_f32[1]),
                floorf(V.vector4_f32[2]),
                floorf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        return _mm_floor_ps(V);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vTest: __m128i = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask.m128i());
        vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction.m128i());
        let vInt: __m128i = _mm_cvttps_epi32(V);
        let mut vResult: XMVECTOR = _mm_cvtepi32_ps(vInt);
        let mut vLarger: __m128 = _mm_cmpgt_ps(vResult, V);
        vLarger = _mm_cvtepi32_ps(_mm_castps_si128(vLarger));
        vResult = _mm_add_ps(vResult, vLarger);
        vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest));
        vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V));
        vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest));
        return vResult;
    }
}

#[inline]
pub fn XMVectorCeiling(V: FXMVECTOR) -> XMVECTOR {
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                ceilf(V.vector4_f32[0]),
                ceilf(V.vector4_f32[1]),
                ceilf(V.vector4_f32[2]),
                ceilf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        return _mm_ceil_ps(V);
    }

    #[cfg(all(not(_XM_SSE4_INTRINSICS_), _XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vTest: __m128i = _mm_and_si128(_mm_castps_si128(V), g_XMAbsMask.m128i());
        vTest = _mm_cmplt_epi32(vTest, g_XMNoFraction.m128i());
        let vInt: __m128i = _mm_cvttps_epi32(V);
        let mut vResult: XMVECTOR = _mm_cvtepi32_ps(vInt);
        let mut vSmaller: __m128 = _mm_cmplt_ps(vResult, V);
        vSmaller = _mm_cvtepi32_ps(_mm_castps_si128(vSmaller));
        vResult = _mm_sub_ps(vResult, vSmaller);
        vResult = _mm_and_ps(vResult, _mm_castsi128_ps(vTest));
        vTest = _mm_andnot_si128(vTest, _mm_castps_si128(V));
        vResult = _mm_or_ps(vResult, _mm_castsi128_ps(vTest));
        return vResult;
    }
}
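
// Added sanity check (not part of the original source): floor rounds toward
// negative infinity and ceiling toward positive infinity.
#[test]
fn test_XMVectorFloorCeiling() {
    let f = XMVectorFloor(XMVectorSet(1.5, -1.5, 2.0, -2.0));
    assert_eq!(1.0, XMVectorGetX(f));
    assert_eq!(-2.0, XMVectorGetY(f));

    let c = XMVectorCeiling(XMVectorSet(1.5, -1.5, 2.0, -2.0));
    assert_eq!(2.0, XMVectorGetX(c));
    assert_eq!(-1.0, XMVectorGetY(c));
}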

#[inline]
pub fn XMVectorClamp(
    V: FXMVECTOR,
    Min: FXMVECTOR,
    Max: FXMVECTOR
) -> XMVECTOR
{
    debug_assert!(XMVector4LessOrEqual(Min, Max));

    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;
        Result = XMVectorMax(Min, V);
        Result = XMVectorMin(Max, Result);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR;
        vResult = _mm_max_ps(Min, V);
        vResult = _mm_min_ps(Max, vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVectorSaturate(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return XMVectorClamp(V, g_XMZero.v, g_XMOne.v);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vResult: XMVECTOR = _mm_max_ps(V, g_XMZero.v);
        return _mm_min_ps(vResult, g_XMOne.v);
    }
}
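
// Added sanity check (not part of the original source): XMVectorSaturate is
// XMVectorClamp with the [0, 1] range.
#[test]
fn test_XMVectorClampSaturate() {
    let v = XMVectorSet(-1.0, 0.5, 2.0, 1.0);

    let clamped = XMVectorClamp(v, XMVectorZero(), XMVectorSplatOne());
    let saturated = XMVectorSaturate(v);

    for i in 0..4 {
        assert_eq!(XMVectorGetByIndex(clamped, i), XMVectorGetByIndex(saturated, i));
    }
    assert_eq!(0.0, XMVectorGetX(clamped));
    assert_eq!(0.5, XMVectorGetY(clamped));
    assert_eq!(1.0, XMVectorGetZ(clamped));
}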

#[inline]
pub fn XMVectorAndInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                V1.vector4_u32[0] & V2.vector4_u32[0],
                V1.vector4_u32[1] & V2.vector4_u32[1],
                V1.vector4_u32[2] & V2.vector4_u32[2],
                V1.vector4_u32[3] & V2.vector4_u32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_and_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorAndCInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                V1.vector4_u32[0] & !V2.vector4_u32[0],
                V1.vector4_u32[1] & !V2.vector4_u32[1],
                V1.vector4_u32[2] & !V2.vector4_u32[2],
                V1.vector4_u32[3] & !V2.vector4_u32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_andnot_si128(_mm_castps_si128(V2), _mm_castps_si128(V1));
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorOrInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                V1.vector4_u32[0] | V2.vector4_u32[0],
                V1.vector4_u32[1] | V2.vector4_u32[1],
                V1.vector4_u32[2] | V2.vector4_u32[2],
                V1.vector4_u32[3] | V2.vector4_u32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_or_si128(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorNorInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                !(V1.vector4_u32[0] | V2.vector4_u32[0]),
                !(V1.vector4_u32[1] | V2.vector4_u32[1]),
                !(V1.vector4_u32[2] | V2.vector4_u32[2]),
                !(V1.vector4_u32[3] | V2.vector4_u32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut Result: __m128i;
        Result = _mm_or_si128(_mm_castps_si128(V1), _mm_castps_si128(V2));
        Result = _mm_andnot_si128(Result, g_XMNegOneMask.m128i());
        return _mm_castsi128_ps(Result);
    }
}

#[inline]
pub fn XMVectorXorInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORU32 {
            u: [
                V1.vector4_u32[0] ^ V2.vector4_u32[0],
                V1.vector4_u32[1] ^ V2.vector4_u32[1],
                V1.vector4_u32[2] ^ V2.vector4_u32[2],
                V1.vector4_u32[3] ^ V2.vector4_u32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let V: __m128i = _mm_xor_si128(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return _mm_castsi128_ps(V);
    }
}

#[inline]
pub fn XMVectorNegate(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                -V.vector4_f32[0],
                -V.vector4_f32[1],
                -V.vector4_f32[2],
                -V.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let Z: XMVECTOR;

        Z = _mm_setzero_ps();

        return _mm_sub_ps(Z, V);
    }
}

#[inline]
pub fn XMVectorAdd(
    V1: FXMVECTOR,
    V2: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V1.vector4_f32[0] + V2.vector4_f32[0],
                V1.vector4_f32[1] + V2.vector4_f32[1],
                V1.vector4_f32[2] + V2.vector4_f32[2],
                V1.vector4_f32[3] + V2.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_add_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorSum(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let sum = V.vector4_f32[0] + V.vector4_f32[1] + V.vector4_f32[2] + V.vector4_f32[3];
        let Result = XMVECTORF32 {
            f: [ sum, sum, sum, sum ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE3_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_hadd_ps(V, V);
        return _mm_hadd_ps(vTemp, vTemp);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_)))]
    unsafe {
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 3, 0, 1));
        let vTemp2: XMVECTOR = _mm_add_ps(V, vTemp);
        vTemp = XM_PERMUTE_PS!(vTemp2, _MM_SHUFFLE(1, 0, 3, 2));
        return _mm_add_ps(vTemp, vTemp2);
    }
}
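
// The horizontal sum should land in every lane. Hand-picked input.
#[test]
fn test_XMVectorSum() {
    let r = XMVectorSum(XMVectorSet(1.0, 2.0, 3.0, 4.0));
    assert_approx_eq!(10.0, XMVectorGetX(r));
    assert_approx_eq!(10.0, XMVectorGetY(r));
    assert_approx_eq!(10.0, XMVectorGetZ(r));
    assert_approx_eq!(10.0, XMVectorGetW(r));
}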

#[inline]
pub fn XMVectorAddAngles(
    V1: FXMVECTOR,
    V2: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        const Zero: XMVECTOR = unsafe { g_XMZero.v };

        // Add the given angles together. If the range of V1 is such
        // that -Pi <= V1 < Pi and the range of V2 is such that
        // -2Pi <= V2 <= 2Pi, then the range of the resulting angle
        // will be -Pi <= Result < Pi.
        let mut Result: XMVECTOR = XMVectorAdd(V1, V2);

        let mut Mask: XMVECTOR = XMVectorLess(Result, g_XMNegativePi.v);
        let mut Offset: XMVECTOR = XMVectorSelect(Zero, g_XMTwoPi.v, Mask);

        Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v);
        Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask);

        Result = XMVectorAdd(Result, Offset);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Adjust the angles
        let mut vResult: XMVECTOR = _mm_add_ps(V1, V2);
        // Less than Pi?
        let mut vOffset: XMVECTOR = _mm_cmplt_ps(vResult, g_XMNegativePi.v);
        vOffset = _mm_and_ps(vOffset, g_XMTwoPi.v);
        // Add 2Pi to all entries less than -Pi
        vResult = _mm_add_ps(vResult, vOffset);
        // Greater than or equal to Pi?
        vOffset = _mm_cmpge_ps(vResult, g_XMPi.v);
        vOffset = _mm_and_ps(vOffset, g_XMTwoPi.v);
        // Sub 2Pi to all entries greater than Pi
        vResult = _mm_sub_ps(vResult, vOffset);
        return vResult;
    }
}
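
// Adding angles should wrap the result back into [-XM_PI, XM_PI):
// 0.75*Pi + 0.5*Pi = 1.25*Pi, which wraps to -0.75*Pi. Hand-picked case.
#[test]
fn test_XMVectorAddAngles() {
    let r = XMVectorAddAngles(XMVectorReplicate(XM_PI * 0.75), XMVectorReplicate(XM_PI * 0.5));
    assert_approx_eq!(-XM_PI * 0.75, XMVectorGetX(r));
    assert_approx_eq!(-XM_PI * 0.75, XMVectorGetW(r));
}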

#[inline]
pub fn XMVectorSubtract(
    V1: FXMVECTOR,
    V2: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V1.vector4_f32[0] - V2.vector4_f32[0],
                V1.vector4_f32[1] - V2.vector4_f32[1],
                V1.vector4_f32[2] - V2.vector4_f32[2],
                V1.vector4_f32[3] - V2.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_sub_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorSubtractAngles(
    V1: FXMVECTOR,
    V2: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        const Zero: XMVECTOR = unsafe { g_XMZero.v };

        // Subtract the given angles. If the range of V1 is such
        // that -Pi <= V1 < Pi and the range of V2 is such that
        // -2Pi <= V2 <= 2Pi, then the range of the resulting angle
        // will be -Pi <= Result < Pi.
        let mut Result: XMVECTOR = XMVectorSubtract(V1, V2);

        let mut Mask: XMVECTOR = XMVectorLess(Result, g_XMNegativePi.v);
        let mut Offset: XMVECTOR = XMVectorSelect(Zero, g_XMTwoPi.v, Mask);

        Mask = XMVectorGreaterOrEqual(Result, g_XMPi.v);
        Offset = XMVectorSelect(Offset, g_XMNegativeTwoPi.v, Mask);

        Result = XMVectorAdd(Result, Offset);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Adjust the angles
        let mut vResult: XMVECTOR = _mm_sub_ps(V1, V2);
        // Less than Pi?
        let mut vOffset: XMVECTOR = _mm_cmplt_ps(vResult, g_XMNegativePi.v);
        vOffset = _mm_and_ps(vOffset, g_XMTwoPi.v);
        // Add 2Pi to all entries less than -Pi
        vResult = _mm_add_ps(vResult, vOffset);
        // Greater than or equal to Pi?
        vOffset = _mm_cmpge_ps(vResult, g_XMPi.v);
        vOffset = _mm_and_ps(vOffset, g_XMTwoPi.v);
        // Sub 2Pi to all entries greater than Pi
        vResult = _mm_sub_ps(vResult, vOffset);
        return vResult;
    }
}

#[inline]
pub fn XMVectorMultiply(
    V1: FXMVECTOR,
    V2: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V1.vector4_f32[0] * V2.vector4_f32[0],
                V1.vector4_f32[1] * V2.vector4_f32[1],
                V1.vector4_f32[2] * V2.vector4_f32[2],
                V1.vector4_f32[3] * V2.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_mul_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorMultiplyAdd(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    V3: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V1.vector4_f32[0] * V2.vector4_f32[0] + V3.vector4_f32[0],
                V1.vector4_f32[1] * V2.vector4_f32[1] + V3.vector4_f32[1],
                V1.vector4_f32[2] * V2.vector4_f32[2] + V3.vector4_f32[2],
                V1.vector4_f32[3] * V2.vector4_f32[3] + V3.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return XM_FMADD_PS!(V1, V2, V3);
    }
}

#[inline]
pub fn XMVectorDivide(
    V1: FXMVECTOR,
    V2: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V1.vector4_f32[0] / V2.vector4_f32[0],
                V1.vector4_f32[1] / V2.vector4_f32[1],
                V1.vector4_f32[2] / V2.vector4_f32[2],
                V1.vector4_f32[3] / V2.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_div_ps(V1, V2);
    }
}

#[inline]
pub fn XMVectorNegativeMultiplySubtract(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    V3: FXMVECTOR
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V3.vector4_f32[0] - (V1.vector4_f32[0] * V2.vector4_f32[0]),
                V3.vector4_f32[1] - (V1.vector4_f32[1] * V2.vector4_f32[1]),
                V3.vector4_f32[2] - (V1.vector4_f32[2] * V2.vector4_f32[2]),
                V3.vector4_f32[3] - (V1.vector4_f32[3] * V2.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return XM_FNMADD_PS!(V1, V2, V3);
    }
}

#[inline]
pub fn XMVectorScale(
    V: FXMVECTOR,
    ScaleFactor: f32,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                V.vector4_f32[0] * ScaleFactor,
                V.vector4_f32[1] * ScaleFactor,
                V.vector4_f32[2] * ScaleFactor,
                V.vector4_f32[3] * ScaleFactor
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vResult: XMVECTOR = _mm_set_ps1(ScaleFactor);
        return _mm_mul_ps(vResult, V);
    }
}

#[inline]
pub fn XMVectorReciprocalEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                1.0 / V.vector4_f32[0],
                1.0 / V.vector4_f32[1],
                1.0 / V.vector4_f32[2],
                1.0 / V.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_rcp_ps(V);
    }
}

#[inline]
pub fn XMVectorReciprocal(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                1.0 / V.vector4_f32[0],
                1.0 / V.vector4_f32[1],
                1.0 / V.vector4_f32[2],
                1.0 / V.vector4_f32[3]
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_div_ps(g_XMOne.v, V);
    }
}

#[inline]
pub fn XMVectorSqrtEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                sqrtf(V.vector4_f32[0]),
                sqrtf(V.vector4_f32[1]),
                sqrtf(V.vector4_f32[2]),
                sqrtf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_sqrt_ps(V);
    }
}

#[inline]
pub fn XMVectorSqrt(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                sqrtf(V.vector4_f32[0]),
                sqrtf(V.vector4_f32[1]),
                sqrtf(V.vector4_f32[2]),
                sqrtf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_sqrt_ps(V);
    }
}

#[inline]
pub fn XMVectorReciprocalSqrtEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                1.0 / sqrtf(V.vector4_f32[0]),
                1.0 / sqrtf(V.vector4_f32[1]),
                1.0 / sqrtf(V.vector4_f32[2]),
                1.0 / sqrtf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        return _mm_rsqrt_ps(V);
    }
}

#[inline]
pub fn XMVectorReciprocalSqrt(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                1.0 / sqrtf(V.vector4_f32[0]),
                1.0 / sqrtf(V.vector4_f32[1]),
                1.0 / sqrtf(V.vector4_f32[2]),
                1.0 / sqrtf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_sqrt_ps(V);
        vResult = _mm_div_ps(g_XMOne.v, vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVectorExp2(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                powf(2.0, V.vector4_f32[0]),
                powf(2.0, V.vector4_f32[1]),
                powf(2.0, V.vector4_f32[2]),
                powf(2.0, V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let itrunc: __m128i = _mm_cvttps_epi32(V);
        let ftrunc: __m128 = _mm_cvtepi32_ps(itrunc);
        let y: __m128 = _mm_sub_ps(V, ftrunc);

        let mut poly: __m128 = XM_FMADD_PS!(g_XMExpEst7.v, y, g_XMExpEst6.v);
        poly = XM_FMADD_PS!(poly, y, g_XMExpEst5.v);
        poly = XM_FMADD_PS!(poly, y, g_XMExpEst4.v);
        poly = XM_FMADD_PS!(poly, y, g_XMExpEst3.v);
        poly = XM_FMADD_PS!(poly, y, g_XMExpEst2.v);
        poly = XM_FMADD_PS!(poly, y, g_XMExpEst1.v);
        poly = XM_FMADD_PS!(poly, y, g_XMOne.v);

        let mut biased: __m128i = _mm_add_epi32(itrunc, g_XMExponentBias.m128i());
        biased = _mm_slli_epi32(biased, 23);
        let result0: __m128 = _mm_div_ps(_mm_castsi128_ps(biased), poly);

        biased = _mm_add_epi32(itrunc, g_XM253.m128i());
        biased = _mm_slli_epi32(biased, 23);
        let mut result1: __m128 = _mm_div_ps(_mm_castsi128_ps(biased), poly);
        result1 = _mm_mul_ps(g_XMMinNormal.v, result1);

        // Use selection to handle the cases
        //  if (V is NaN) -> QNaN;
        //  else if (V sign bit set)
        //      if (V > -150)
        //         if (V.exponent < -126) -> result1
        //         else -> result0
        //      else -> +0
        //  else
        //      if (V < 128) -> result0
        //      else -> +inf
        let mut comp: __m128i = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBin128.m128i());
        let mut select0: __m128i = _mm_and_si128(comp, _mm_castps_si128(result0));
        let mut select1: __m128i = _mm_andnot_si128(comp, g_XMInfinity.m128i());
        let result2: __m128i = _mm_or_si128(select0, select1);

        comp = _mm_cmplt_epi32(itrunc, g_XMSubnormalExponent.m128i());
        select1 = _mm_and_si128(comp, _mm_castps_si128(result1));
        select0 = _mm_andnot_si128(comp, _mm_castps_si128(result0));
        let result3: __m128i = _mm_or_si128(select0, select1);

        comp = _mm_cmplt_epi32(_mm_castps_si128(V), g_XMBinNeg150.m128i());
        select0 = _mm_and_si128(comp, result3);
        select1 = _mm_andnot_si128(comp, g_XMZero.m128i());
        let result4: __m128i = _mm_or_si128(select0, select1);

        let sign: __m128i = _mm_and_si128(_mm_castps_si128(V), g_XMNegativeZero.m128i());
        comp = _mm_cmpeq_epi32(sign, g_XMNegativeZero.m128i());
        select0 = _mm_and_si128(comp, result4);
        select1 = _mm_andnot_si128(comp, result2);
        let result5: __m128i = _mm_or_si128(select0, select1);

        let mut t0: __m128i = _mm_and_si128(_mm_castps_si128(V), g_XMQNaNTest.m128i());
        let mut t1: __m128i = _mm_and_si128(_mm_castps_si128(V), g_XMInfinity.m128i());
        t0 = _mm_cmpeq_epi32(t0, g_XMZero.m128i());
        t1 = _mm_cmpeq_epi32(t1, g_XMInfinity.m128i());
        let isNaN: __m128i = _mm_andnot_si128(t0, t1);

        select0 = _mm_and_si128(isNaN, g_XMQNaN.m128i());
        select1 = _mm_andnot_si128(isNaN, result5);
        let vResult: __m128i = _mm_or_si128(select0, select1);

        return _mm_castsi128_ps(vResult);
    }
}
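
// For integral exponents the fractional part y is zero, the polynomial
// collapses to 1, and the bit-twiddled SSE path is exact, so plain lane
// comparisons are safe here. Hand-picked inputs.
#[test]
fn test_XMVectorExp2() {
    let r = XMVectorExp2(XMVectorSet(-1.0, 0.0, 1.0, 3.0));
    assert_approx_eq!(0.5, XMVectorGetX(r));
    assert_approx_eq!(1.0, XMVectorGetY(r));
    assert_approx_eq!(2.0, XMVectorGetZ(r));
    assert_approx_eq!(8.0, XMVectorGetW(r));
}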

#[inline]
pub fn XMVectorExp(
    V: FXMVECTOR,
) -> XMVECTOR {
    // Legacy name kept from DirectXMath: XMVectorExp is base 2, i.e. 2^V.
    return XMVectorExp2(V);
}

#[inline]
pub fn XMVectorPow(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                powf(V1.vector4_f32[0], V2.vector4_f32[0]),
                powf(V1.vector4_f32[1], V2.vector4_f32[1]),
                powf(V1.vector4_f32[2], V2.vector4_f32[2]),
                powf(V1.vector4_f32[3], V2.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut a: Align16<[f32; 4]> = crate::undefined();
        let mut b: Align16<[f32; 4]> = crate::undefined();
        _mm_store_ps(a.as_mut_ptr(), V1);
        _mm_store_ps(b.as_mut_ptr(), V2);
        let vResult: XMVECTOR = _mm_setr_ps(
            powf(a[0], b[0]),
            powf(a[1], b[1]),
            powf(a[2], b[2]),
            powf(a[3], b[3]));
        return vResult;
    }
}
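
// powf is applied lane-wise on every code path, so exactly representable
// cases suffice. Hand-picked inputs.
#[test]
fn test_XMVectorPow() {
    let r = XMVectorPow(XMVectorSet(2.0, 4.0, 9.0, 5.0), XMVectorSet(3.0, 0.5, 0.5, 0.0));
    assert_approx_eq!(8.0, XMVectorGetX(r));
    assert_approx_eq!(2.0, XMVectorGetY(r));
    assert_approx_eq!(3.0, XMVectorGetZ(r));
    assert_approx_eq!(1.0, XMVectorGetW(r));
}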

#[inline]
pub fn XMVectorAbs(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                fabsf(V.vector4_f32[0]),
                fabsf(V.vector4_f32[1]),
                fabsf(V.vector4_f32[2]),
                fabsf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_setzero_ps();
        vResult = _mm_sub_ps(vResult, V);
        vResult = _mm_max_ps(vResult, V);
        return vResult;
    }
}

#[inline]
pub fn XMVectorMod(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    // V1 % V2 = V1 - V2 * truncate(V1 / V2)

    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Quotient: XMVECTOR = XMVectorDivide(V1, V2);
        Quotient = XMVectorTruncate(Quotient);
        let Result: XMVECTOR = XMVectorNegativeMultiplySubtract(V2, Quotient, V1);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = _mm_div_ps(V1, V2);
        vResult = XMVectorTruncate(vResult);
        return XM_FNMADD_PS!(vResult, V2, V1);
    }
}
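
// XMVectorMod follows truncated division (like the C fmod), so the result
// takes the sign of the dividend. Hand-picked, exactly representable cases.
#[test]
fn test_XMVectorMod() {
    let r = XMVectorMod(XMVectorSet(5.5, -5.5, 7.0, 1.25), XMVectorSet(2.0, 2.0, 3.5, 1.0));
    assert_approx_eq!(1.5, XMVectorGetX(r));
    assert_approx_eq!(-1.5, XMVectorGetY(r));
    assert_approx_eq!(0.0, XMVectorGetZ(r));
    assert_approx_eq!(0.25, XMVectorGetW(r));
}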

#[inline]
pub fn XMVectorModAngles(
    Angles: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut V: XMVECTOR;
        let Result: XMVECTOR;

        // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
        V = XMVectorMultiply(Angles, g_XMReciprocalTwoPi.v);
        // Use the inline function due to complexity for rounding
        V = XMVectorRound(V);
        Result = XMVectorNegativeMultiplySubtract(g_XMTwoPi.v, V, Angles);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Modulo the range of the given angles such that -XM_PI <= Angles < XM_PI
        let mut vResult: XMVECTOR = _mm_mul_ps(Angles, g_XMReciprocalTwoPi.v);
        // Use the inline function due to complexity for rounding
        vResult = XMVectorRound(vResult);
        return XM_FNMADD_PS!(vResult, g_XMTwoPi.v, Angles);
    }
}
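
// Angles offset by whole turns should map back into [-XM_PI, XM_PI).
// XM_2PI is assumed to be the crate's two-pi constant, alongside XM_PI, and
// assert_approx_eq!'s optional epsilon argument is used for the inexact lanes.
#[test]
fn test_XMVectorModAngles() {
    let r = XMVectorModAngles(XMVectorSet(0.0, XM_PI / 4.0 + XM_2PI, -XM_PI / 4.0 - XM_2PI, XM_2PI));
    assert_approx_eq!(0.0, XMVectorGetX(r));
    assert_approx_eq!(XM_PI / 4.0, XMVectorGetY(r), 1.0e-5);
    assert_approx_eq!(-XM_PI / 4.0, XMVectorGetZ(r), 1.0e-5);
    assert_approx_eq!(0.0, XMVectorGetW(r), 1.0e-5);
}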

#[inline]
pub fn XMVectorSin(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 11-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                sinf(V.vector4_f32[0]),
                sinf(V.vector4_f32[1]),
                sinf(V.vector4_f32[2]),
                sinf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Force the value within the bounds of pi
        let mut x: XMVECTOR = XMVectorModAngles(V);

        // Map in [-pi/2,pi/2] with sin(y) = sin(x).
        let sign: __m128 = _mm_and_ps(x, g_XMNegativeZero.v);
        let c: __m128 = _mm_or_ps(g_XMPi.v, sign); // pi when x >= 0, -pi when x < 0
        let absx: __m128 = _mm_andnot_ps(sign, x); // |x|
        let rflx: __m128 = _mm_sub_ps(c, x);
        let comp: __m128 = _mm_cmple_ps(absx, g_XMHalfPi.v);
        let select0: __m128 = _mm_and_ps(comp, x);
        let select1: __m128 = _mm_andnot_ps(comp, rflx);
        x = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute polynomial approximation
        const SC1: XMVECTOR = unsafe { g_XMSinCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(SC1, _MM_SHUFFLE(0, 0, 0, 0));
        const SC0: XMVECTOR = unsafe { g_XMSinCoefficients0.v };
        let mut vConstants: __m128 = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(3, 3, 3, 3));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(2, 2, 2, 2));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, x);
        return Result;
    }
}

#[test]
fn test_XMVectorSin() {
    for angle in &[-XM_PI, -XM_PI/2.0, -XM_PI/3.0, -XM_PI/4.0, 0.0, XM_PI/4.0, XM_PI/3.0, XM_PI/2.0, XM_PI] {
        let scalar = angle.sin();
        let vector = XMVectorReplicate(*angle);
        let vector = XMVectorSin(vector);
        assert_approx_eq!(scalar, XMVectorGetX(vector));
        assert_approx_eq!(scalar, XMVectorGetY(vector));
        assert_approx_eq!(scalar, XMVectorGetZ(vector));
        assert_approx_eq!(scalar, XMVectorGetW(vector));
    }
}

#[inline]
pub fn XMVectorCos(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 10-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                cosf(V.vector4_f32[0]),
                cosf(V.vector4_f32[1]),
                cosf(V.vector4_f32[2]),
                cosf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Map V to x in [-pi,pi].
        let mut x: XMVECTOR = XMVectorModAngles(V);

        // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
        let mut sign: XMVECTOR = _mm_and_ps(x, g_XMNegativeZero.v);
        let c: __m128 = _mm_or_ps(g_XMPi.v, sign); // pi when x >= 0, -pi when x < 0
        let absx: __m128 = _mm_andnot_ps(sign, x); // |x|
        let rflx: __m128 = _mm_sub_ps(c, x);
        let comp: __m128 = _mm_cmple_ps(absx, g_XMHalfPi.v);
        let mut select0: __m128 = _mm_and_ps(comp, x);
        let mut select1: __m128 = _mm_andnot_ps(comp, rflx);
        x = _mm_or_ps(select0, select1);
        select0 = _mm_and_ps(comp, g_XMOne.v);
        select1 = _mm_andnot_ps(comp, g_XMNegativeOne.v);
        sign = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute polynomial approximation
        const CC1: XMVECTOR = unsafe { g_XMCosCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(CC1, _MM_SHUFFLE(0, 0, 0, 0));
        const CC0: XMVECTOR = unsafe { g_XMCosCoefficients0.v };
        let mut vConstants: __m128 = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(3, 3, 3, 3));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(2, 2, 2, 2));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, sign);
        return Result;
    }
}

#[test]
fn test_XMVectorCos() {
    for angle in &[-XM_PI, -XM_PI/2.0, -XM_PI/3.0, -XM_PI/4.0, 0.0, XM_PI/4.0, XM_PI/3.0, XM_PI/2.0, XM_PI] {
        let scalar = angle.cos();
        let vector = XMVectorReplicate(*angle);
        let vector = XMVectorCos(vector);
        assert_approx_eq!(scalar, XMVectorGetX(vector));
        assert_approx_eq!(scalar, XMVectorGetY(vector));
        assert_approx_eq!(scalar, XMVectorGetZ(vector));
        assert_approx_eq!(scalar, XMVectorGetW(vector));
    }
}

#[inline]
pub fn XMVectorSinCos(
    pSin: &mut XMVECTOR,
    pCos: &mut XMVECTOR,
    V: FXMVECTOR,
)
{
    // 11-degree minimax approximation for sine; 10-degree for cosine

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Sin = XMVECTORF32 {
            f: [
                sinf(V.vector4_f32[0]),
                sinf(V.vector4_f32[1]),
                sinf(V.vector4_f32[2]),
                sinf(V.vector4_f32[3])
            ]
        };
        let Cos = XMVECTORF32 {
            f: [
                cosf(V.vector4_f32[0]),
                cosf(V.vector4_f32[1]),
                cosf(V.vector4_f32[2]),
                cosf(V.vector4_f32[3])
            ]
        };
        *pSin = Sin.v;
        *pCos = Cos.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Force the value within the bounds of pi
        let mut x: XMVECTOR = XMVectorModAngles(V);

        // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x).
        let mut sign: XMVECTOR = _mm_and_ps(x, g_XMNegativeZero.v);
        let c: __m128 = _mm_or_ps(g_XMPi.v, sign); // pi when x >= 0, -pi when x < 0
        let absx: __m128 = _mm_andnot_ps(sign, x); // |x|
        let rflx: __m128 = _mm_sub_ps(c, x);
        let comp: __m128 = _mm_cmple_ps(absx, g_XMHalfPi.v);
        let mut select0: __m128 = _mm_and_ps(comp, x);
        let mut select1: __m128 = _mm_andnot_ps(comp, rflx);
        x = _mm_or_ps(select0, select1);
        select0 = _mm_and_ps(comp, g_XMOne.v);
        select1 = _mm_andnot_ps(comp, g_XMNegativeOne.v);
        sign = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute the sine
        const SC1: XMVECTOR = unsafe { g_XMSinCoefficients1.v };
        let mut vConstantsB: __m128 = XM_PERMUTE_PS!(SC1, _MM_SHUFFLE(0, 0, 0, 0));
        const SC0: XMVECTOR = unsafe { g_XMSinCoefficients0.v };
        let mut vConstants: __m128 = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(3, 3, 3, 3));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(2, 2, 2, 2));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SC0, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, x);
        *pSin = Result;

        // Compute the cosine
        const CC1: XMVECTOR = unsafe { g_XMCosCoefficients1.v };
        vConstantsB = XM_PERMUTE_PS!(CC1, _MM_SHUFFLE(0, 0, 0, 0));
        const CC0: XMVECTOR = unsafe { g_XMCosCoefficients0.v };
        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(3, 3, 3, 3));
        Result = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(2, 2, 2, 2));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CC0, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, sign);
        *pCos = Result;
    }
}
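
// Mirrors test_XMVectorSin/test_XMVectorCos for the combined routine.
#[test]
fn test_XMVectorSinCos() {
    for angle in &[-XM_PI/3.0, -XM_PI/4.0, 0.0, XM_PI/4.0, XM_PI/3.0] {
        let mut sin = XMVectorZero();
        let mut cos = XMVectorZero();
        XMVectorSinCos(&mut sin, &mut cos, XMVectorReplicate(*angle));
        assert_approx_eq!(angle.sin(), XMVectorGetX(sin));
        assert_approx_eq!(angle.cos(), XMVectorGetX(cos));
    }
}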

#[inline]
pub fn XMVectorTan(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // Cody and Waite algorithm to compute tangent.

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                tanf(V.vector4_f32[0]),
                tanf(V.vector4_f32[1]),
                tanf(V.vector4_f32[2]),
                tanf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(any(_XM_SSE_INTRINSICS_, _XM_ARM_NEON_INTRINSICS_))]
    unsafe {
        const TanCoefficients0: XMVECTORF32 = XMVECTORF32 { f: [1.0, -4.667168334e-1, 2.566383229e-2, -3.118153191e-4] };
        const TanCoefficients1: XMVECTORF32 = XMVECTORF32 { f: [4.981943399e-7, -1.333835001e-1, 3.424887824e-3, -1.786170734e-5] };
        const TanConstants: XMVECTORF32 = XMVECTORF32 { f: [1.570796371, 6.077100628e-11, 0.000244140625, 0.63661977228 ] };
        const Mask: XMVECTORU32 = XMVECTORU32 { u: [0x1, 0x1, 0x1, 0x1] };

        let TwoDivPi: XMVECTOR = XMVectorSplatW(TanConstants.v);

        let Zero: XMVECTOR = XMVectorZero();

        let C0: XMVECTOR = XMVectorSplatX(TanConstants.v);
        let C1: XMVECTOR = XMVectorSplatY(TanConstants.v);
        let Epsilon: XMVECTOR = XMVectorSplatZ(TanConstants.v);

        let mut VA: XMVECTOR = XMVectorMultiply(V, TwoDivPi);

        VA = XMVectorRound(VA);

        let mut VC: XMVECTOR = XMVectorNegativeMultiplySubtract(VA, C0, V);

        let mut VB: XMVECTOR = XMVectorAbs(VA);

        VC = XMVectorNegativeMultiplySubtract(VA, C1, VC);

        #[cfg(_XM_ARM_NEON_INTRINSICS_)]
        {
        }

        #[cfg(_XM_SSE_INTRINSICS_)]
        {
            VB = mem::transmute(_mm_cvttps_epi32(VB));
        }

        #[cfg(all(not(_XM_SSE_INTRINSICS_), not(_XM_ARM_NEON_INTRINSICS_)))]
        {
            for i in 0..4 {
                VB.vector4_u32[i] = (VB.vector4_f32[i]) as i32 as u32;
            }
        }

        let VC2: XMVECTOR = XMVectorMultiply(VC, VC);

        let T7: XMVECTOR = XMVectorSplatW(TanCoefficients1.v);
        let T6: XMVECTOR = XMVectorSplatZ(TanCoefficients1.v);
        let T4: XMVECTOR = XMVectorSplatX(TanCoefficients1.v);
        let T3: XMVECTOR = XMVectorSplatW(TanCoefficients0.v);
        let T5: XMVECTOR = XMVectorSplatY(TanCoefficients1.v);
        let T2: XMVECTOR = XMVectorSplatZ(TanCoefficients0.v);
        let T1: XMVECTOR = XMVectorSplatY(TanCoefficients0.v);
        let T0: XMVECTOR = XMVectorSplatX(TanCoefficients0.v);

        let mut VBIsEven: XMVECTOR = XMVectorAndInt(VB, Mask.v);
        VBIsEven = XMVectorEqualInt(VBIsEven, Zero);

        let mut N: XMVECTOR = XMVectorMultiplyAdd(VC2, T7, T6);
        let mut D: XMVECTOR = XMVectorMultiplyAdd(VC2, T4, T3);
        N = XMVectorMultiplyAdd(VC2, N, T5);
        D = XMVectorMultiplyAdd(VC2, D, T2);
        N = XMVectorMultiply(VC2, N);
        D = XMVectorMultiplyAdd(VC2, D, T1);
        N = XMVectorMultiplyAdd(VC, N, VC);
        let VCNearZero: XMVECTOR = XMVectorInBounds(VC, Epsilon);
        D = XMVectorMultiplyAdd(VC2, D, T0);

        N = XMVectorSelect(N, VC, VCNearZero);
        D = XMVectorSelect(D, g_XMOne.v, VCNearZero);

        let mut R0: XMVECTOR = XMVectorNegate(N);
        let R1: XMVECTOR = XMVectorDivide(N, D);
        R0 = XMVectorDivide(D, R0);

        let VIsZero: XMVECTOR = XMVectorEqual(V, Zero);

        let mut Result: XMVECTOR = XMVectorSelect(R0, R1, VBIsEven);

        Result = XMVectorSelect(Result, Zero, VIsZero);

        return Result;
    }
}

#[test]
fn test_XMVectorTan() {
    for angle in &[-XM_PI/3.0, -XM_PI/4.0, 0.0, XM_PI/4.0, XM_PI/3.0] {
        let scalar = angle.tan();
        let vector = XMVectorReplicate(*angle);
        let vector = XMVectorTan(vector);
        assert_approx_eq!(scalar, XMVectorGetX(vector));
        assert_approx_eq!(scalar, XMVectorGetY(vector));
        assert_approx_eq!(scalar, XMVectorGetZ(vector));
        assert_approx_eq!(scalar, XMVectorGetW(vector));
    }
}

#[inline]
pub fn XMVectorSinH(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                sinh(V.vector4_f32[0]),
                sinh(V.vector4_f32[1]),
                sinh(V.vector4_f32[2]),
                sinh(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // 1.0f / ln(2.0f)
        const Scale: XMVECTORF32 = XMVECTORF32 { f: [ 1.442695040888963, 1.442695040888963, 1.442695040888963, 1.442695040888963 ] };

        let V1: XMVECTOR = XM_FMADD_PS!(V, Scale.v, g_XMNegativeOne.v);
        let V2: XMVECTOR = XM_FNMADD_PS!(V, Scale.v, g_XMNegativeOne.v);
        let E1: XMVECTOR = XMVectorExp(V1);
        let E2: XMVECTOR = XMVectorExp(V2);

        return _mm_sub_ps(E1, E2);
    }
}

#[inline]
pub fn XMVectorCosH(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                cosh(V.vector4_f32[0]),
                cosh(V.vector4_f32[1]),
                cosh(V.vector4_f32[2]),
                cosh(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // 1.0f / ln(2.0f)
        const Scale: XMVECTORF32 = XMVECTORF32 { f: [ 1.442695040888963, 1.442695040888963, 1.442695040888963, 1.442695040888963 ] };

        let V1: XMVECTOR = XM_FMADD_PS!(V, Scale.v, g_XMNegativeOne.v);
        let V2: XMVECTOR = XM_FNMADD_PS!(V, Scale.v, g_XMNegativeOne.v);
        let E1: XMVECTOR = XMVectorExp(V1);
        let E2: XMVECTOR = XMVectorExp(V2);

        return _mm_add_ps(E1, E2);
    }
}

#[inline]
pub fn XMVectorTanH(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                tanh(V.vector4_f32[0]),
                tanh(V.vector4_f32[1]),
                tanh(V.vector4_f32[2]),
                tanh(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // 2.0f / ln(2.0f)
        const Scale: XMVECTORF32 = XMVECTORF32 { f: [ 2.8853900817779268, 2.8853900817779268, 2.8853900817779268, 2.8853900817779268 ] };

        let mut E: XMVECTOR = _mm_mul_ps(V, Scale.v);
        E = XMVectorExp(E);
        E = XM_FMADD_PS!(E, g_XMOneHalf.v, g_XMOneHalf.v);
        E = _mm_div_ps(g_XMOne.v, E);
        return _mm_sub_ps(g_XMOne.v, E);
    }
}
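
// tanh via the exp2-based path is an estimate, so a looser epsilon is used
// (assert_approx_eq!'s optional third argument is assumed here).
#[test]
fn test_XMVectorTanH() {
    for x in &[-1.0f32, -0.5, 0.0, 0.5, 1.0] {
        let v = XMVectorTanH(XMVectorReplicate(*x));
        assert_approx_eq!(x.tanh(), XMVectorGetX(v), 1.0e-4);
    }
}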

#[inline]
pub fn XMVectorASin(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 7-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                asinf(V.vector4_f32[0]),
                asinf(V.vector4_f32[1]),
                asinf(V.vector4_f32[2]),
                asinf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let nonnegative: __m128 = _mm_cmpge_ps(V, g_XMZero.v);
        let mvalue: __m128 = _mm_sub_ps(g_XMZero.v, V);
        let x: __m128 = _mm_max_ps(V, mvalue); // |V|

        // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
        let oneMValue: __m128 = _mm_sub_ps(g_XMOne.v, x);
        let clampOneMValue: __m128 = _mm_max_ps(g_XMZero.v, oneMValue);
        let root: __m128 = _mm_sqrt_ps(clampOneMValue);

        // Compute polynomial approximation
        const AC1: XMVECTOR = unsafe { g_XMArcCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(2, 2, 2, 2));
        let mut t0: __m128 = XM_FMADD_PS!(vConstantsB, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(1, 1, 1, 1));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(0, 0, 0, 0));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        const AC0: XMVECTOR = unsafe { g_XMArcCoefficients0.v };
        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(3, 3, 3, 3));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(2, 2, 2, 2));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(1, 1, 1, 1));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(0, 0, 0, 0));
        t0 = XM_FMADD_PS!(t0, x, vConstants);
        t0 = _mm_mul_ps(t0, root);

        let mut t1: __m128 = _mm_sub_ps(g_XMPi.v, t0);
        t0 = _mm_and_ps(nonnegative, t0);
        t1 = _mm_andnot_ps(nonnegative, t1);
        t0 = _mm_or_ps(t0, t1);
        t0 = _mm_sub_ps(g_XMHalfPi.v, t0);
        return t0;
    }
}

#[inline]
pub fn XMVectorACos(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 7-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                acosf(V.vector4_f32[0]),
                acosf(V.vector4_f32[1]),
                acosf(V.vector4_f32[2]),
                acosf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let nonnegative: __m128 = _mm_cmpge_ps(V, g_XMZero.v);
        let mvalue: __m128 = _mm_sub_ps(g_XMZero.v, V);
        let x: __m128 = _mm_max_ps(V, mvalue); // |V|

        // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
        let oneMValue: __m128 = _mm_sub_ps(g_XMOne.v, x);
        let clampOneMValue: __m128 = _mm_max_ps(g_XMZero.v, oneMValue);
        let root: __m128 = _mm_sqrt_ps(clampOneMValue);

        // Compute polynomial approximation
        const AC1: XMVECTOR = unsafe { g_XMArcCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(2, 2, 2, 2));
        let mut t0: __m128 = XM_FMADD_PS!(vConstantsB, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(1, 1, 1, 1));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC1, _MM_SHUFFLE(0, 0, 0, 0));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        const AC0: XMVECTOR = unsafe { g_XMArcCoefficients0.v };
        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(3, 3, 3, 3));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(2, 2, 2, 2));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(1, 1, 1, 1));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AC0, _MM_SHUFFLE(0, 0, 0, 0));
        t0 = XM_FMADD_PS!(t0, x, vConstants);
        t0 = _mm_mul_ps(t0, root);

        let mut t1: __m128 = _mm_sub_ps(g_XMPi.v, t0);
        t0 = _mm_and_ps(nonnegative, t0);
        t1 = _mm_andnot_ps(nonnegative, t1);
        t0 = _mm_or_ps(t0, t1);
        return t0;
    }
}
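
// The arc polynomials are minimax estimates; compare against the scalar
// acos with a modest epsilon (assert_approx_eq!'s third argument assumed).
#[test]
fn test_XMVectorACos() {
    for x in &[-1.0f32, -0.5, 0.0, 0.5, 1.0] {
        let v = XMVectorACos(XMVectorReplicate(*x));
        assert_approx_eq!(x.acos(), XMVectorGetX(v), 1.0e-5);
    }
}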

#[inline]
pub fn XMVectorATan(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 17-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                atanf(V.vector4_f32[0]),
                atanf(V.vector4_f32[1]),
                atanf(V.vector4_f32[2]),
                atanf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let absV: __m128 = XMVectorAbs(V);
        let invV: __m128 = _mm_div_ps(g_XMOne.v, V);
        let mut comp: __m128 = _mm_cmpgt_ps(V, g_XMOne.v);
        let mut select0: __m128 = _mm_and_ps(comp, g_XMOne.v);
        let mut select1: __m128 = _mm_andnot_ps(comp, g_XMNegativeOne.v);
        let mut sign: __m128 = _mm_or_ps(select0, select1);
        comp = _mm_cmple_ps(absV, g_XMOne.v);
        select0 = _mm_and_ps(comp, g_XMZero.v);
        select1 = _mm_andnot_ps(comp, sign);
        sign = _mm_or_ps(select0, select1);
        select0 = _mm_and_ps(comp, V);
        select1 = _mm_andnot_ps(comp, invV);
        let x: __m128 = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute polynomial approximation
        const TC1: XMVECTOR = unsafe { g_XMATanCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(TC1, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(TC1, _MM_SHUFFLE(2, 2, 2, 2));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(TC1, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(TC1, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        const TC0: XMVECTOR = unsafe { g_XMATanCoefficients0.v };
        vConstants = XM_PERMUTE_PS!(TC0, _MM_SHUFFLE(3, 3, 3, 3));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(TC0, _MM_SHUFFLE(2, 2, 2, 2));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(TC0, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(TC0, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);

        Result = _mm_mul_ps(Result, x);
        // For |V| > 1, the result is sign * Pi / 2 - atan(1 / V).
        let mut result1: __m128 = _mm_mul_ps(sign, g_XMHalfPi.v);
        result1 = _mm_sub_ps(result1, Result);

        comp = _mm_cmpeq_ps(sign, g_XMZero.v);
        select0 = _mm_and_ps(comp, Result);
        select1 = _mm_andnot_ps(comp, result1);
        Result = _mm_or_ps(select0, select1);
        return Result;
    }
}

#[inline]
pub fn XMVectorATan2(
    Y: FXMVECTOR,
    X: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                atan2f(Y.vector4_f32[0], X.vector4_f32[0]),
                atan2f(Y.vector4_f32[1], X.vector4_f32[1]),
                atan2f(Y.vector4_f32[2], X.vector4_f32[2]),
                atan2f(Y.vector4_f32[3], X.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(not(_XM_NO_INTRINSICS_))]
    unsafe {
        const ATan2Constants: XMVECTORF32 = XMVECTORF32 { f: [ XM_PI, XM_PIDIV2, XM_PIDIV4, XM_PI * 3.0 / 4.0 ] };

        // Return the inverse tangent of Y / X in the range of -Pi to Pi with the
        // following exceptions:
        //     Y == 0 and X is Negative         -> Pi with the sign of Y
        //     Y == 0 and X is Positive         -> 0 with the sign of Y
        //     Y != 0 and X == 0                -> Pi / 2 with the sign of Y
        //     Y != 0 and X is Negative         -> atan(Y/X) + (Pi with the sign of Y)
        //     X == -Infinity and Finite Y      -> Pi with the sign of Y
        //     X == +Infinity and Finite Y      -> 0 with the sign of Y
        //     Y == Infinity and X is Finite    -> Pi / 2 with the sign of Y
        //     Y == Infinity and X == -Infinity -> 3Pi / 4 with the sign of Y
        //     Y == Infinity and X == +Infinity -> Pi / 4 with the sign of Y

        let Zero: XMVECTOR = XMVectorZero();
        let mut ATanResultValid: XMVECTOR = XMVectorTrueInt();

        let mut Pi: XMVECTOR = XMVectorSplatX(ATan2Constants.v);
        let mut PiOverTwo: XMVECTOR = XMVectorSplatY(ATan2Constants.v);
        let mut PiOverFour: XMVECTOR = XMVectorSplatZ(ATan2Constants.v);
        let mut ThreePiOverFour: XMVECTOR = XMVectorSplatW(ATan2Constants.v);

        let YEqualsZero: XMVECTOR = XMVectorEqual(Y, Zero);
        let XEqualsZero: XMVECTOR = XMVectorEqual(X, Zero);
        let mut XIsPositive: XMVECTOR = XMVectorAndInt(X, g_XMNegativeZero.v);
        XIsPositive = XMVectorEqualInt(XIsPositive, Zero);
        let YEqualsInfinity: XMVECTOR = XMVectorIsInfinite(Y);
        let XEqualsInfinity: XMVECTOR = XMVectorIsInfinite(X);

        let YSign: XMVECTOR = XMVectorAndInt(Y, g_XMNegativeZero.v);
        Pi = XMVectorOrInt(Pi, YSign);
        PiOverTwo = XMVectorOrInt(PiOverTwo, YSign);
        PiOverFour = XMVectorOrInt(PiOverFour, YSign);
        ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign);

        let mut R1: XMVECTOR = XMVectorSelect(Pi, YSign, XIsPositive);
        let mut R2: XMVECTOR = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero);
        let R3: XMVECTOR = XMVectorSelect(R2, R1, YEqualsZero);
        let R4: XMVECTOR = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive);
        let R5: XMVECTOR = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity);
        let Result: XMVECTOR = XMVectorSelect(R3, R5, YEqualsInfinity);
        ATanResultValid = XMVectorEqualInt(Result, ATanResultValid);

        let V: XMVECTOR = XMVectorDivide(Y, X);

        let R0: XMVECTOR = XMVectorATan(V);

        R1 = XMVectorSelect(Pi, g_XMNegativeZero.v, XIsPositive);
        R2 = XMVectorAdd(R0, R1);

        return XMVectorSelect(Result, R2, ATanResultValid);
    }
}
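
// Quadrant checks against the scalar atan2, plus the X == 0 special case.
// Hand-picked points; epsilon argument of assert_approx_eq! assumed.
#[test]
fn test_XMVectorATan2() {
    for &(y, x) in &[(1.0f32, 1.0f32), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0), (0.0, 1.0), (1.0, 0.0)] {
        let r = XMVectorATan2(XMVectorReplicate(y), XMVectorReplicate(x));
        assert_approx_eq!(y.atan2(x), XMVectorGetX(r), 1.0e-5);
    }
}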

#[inline]
pub fn XMVectorSinEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 7-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                sinf(V.vector4_f32[0]),
                sinf(V.vector4_f32[1]),
                sinf(V.vector4_f32[2]),
                sinf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Force the value within the bounds of pi
        let mut x: XMVECTOR = XMVectorModAngles(V);

        // Map in [-pi/2,pi/2] with sin(y) = sin(x).
        let sign: __m128 = _mm_and_ps(x, g_XMNegativeZero.v);
        let c: __m128 = _mm_or_ps(g_XMPi.v, sign); // pi when x >= 0, -pi when x < 0
        let absx: __m128 = _mm_andnot_ps(sign, x); // |x|
        let rflx: __m128 = _mm_sub_ps(c, x);
        let comp: __m128 = _mm_cmple_ps(absx, g_XMHalfPi.v);
        let select0: __m128 = _mm_and_ps(comp, x);
        let select1: __m128 = _mm_andnot_ps(comp, rflx);
        x = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute polynomial approximation
        const SEC: XMVECTOR = unsafe { g_XMSinCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(SEC, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(SEC, _MM_SHUFFLE(2, 2, 2, 2));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SEC, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);
        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, x);
        return Result;
    }
}

#[inline]
pub fn XMVectorCosEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 6-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                cosf(V.vector4_f32[0]),
                cosf(V.vector4_f32[1]),
                cosf(V.vector4_f32[2]),
                cosf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Map V to x in [-pi,pi].
        let mut x: XMVECTOR = XMVectorModAngles(V);

        // Map in [-pi/2,pi/2] with cos(y) = sign*cos(x).
        let mut sign: XMVECTOR = _mm_and_ps(x, g_XMNegativeZero.v);
        let c: __m128 = _mm_or_ps(g_XMPi.v, sign); // pi when x >= 0, -pi when x < 0
        let absx: __m128 = _mm_andnot_ps(sign, x); // |x|
        let rflx: __m128 = _mm_sub_ps(c, x);
        let comp: __m128 = _mm_cmple_ps(absx, g_XMHalfPi.v);
        let mut select0: __m128 = _mm_and_ps(comp, x);
        let mut select1: __m128 = _mm_andnot_ps(comp, rflx);
        x = _mm_or_ps(select0, select1);
        select0 = _mm_and_ps(comp, g_XMOne.v);
        select1 = _mm_andnot_ps(comp, g_XMNegativeOne.v);
        sign = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute polynomial approximation
        const CEC: XMVECTOR = unsafe { g_XMCosCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(CEC, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(CEC, _MM_SHUFFLE(2, 2, 2, 2));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CEC, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);
        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, sign);
        return Result;
    }
}

#[inline]
pub fn XMVectorSinCosEst(
    pSin: &mut XMVECTOR,
    pCos: &mut XMVECTOR,
    V: FXMVECTOR,
)
{
    // 7-degree minimax approximation for sine; 6-degree for cosine

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Sin = XMVECTORF32 {
            f: [
                sinf(V.vector4_f32[0]),
                sinf(V.vector4_f32[1]),
                sinf(V.vector4_f32[2]),
                sinf(V.vector4_f32[3])
            ]
        };
        let Cos = XMVECTORF32 {
            f: [
                cosf(V.vector4_f32[0]),
                cosf(V.vector4_f32[1]),
                cosf(V.vector4_f32[2]),
                cosf(V.vector4_f32[3])
            ]
        };
        *pSin = Sin.v;
        *pCos = Cos.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Force the value within the bounds of pi
        let mut x: XMVECTOR = XMVectorModAngles(V);

        // Map in [-pi/2,pi/2] with sin(y) = sin(x), cos(y) = sign*cos(x).
        let mut sign: XMVECTOR = _mm_and_ps(x, g_XMNegativeZero.v);
        let c: __m128 = _mm_or_ps(g_XMPi.v, sign); // pi when x >= 0, -pi when x < 0
        let absx: __m128 = _mm_andnot_ps(sign, x); // |x|
        let rflx: __m128 = _mm_sub_ps(c, x);
        let comp: __m128 = _mm_cmple_ps(absx, g_XMHalfPi.v);
        let mut select0: __m128 = _mm_and_ps(comp, x);
        let mut select1: __m128 = _mm_andnot_ps(comp, rflx);
        x = _mm_or_ps(select0, select1);
        select0 = _mm_and_ps(comp, g_XMOne.v);
        select1 = _mm_andnot_ps(comp, g_XMNegativeOne.v);
        sign = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute the sine
        const SEC: XMVECTOR = unsafe { g_XMSinCoefficients1.v };
        let mut vConstantsB: __m128 = XM_PERMUTE_PS!(SEC, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(SEC, _MM_SHUFFLE(2, 2, 2, 2));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(SEC, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);
        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, x);
        *pSin = Result;

        // Compute the cosine
        const CEC: XMVECTOR = unsafe { g_XMCosCoefficients1.v };
        vConstantsB = XM_PERMUTE_PS!(CEC, _MM_SHUFFLE(3, 3, 3, 3));
        vConstants = XM_PERMUTE_PS!(CEC, _MM_SHUFFLE(2, 2, 2, 2));
        Result = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(CEC, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);
        Result = XM_FMADD_PS!(Result, x2, g_XMOne.v);
        Result = _mm_mul_ps(Result, sign);
        *pCos = Result;
    }
}

#[inline]
pub fn XMVectorTanEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                tanf(V.vector4_f32[0]),
                tanf(V.vector4_f32[1]),
                tanf(V.vector4_f32[2]),
                tanf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let OneOverPi: XMVECTOR = XMVectorSplatW(g_XMTanEstCoefficients.v);

        let mut V1: XMVECTOR = XMVectorMultiply(V, OneOverPi);
        V1 = XMVectorRound(V1);

        V1 = XMVectorNegativeMultiplySubtract(g_XMPi.v, V1, V);

        let T0: XMVECTOR = XMVectorSplatX(g_XMTanEstCoefficients.v);
        let T1: XMVECTOR = XMVectorSplatY(g_XMTanEstCoefficients.v);
        let T2: XMVECTOR = XMVectorSplatZ(g_XMTanEstCoefficients.v);

        let V2T2: XMVECTOR = XMVectorNegativeMultiplySubtract(V1, V1, T2);
        let V2: XMVECTOR = XMVectorMultiply(V1, V1);
        let V1T0: XMVECTOR = XMVectorMultiply(V1, T0);
        let V1T1: XMVECTOR = XMVectorMultiply(V1, T1);

        let D: XMVECTOR = XMVectorReciprocalEst(V2T2);
        let N: XMVECTOR = XMVectorMultiplyAdd(V2, V1T1, V1T0);

        return XMVectorMultiply(N, D);
    }
}

#[inline]
pub fn XMVectorASinEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 3-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                asinf(V.vector4_f32[0]),
                asinf(V.vector4_f32[1]),
                asinf(V.vector4_f32[2]),
                asinf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let nonnegative: __m128 = _mm_cmpge_ps(V, g_XMZero.v);
        let mvalue: __m128 = _mm_sub_ps(g_XMZero.v, V);
        let x: __m128 = _mm_max_ps(V, mvalue); // |V|

        // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
        let oneMValue: __m128 = _mm_sub_ps(g_XMOne.v, x);
        let clampOneMValue: __m128 = _mm_max_ps(g_XMZero.v, oneMValue);
        let root: __m128 = _mm_sqrt_ps(clampOneMValue);

        // Compute polynomial approximation
        const AEC: XMVECTOR = unsafe { g_XMArcEstCoefficients.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(2, 2, 2, 2));
        let mut t0: __m128 = XM_FMADD_PS!(vConstantsB, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(1, 1, 1, 1));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(0, 0, 0, 0));
        t0 = XM_FMADD_PS!(t0, x, vConstants);
        t0 = _mm_mul_ps(t0, root);

        let mut t1: __m128 = _mm_sub_ps(g_XMPi.v, t0);
        t0 = _mm_and_ps(nonnegative, t0);
        t1 = _mm_andnot_ps(nonnegative, t1);
        t0 = _mm_or_ps(t0, t1);
        t0 = _mm_sub_ps(g_XMHalfPi.v, t0);
        return t0;
    }
}

#[inline]
pub fn XMVectorACosEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 3-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                acosf(V.vector4_f32[0]),
                acosf(V.vector4_f32[1]),
                acosf(V.vector4_f32[2]),
                acosf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let nonnegative: __m128 = _mm_cmpge_ps(V, g_XMZero.v);
        let mvalue: __m128 = _mm_sub_ps(g_XMZero.v, V);
        let x: __m128 = _mm_max_ps(V, mvalue); // |V|

        // Compute (1-|V|), clamp to zero to avoid sqrt of negative number.
        let oneMValue: __m128 = _mm_sub_ps(g_XMOne.v, x);
        let clampOneMValue: __m128 = _mm_max_ps(g_XMZero.v, oneMValue);
        let root: __m128 = _mm_sqrt_ps(clampOneMValue);

        // Compute polynomial approximation
        const AEC: XMVECTOR = unsafe { g_XMArcEstCoefficients.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(2, 2, 2, 2));
        let mut t0: __m128 = XM_FMADD_PS!(vConstantsB, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(1, 1, 1, 1));
        t0 = XM_FMADD_PS!(t0, x, vConstants);

        vConstants = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(0, 0, 0, 0));
        t0 = XM_FMADD_PS!(t0, x, vConstants);
        t0 = _mm_mul_ps(t0, root);

        let mut t1: __m128 = _mm_sub_ps(g_XMPi.v, t0);
        t0 = _mm_and_ps(nonnegative, t0);
        t1 = _mm_andnot_ps(nonnegative, t1);
        t0 = _mm_or_ps(t0, t1);
        return t0;
    }
}

#[inline]
pub fn XMVectorATanEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    // 9-degree minimax approximation

    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                atanf(V.vector4_f32[0]),
                atanf(V.vector4_f32[1]),
                atanf(V.vector4_f32[2]),
                atanf(V.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let absV: __m128 = XMVectorAbs(V);
        let invV: __m128 = _mm_div_ps(g_XMOne.v, V);
        let mut comp: __m128 = _mm_cmpgt_ps(V, g_XMOne.v);
        let mut select0: __m128 = _mm_and_ps(comp, g_XMOne.v);
        let mut select1: __m128 = _mm_andnot_ps(comp, g_XMNegativeOne.v);
        let mut sign: __m128 = _mm_or_ps(select0, select1);
        comp = _mm_cmple_ps(absV, g_XMOne.v);
        select0 = _mm_and_ps(comp, g_XMZero.v);
        select1 = _mm_andnot_ps(comp, sign);
        sign = _mm_or_ps(select0, select1);
        select0 = _mm_and_ps(comp, V);
        select1 = _mm_andnot_ps(comp, invV);
        let x: __m128 = _mm_or_ps(select0, select1);

        let x2: __m128 = _mm_mul_ps(x, x);

        // Compute polynomial approximation
        const AEC: XMVECTOR = unsafe { g_XMATanEstCoefficients1.v };
        let vConstantsB: __m128 = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(3, 3, 3, 3));
        let mut vConstants: __m128 = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(2, 2, 2, 2));
        let mut Result: __m128 = XM_FMADD_PS!(vConstantsB, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(1, 1, 1, 1));
        Result = XM_FMADD_PS!(Result, x2, vConstants);

        vConstants = XM_PERMUTE_PS!(AEC, _MM_SHUFFLE(0, 0, 0, 0));
        Result = XM_FMADD_PS!(Result, x2, vConstants);
        Result = XM_FMADD_PS!(Result, x2, g_XMATanEstCoefficients0.v);
        Result = _mm_mul_ps(Result, x);
        let mut result1: __m128 = _mm_mul_ps(sign, g_XMHalfPi.v);
        result1 = _mm_sub_ps(result1, Result);

        comp = _mm_cmpeq_ps(sign, g_XMZero.v);
        select0 = _mm_and_ps(comp, Result);
        select1 = _mm_andnot_ps(comp, result1);
        Result = _mm_or_ps(select0, select1);
        return Result;
    }
}


#[inline]
pub fn XMVectorATan2Est(
    Y: FXMVECTOR,
    X: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 {
            f: [
                atan2f(Y.vector4_f32[0], X.vector4_f32[0]),
                atan2f(Y.vector4_f32[1], X.vector4_f32[1]),
                atan2f(Y.vector4_f32[2], X.vector4_f32[2]),
                atan2f(Y.vector4_f32[3], X.vector4_f32[3])
            ]
        };
        return Result.v;
    }

    #[cfg(not(_XM_NO_INTRINSICS_))]
    unsafe {
        const ATan2Constants: XMVECTORF32 = XMVECTORF32 { f: [ XM_PI, XM_PIDIV2, XM_PIDIV4, XM_PI * 3.0 / 4.0 ] };

        let Zero: XMVECTOR = XMVectorZero();
        let mut ATanResultValid: XMVECTOR = XMVectorTrueInt();

        let mut Pi: XMVECTOR = XMVectorSplatX(ATan2Constants.v);
        let mut PiOverTwo: XMVECTOR = XMVectorSplatY(ATan2Constants.v);
        let mut PiOverFour: XMVECTOR = XMVectorSplatZ(ATan2Constants.v);
        let mut ThreePiOverFour: XMVECTOR = XMVectorSplatW(ATan2Constants.v);

        let YEqualsZero: XMVECTOR = XMVectorEqual(Y, Zero);
        let XEqualsZero: XMVECTOR = XMVectorEqual(X, Zero);
        let mut XIsPositive: XMVECTOR = XMVectorAndInt(X, g_XMNegativeZero.v);
        XIsPositive = XMVectorEqualInt(XIsPositive, Zero);
        let YEqualsInfinity: XMVECTOR = XMVectorIsInfinite(Y);
        let XEqualsInfinity: XMVECTOR = XMVectorIsInfinite(X);

        let YSign: XMVECTOR = XMVectorAndInt(Y, g_XMNegativeZero.v);
        Pi = XMVectorOrInt(Pi, YSign);
        PiOverTwo = XMVectorOrInt(PiOverTwo, YSign);
        PiOverFour = XMVectorOrInt(PiOverFour, YSign);
        ThreePiOverFour = XMVectorOrInt(ThreePiOverFour, YSign);

        let mut R1: XMVECTOR = XMVectorSelect(Pi, YSign, XIsPositive);
        let mut R2: XMVECTOR = XMVectorSelect(ATanResultValid, PiOverTwo, XEqualsZero);
        let R3: XMVECTOR = XMVectorSelect(R2, R1, YEqualsZero);
        let R4: XMVECTOR = XMVectorSelect(ThreePiOverFour, PiOverFour, XIsPositive);
        let R5: XMVECTOR = XMVectorSelect(PiOverTwo, R4, XEqualsInfinity);
        let Result: XMVECTOR = XMVectorSelect(R3, R5, YEqualsInfinity);
        ATanResultValid = XMVectorEqualInt(Result, ATanResultValid);

        let Reciprocal: XMVECTOR = XMVectorReciprocalEst(X);
        let V: XMVECTOR = XMVectorMultiply(Y, Reciprocal);
        let R0: XMVECTOR = XMVectorATanEst(V);

        R1 = XMVectorSelect(Pi, g_XMNegativeZero.v, XIsPositive);
        R2 = XMVectorAdd(R0, R1);

        return XMVectorSelect(Result, R2, ATanResultValid);
    }
}
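
// A usage sketch added for illustration, in the style of the crate's tests further
// below. The quadrant fix-ups make atan2(0, 1) = 0 and atan2(1, 0) = pi/2 without
// touching the polynomial estimate, so both values hold exactly on every code path.
#[test]
fn test_XMVectorATan2Est() {
    let r = XMVectorATan2Est(XMVectorZero(), XMVectorReplicate(1.0));
    assert_eq!(XMVectorGetX(r), 0.0);

    let r = XMVectorATan2Est(XMVectorReplicate(1.0), XMVectorZero());
    assert!((XMVectorGetX(r) - XM_PIDIV2).abs() < 1.0e-6);
}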


#[inline]
pub fn XMVectorLerp(
    V0: FXMVECTOR,
    V1: FXMVECTOR,
    t: f32,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let Scale: XMVECTOR = XMVectorReplicate(t);
        let Length: XMVECTOR = XMVectorSubtract(V1, V0);
        return XMVectorMultiplyAdd(Length, Scale, V0);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let L: XMVECTOR = _mm_sub_ps(V1, V0);
        let S: XMVECTOR = _mm_set_ps1(t);
        return XM_FMADD_PS!(L, S, V0);
    }
}
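
// Usage sketch (added): linear interpolation at t = 0.5 between replicated
// endpoints; (2 - 0) * 0.5 + 0 is exact in f32, so a direct compare via
// XMVector4Equal (assumed to behave as in DirectXMath) is safe here.
#[test]
fn test_XMVectorLerp() {
    let v0 = XMVectorReplicate(0.0);
    let v1 = XMVectorReplicate(2.0);
    let r = XMVectorLerp(v0, v1, 0.5);
    assert!(XMVector4Equal(r, XMVectorReplicate(1.0)));
}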

#[inline]
pub fn XMVectorLerpV(
    V0: FXMVECTOR,
    V1: FXMVECTOR,
    T: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let Length: XMVECTOR = XMVectorSubtract(V1, V0);
        return XMVectorMultiplyAdd(Length, T, V0);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let Length: XMVECTOR = _mm_sub_ps(V1, V0);
        return XM_FMADD_PS!(Length, T, V0);
    }
}

#[inline]
pub fn XMVectorHermite(
    Position0: FXMVECTOR,
    Tangent0: FXMVECTOR,
    Position1: FXMVECTOR,
    Tangent1: GXMVECTOR,
    t: f32,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let t2: f32 = t * t;
        let t3: f32 = t * t2;

        let P0: XMVECTOR = XMVectorReplicate(2.0 * t3 - 3.0 * t2 + 1.0);
        let T0: XMVECTOR = XMVectorReplicate(t3 - 2.0 * t2 + t);
        let P1: XMVECTOR = XMVectorReplicate(-2.0 * t3 + 3.0 * t2);
        let T1: XMVECTOR = XMVectorReplicate(t3 - t2);

        let mut Result: XMVECTOR = XMVectorMultiply(P0, Position0);
        Result = XMVectorMultiplyAdd(T0, Tangent0, Result);
        Result = XMVectorMultiplyAdd(P1, Position1, Result);
        Result = XMVectorMultiplyAdd(T1, Tangent1, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let t2: f32 = t * t;
        let t3: f32 = t * t2;

        let P0: XMVECTOR = _mm_set_ps1(2.0 * t3 - 3.0 * t2 + 1.0);
        let T0: XMVECTOR = _mm_set_ps1(t3 - 2.0 * t2 + t);
        let P1: XMVECTOR = _mm_set_ps1(-2.0 * t3 + 3.0 * t2);
        let T1: XMVECTOR = _mm_set_ps1(t3 - t2);

        let mut vResult: XMVECTOR = _mm_mul_ps(P0, Position0);
        vResult = XM_FMADD_PS!(Tangent0, T0, vResult);
        vResult = XM_FMADD_PS!(Position1, P1, vResult);
        vResult = XM_FMADD_PS!(Tangent1, T1, vResult);
        return vResult;
    }
}
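
// Usage sketch (added): at t = 0 the Hermite basis reduces to (1, 0, 0, 0), so the
// spline passes through Position0 exactly; at t = 1 it reduces to (0, 0, 1, 0),
// hitting Position1. Assumes XMVector4Equal behaves as in DirectXMath.
#[test]
fn test_XMVectorHermite() {
    let p0 = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let t0 = XMVectorReplicate(1.0);
    let p1 = XMVectorSet(5.0, 6.0, 7.0, 8.0);
    let t1 = XMVectorReplicate(1.0);
    assert!(XMVector4Equal(XMVectorHermite(p0, t0, p1, t1, 0.0), p0));
    assert!(XMVector4Equal(XMVectorHermite(p0, t0, p1, t1, 1.0), p1));
}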

#[inline]
pub fn XMVectorHermiteV(
    Position0: FXMVECTOR,
    Tangent0: FXMVECTOR,
    Position1: FXMVECTOR,
    Tangent1: GXMVECTOR,
    T: HXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let T2: XMVECTOR = XMVectorMultiply(T, T);
        let T3: XMVECTOR = XMVectorMultiply(T, T2);

        let P0: XMVECTOR = XMVectorReplicate(2.0 * T3.vector4_f32[0] - 3.0 * T2.vector4_f32[0] + 1.0);
        let T0: XMVECTOR = XMVectorReplicate(T3.vector4_f32[1] - 2.0 * T2.vector4_f32[1] + T.vector4_f32[1]);
        let P1: XMVECTOR = XMVectorReplicate(-2.0 * T3.vector4_f32[2] + 3.0 * T2.vector4_f32[2]);
        let T1: XMVECTOR = XMVectorReplicate(T3.vector4_f32[3] - T2.vector4_f32[3]);

        let mut Result: XMVECTOR = XMVectorMultiply(P0, Position0);
        Result = XMVectorMultiplyAdd(T0, Tangent0, Result);
        Result = XMVectorMultiplyAdd(P1, Position1, Result);
        Result = XMVectorMultiplyAdd(T1, Tangent1, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        const CatMulT2: XMVECTORF32 = XMVECTORF32 { f: [ -3.0, -2.0, 3.0, -1.0 ] };
        const CatMulT3: XMVECTORF32 = XMVECTORF32 { f: [ 2.0, 1.0, -2.0, 1.0 ] };

        let mut T2: XMVECTOR = _mm_mul_ps(T, T);
        let mut T3: XMVECTOR = _mm_mul_ps(T, T2);
        T2 = _mm_mul_ps(T2, CatMulT2.v);
        T3 = XM_FMADD_PS!(T3, CatMulT3.v, T2);
        T2 = _mm_and_ps(T, g_XMMaskY.v);
        T3 = _mm_add_ps(T3, T2);
        T3 = _mm_add_ps(T3, g_XMIdentityR0.v);
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(T3, _MM_SHUFFLE(0, 0, 0, 0));
        vResult = _mm_mul_ps(vResult, Position0);
        T2 = XM_PERMUTE_PS!(T3, _MM_SHUFFLE(1, 1, 1, 1));
        vResult = XM_FMADD_PS!(T2, Tangent0, vResult);
        T2 = XM_PERMUTE_PS!(T3, _MM_SHUFFLE(2, 2, 2, 2));
        vResult = XM_FMADD_PS!(T2, Position1, vResult);
        T3 = XM_PERMUTE_PS!(T3, _MM_SHUFFLE(3, 3, 3, 3));
        vResult = XM_FMADD_PS!(T3, Tangent1, vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVectorCatmullRom(
    Position0: FXMVECTOR,
    Position1: FXMVECTOR,
    Position2: FXMVECTOR,
    Position3: GXMVECTOR,
    t: f32,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let t2: f32 = t * t;
        let t3: f32 = t * t2;

        let P0: XMVECTOR = XMVectorReplicate((-t3 + 2.0 * t2 - t) * 0.5);
        let P1: XMVECTOR = XMVectorReplicate((3.0 * t3 - 5.0 * t2 + 2.0) * 0.5);
        let P2: XMVECTOR = XMVectorReplicate((-3.0 * t3 + 4.0 * t2 + t) * 0.5);
        let P3: XMVECTOR = XMVectorReplicate((t3 - t2) * 0.5);

        let mut Result: XMVECTOR = XMVectorMultiply(P0, Position0);
        Result = XMVectorMultiplyAdd(P1, Position1, Result);
        Result = XMVectorMultiplyAdd(P2, Position2, Result);
        Result = XMVectorMultiplyAdd(P3, Position3, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let t2: f32 = t * t;
        let t3: f32 = t * t2;

        let mut P0: XMVECTOR = _mm_set_ps1((-t3 + 2.0 * t2 - t) * 0.5);
        let mut P1: XMVECTOR = _mm_set_ps1((3.0 * t3 - 5.0 * t2 + 2.0) * 0.5);
        let mut P2: XMVECTOR = _mm_set_ps1((-3.0 * t3 + 4.0 * t2 + t) * 0.5);
        let mut P3: XMVECTOR = _mm_set_ps1((t3 - t2) * 0.5);

        P1 = _mm_mul_ps(Position1, P1);
        P0 = XM_FMADD_PS!(Position0, P0, P1);
        P3 = _mm_mul_ps(Position3, P3);
        P2 = XM_FMADD_PS!(Position2, P2, P3);
        P0 = _mm_add_ps(P0, P2);
        return P0;
    }
}
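
// Usage sketch (added): at t = 0 the Catmull-Rom weights are (0, 1, 0, 0), so the
// curve passes through Position1 exactly; at t = 1 they are (0, 0, 1, 0), hitting
// Position2. Assumes XMVector4Equal behaves as in DirectXMath.
#[test]
fn test_XMVectorCatmullRom() {
    let p0 = XMVectorZero();
    let p1 = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let p2 = XMVectorSet(5.0, 6.0, 7.0, 8.0);
    let p3 = XMVectorSet(9.0, 9.0, 9.0, 9.0);
    assert!(XMVector4Equal(XMVectorCatmullRom(p0, p1, p2, p3, 0.0), p1));
    assert!(XMVector4Equal(XMVectorCatmullRom(p0, p1, p2, p3, 1.0), p2));
}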

#[inline]
pub fn XMVectorCatmullRomV(
    Position0: FXMVECTOR,
    Position1: FXMVECTOR,
    Position2: FXMVECTOR,
    Position3: GXMVECTOR,
    T: HXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let fx: f32 = T.vector4_f32[0];
        let fy: f32 = T.vector4_f32[1];
        let fz: f32 = T.vector4_f32[2];
        let fw: f32 = T.vector4_f32[3];
        let vResult = XMVECTORF32 { f: [
            0.5 * ((-fx * fx * fx + 2.0 * fx * fx - fx) * Position0.vector4_f32[0]
                + (3.0 * fx * fx * fx - 5.0 * fx * fx + 2.0) * Position1.vector4_f32[0]
                + (-3.0 * fx * fx * fx + 4.0 * fx * fx + fx) * Position2.vector4_f32[0]
                + (fx * fx * fx - fx * fx) * Position3.vector4_f32[0]),

            0.5 * ((-fy * fy * fy + 2.0 * fy * fy - fy) * Position0.vector4_f32[1]
                + (3.0 * fy * fy * fy - 5.0 * fy * fy + 2.0) * Position1.vector4_f32[1]
                + (-3.0 * fy * fy * fy + 4.0 * fy * fy + fy) * Position2.vector4_f32[1]
                + (fy * fy * fy - fy * fy) * Position3.vector4_f32[1]),

            0.5 * ((-fz * fz * fz + 2.0 * fz * fz - fz) * Position0.vector4_f32[2]
                + (3.0 * fz * fz * fz - 5.0 * fz * fz + 2.0) * Position1.vector4_f32[2]
                + (-3.0 * fz * fz * fz + 4.0 * fz * fz + fz) * Position2.vector4_f32[2]
                + (fz * fz * fz - fz * fz) * Position3.vector4_f32[2]),

            0.5 * ((-fw * fw * fw + 2.0 * fw * fw - fw) * Position0.vector4_f32[3]
                + (3.0 * fw * fw * fw - 5.0 * fw * fw + 2.0) * Position1.vector4_f32[3]
                + (-3.0 * fw * fw * fw + 4.0 * fw * fw + fw) * Position2.vector4_f32[3]
                + (fw * fw * fw - fw * fw) * Position3.vector4_f32[3])
        ] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        const Catmul2: XMVECTORF32 = XMVECTORF32 { f: [ 2.0, 2.0, 2.0, 2.0 ] };
        const Catmul3: XMVECTORF32 = XMVECTORF32 { f: [ 3.0, 3.0, 3.0, 3.0 ] };
        const Catmul4: XMVECTORF32 = XMVECTORF32 { f: [ 4.0, 4.0, 4.0, 4.0 ] };
        const Catmul5: XMVECTORF32 = XMVECTORF32 { f: [ 5.0, 5.0, 5.0, 5.0 ] };
        let T2: XMVECTOR = _mm_mul_ps(T, T);
        let mut T3: XMVECTOR = _mm_mul_ps(T, T2);
        let mut vResult: XMVECTOR = _mm_add_ps(T2, T2);
        vResult = _mm_sub_ps(vResult, T);
        vResult = _mm_sub_ps(vResult, T3);
        vResult = _mm_mul_ps(vResult, Position0);
        let mut vTemp: XMVECTOR = _mm_mul_ps(T3, Catmul3.v);
        vTemp = XM_FNMADD_PS!(T2, Catmul5.v, vTemp);
        vTemp = _mm_add_ps(vTemp, Catmul2.v);
        vResult = XM_FMADD_PS!(vTemp, Position1, vResult);
        vTemp = _mm_mul_ps(T2, Catmul4.v);
        vTemp = XM_FNMADD_PS!(T3, Catmul3.v, vTemp);
        vTemp = _mm_add_ps(vTemp, T);
        vResult = XM_FMADD_PS!(vTemp, Position2, vResult);
        T3 = _mm_sub_ps(T3, T2);
        vResult = XM_FMADD_PS!(T3, Position3, vResult);
        vResult = _mm_mul_ps(vResult, g_XMOneHalf.v);
        return vResult;
    }
}

#[inline]
pub fn XMVectorBaryCentric(
    Position0: FXMVECTOR,
    Position1: FXMVECTOR,
    Position2: FXMVECTOR,
    f: f32,
    g: f32,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let P10: XMVECTOR = XMVectorSubtract(Position1, Position0);
        let ScaleF: XMVECTOR = XMVectorReplicate(f);

        let P20: XMVECTOR = XMVectorSubtract(Position2, Position0);
        let ScaleG: XMVECTOR = XMVectorReplicate(g);

        let mut Result: XMVECTOR = XMVectorMultiplyAdd(P10, ScaleF, Position0);
        Result = XMVectorMultiplyAdd(P20, ScaleG, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut R1: XMVECTOR = _mm_sub_ps(Position1, Position0);
        let R2: XMVECTOR = _mm_sub_ps(Position2, Position0);
        let SF: XMVECTOR = _mm_set_ps1(f);
        R1 = XM_FMADD_PS!(R1, SF, Position0);
        let SG: XMVECTOR = _mm_set_ps1(g);
        return XM_FMADD_PS!(R2, SG, R1);
    }
}
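
// Usage sketch (added): with (f, g) = (0, 0) the result is Position0; with (1, 0)
// it is Position1, and with (0, 1) it is Position2, all exactly for these inputs.
// Assumes XMVector4Equal behaves as in DirectXMath.
#[test]
fn test_XMVectorBaryCentric() {
    let p0 = XMVectorSet(1.0, 0.0, 0.0, 0.0);
    let p1 = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let p2 = XMVectorSet(0.0, 0.0, 1.0, 0.0);
    assert!(XMVector4Equal(XMVectorBaryCentric(p0, p1, p2, 0.0, 0.0), p0));
    assert!(XMVector4Equal(XMVectorBaryCentric(p0, p1, p2, 1.0, 0.0), p1));
    assert!(XMVector4Equal(XMVectorBaryCentric(p0, p1, p2, 0.0, 1.0), p2));
}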

#[inline]
pub fn XMVectorBaryCentricV(
    Position0: FXMVECTOR,
    Position1: FXMVECTOR,
    Position2: FXMVECTOR,
    F: GXMVECTOR,
    G: HXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let P10: XMVECTOR = XMVectorSubtract(Position1, Position0);
        let P20: XMVECTOR = XMVectorSubtract(Position2, Position0);

        let mut Result: XMVECTOR = XMVectorMultiplyAdd(P10, F, Position0);
        Result = XMVectorMultiplyAdd(P20, G, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut R1: XMVECTOR = _mm_sub_ps(Position1, Position0);
        let R2: XMVECTOR = _mm_sub_ps(Position2, Position0);
        R1 = XM_FMADD_PS!(R1, F, Position0);
        return XM_FMADD_PS!(R2, G, R1);
    }
}

#[inline]
pub fn XMVector2Equal(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 3) == 3) != false);
    }
}

#[inline]
pub fn XMVector2EqualR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] == V2.vector4_f32[1]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] != V2.vector4_f32[1]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp) & 3;
        let mut CR = 0;
        if (iTest == 3)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if (!ibool(iTest))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector2EqualInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3) == 3) != false);
    }
}

#[inline]
pub fn XMVector2EqualIntR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if ((V1.vector4_u32[0] == V2.vector4_u32[0]) &&
            (V1.vector4_u32[1] == V2.vector4_u32[1]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) &&
            (V1.vector4_u32[1] != V2.vector4_u32[1]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        let iTest: i32 = _mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3;
        let mut CR: u32 = 0;
        if (iTest == 3)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if (!ibool(iTest))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector2NearEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    Epsilon: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let dx: f32 = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]);
        let dy: f32 = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]);
        return ((dx <= Epsilon.vector4_f32[0]) &&
            (dy <= Epsilon.vector4_f32[1]));
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vDelta: XMVECTOR = _mm_sub_ps(V1, V2);
        let mut vTemp: XMVECTOR = _mm_setzero_ps();
        vTemp = _mm_sub_ps(vTemp, vDelta);
        vTemp = _mm_max_ps(vTemp, vDelta);
        vTemp = _mm_cmple_ps(vTemp, Epsilon);
        return (((_mm_movemask_ps(vTemp) & 3) == 0x3) != false);
    }
}


#[inline]
pub fn XMVector2NotEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 3) != 3) != false);
    }
}

#[inline]
pub fn XMVector2NotEqualInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 3) != 3) != false);
    }
}

#[inline]
pub fn XMVector2Greater(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 3) == 3) != false);
    }
}

#[inline]
pub fn XMVector2GreaterR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR = 0;
        if ((V1.vector4_f32[0] > V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] > V2.vector4_f32[1]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] <= V2.vector4_f32[1]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp) & 3;
        let mut CR = 0;
        if (iTest == 3)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if (!ibool(iTest))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector2GreaterOrEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 3) == 3) != false);
    }
}

#[inline]
pub fn XMVector2GreaterOrEqualR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR = 0;
        if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] >= V2.vector4_f32[1]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] < V2.vector4_f32[1]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp) & 3;
        let mut CR: u32 = 0;
        if (iTest == 3)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if (!ibool(iTest))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector2Less(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmplt_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 3) == 3) != false);
    }
}

#[inline]
pub fn XMVector2LessOrEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmple_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 3) == 3) != false);
    }
}

#[inline]
pub fn XMVector2InBounds(
    V: FXMVECTOR,
    Bounds: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
            (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vTemp1: XMVECTOR = _mm_cmple_ps(V, Bounds);
        let mut vTemp2: XMVECTOR = _mm_mul_ps(Bounds, g_XMNegativeOne.v);
        vTemp2 = _mm_cmple_ps(vTemp2, V);
        vTemp1 = _mm_and_ps(vTemp1, vTemp2);
        return (((_mm_movemask_ps(vTemp1) & 0x3) == 0x3) != false);
    }
}

#[inline]
pub fn XMVector2IsNaN(
    V: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (XMISNAN!(V.vector4_f32[0]) ||
            XMISNAN!(V.vector4_f32[1]));
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let vTempNan: XMVECTOR = _mm_cmpneq_ps(V, V);
        return ((_mm_movemask_ps(vTempNan) & 3) != 0);
    }
}

#[inline]
pub fn XMVector2IsInfinite(
    V: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (XMISINF!(V.vector4_f32[0]) ||
            XMISINF!(V.vector4_f32[1]));
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vTemp: __m128 = _mm_and_ps(V, g_XMAbsMask.v);
        vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity.v);
        return ((_mm_movemask_ps(vTemp) & 3) != 0);
    }
}

#[inline]
pub fn XMVector2Dot(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let fDot: f32 = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1];
        let mut Result: XMVECTORF32 = crate::undefined();
        Result.f[0] = fDot;
        Result.f[1] = fDot;
        Result.f[2] = fDot;
        Result.f[3] = fDot;
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V1, V2);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }
}
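
// Usage sketch (added): the 2D dot product is splatted across all four lanes and
// the z/w inputs are ignored; 1*3 + 2*4 = 11 is exact in f32. Assumes
// XMVector4Equal behaves as in DirectXMath.
#[test]
fn test_XMVector2Dot() {
    let a = XMVectorSet(1.0, 2.0, 100.0, 100.0);
    let b = XMVectorSet(3.0, 4.0, 100.0, 100.0);
    assert!(XMVector4Equal(XMVector2Dot(a, b), XMVectorReplicate(11.0)));
}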

#[inline]
pub fn XMVector2Cross(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let fCross: f32 = (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]);
        let mut Result: XMVECTORF32 = crate::undefined();
        Result.f[0] = fCross;
        Result.f[1] = fCross;
        Result.f[2] = fCross;
        Result.f[3] = fCross;
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(0, 1, 0, 1));
        vResult = _mm_mul_ps(vResult, V1);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(1, 1, 1, 1));
        vResult = _mm_sub_ss(vResult, vTemp);
        vResult = XM_PERMUTE_PS!(vResult, _MM_SHUFFLE(0, 0, 0, 0));
        return vResult;
    }
}
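
// Usage sketch (added): the scalar 2D cross product (the z-component of the 3D
// cross) is splatted to all lanes; 1*4 - 2*3 = -2 exactly. Assumes XMVector4Equal
// behaves as in DirectXMath.
#[test]
fn test_XMVector2Cross() {
    let a = XMVectorSet(1.0, 2.0, 0.0, 0.0);
    let b = XMVectorSet(3.0, 4.0, 0.0, 0.0);
    assert!(XMVector4Equal(XMVector2Cross(a, b), XMVectorReplicate(-2.0)));
}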

#[inline]
pub fn XMVector2LengthSq(
    V: FXMVECTOR,
) -> XMVECTOR
{
    return XMVector2Dot(V, V);
}

#[inline]
pub fn XMVector2ReciprocalLengthEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;
        Result = XMVector2LengthSq(V);
        Result = XMVectorReciprocalSqrtEst(Result);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE4_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x3f);
        return _mm_rsqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_rsqrt_ss(vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = _mm_rsqrt_ss(vLengthSq);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector2ReciprocalLength(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;
        Result = XMVector2LengthSq(V);
        Result = XMVectorReciprocalSqrt(Result);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE4_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x3f);
        let vLengthSq: XMVECTOR = _mm_sqrt_ps(vTemp);
        return _mm_div_ps(g_XMOne.v, vLengthSq);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ss(vTemp);
        vLengthSq = _mm_div_ss(g_XMOne.v, vLengthSq);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = _mm_sqrt_ss(vLengthSq);
        vLengthSq = _mm_div_ss(g_XMOne.v, vLengthSq);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector2LengthEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;
        Result = XMVector2LengthSq(V);
        Result = XMVectorSqrtEst(Result);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE4_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x3f);
        return _mm_sqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ss(vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = _mm_sqrt_ss(vLengthSq);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector2Length(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;
        Result = XMVector2LengthSq(V);
        Result = XMVectorSqrt(Result);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE4_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x3f);
        return _mm_sqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ss(vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }
}
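
// Usage sketch (added): a 3-4-5 triangle; sqrt(25) is exact in f32, so the splatted
// length is exactly 5 on every code path.
#[test]
fn test_XMVector2Length() {
    let v = XMVectorSet(3.0, 4.0, 0.0, 0.0);
    assert_eq!(XMVectorGetX(XMVector2Length(v)), 5.0);
    assert_eq!(XMVectorGetX(XMVector2LengthSq(v)), 25.0);
}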

#[inline]
pub fn XMVector2NormalizeEst(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;
        Result = XMVector2ReciprocalLength(V);
        Result = XMVectorMultiply(V, Result);
        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE4_INTRINSICS_))]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x3f);
        let vResult: XMVECTOR = _mm_rsqrt_ps(vTemp);
        return _mm_mul_ps(vResult, V);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = _mm_rsqrt_ss(vLengthSq);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        vLengthSq = _mm_mul_ps(vLengthSq, V);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }
}

#[inline]
pub fn XMVector2Normalize(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = XMVector2Length(V);
        let mut fLength: f32 = vResult.vector4_f32[0];

        if (fLength > 0.0)
        {
            fLength = 1.0 / fLength;
        }

        vResult.vector4_f32[0] = V.vector4_f32[0] * fLength;
        vResult.vector4_f32[1] = V.vector4_f32[1] * fLength;
        vResult.vector4_f32[2] = V.vector4_f32[2] * fLength;
        vResult.vector4_f32[3] = V.vector4_f32[3] * fLength;
        return vResult;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE4_INTRINSICS_))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_dp_ps(V, V, 0x3f);
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_moveldup_ps(vLengthSq);
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }
}
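
// Usage sketch (added): normalizing (3, 4) gives (0.6, 0.8) up to rounding; the
// test checks the resulting length rather than exact components so the assertion
// holds on every code path.
#[test]
fn test_XMVector2Normalize() {
    let v = XMVectorSet(3.0, 4.0, 0.0, 0.0);
    let n = XMVector2Normalize(v);
    let len = XMVectorGetX(XMVector2Length(n));
    assert!((len - 1.0).abs() < 1.0e-6);
}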

#[inline]
pub fn XMVector2ClampLength(
    V: FXMVECTOR,
    LengthMin: f32,
    LengthMax: f32,
) -> XMVECTOR
{
    let ClampMax: XMVECTOR = XMVectorReplicate(LengthMax);
    let ClampMin: XMVECTOR = XMVectorReplicate(LengthMin);
    return XMVector2ClampLengthV(V, ClampMin, ClampMax);
}
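
// Usage sketch (added): clamping a length-5 vector to the range [0, 1] should
// return a unit vector pointing the same way; checked approximately since the
// reciprocal square root path rounds.
#[test]
fn test_XMVector2ClampLength() {
    let v = XMVectorSet(3.0, 4.0, 0.0, 0.0);
    let r = XMVector2ClampLength(v, 0.0, 1.0);
    let len = XMVectorGetX(XMVector2Length(r));
    assert!((len - 1.0).abs() < 1.0e-5);
}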

#[inline]
pub fn XMVector2ClampLengthV(
    V: FXMVECTOR,
    LengthMin: FXMVECTOR,
    LengthMax: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        debug_assert!((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)));
        debug_assert!((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)));
        debug_assert!(XMVector2GreaterOrEqual(LengthMin, g_XMZero.v));
        debug_assert!(XMVector2GreaterOrEqual(LengthMax, g_XMZero.v));
        debug_assert!(XMVector2GreaterOrEqual(LengthMax, LengthMin));

        let LengthSq: XMVECTOR = XMVector2LengthSq(V);

        let Zero: XMVECTOR = XMVectorZero();

        let RcpLength: XMVECTOR = XMVectorReciprocalSqrt(LengthSq);

        let InfiniteLength: XMVECTOR = XMVectorEqualInt(LengthSq, g_XMInfinity.v);
        let ZeroLength: XMVECTOR = XMVectorEqual(LengthSq, Zero);

        let mut Length: XMVECTOR = XMVectorMultiply(LengthSq, RcpLength);

        let mut Normal: XMVECTOR = XMVectorMultiply(V, RcpLength);

        let Select: XMVECTOR = XMVectorEqualInt(InfiniteLength, ZeroLength);
        Length = XMVectorSelect(LengthSq, Length, Select);
        Normal = XMVectorSelect(LengthSq, Normal, Select);

        let ControlMax: XMVECTOR = XMVectorGreater(Length, LengthMax);
        let ControlMin: XMVECTOR = XMVectorLess(Length, LengthMin);

        let mut ClampLength: XMVECTOR = XMVectorSelect(Length, LengthMax, ControlMax);
        ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);

        let mut Result: XMVECTOR = XMVectorMultiply(Normal, ClampLength);

        let Control: XMVECTOR = XMVectorEqualInt(ControlMax, ControlMin);
        Result = XMVectorSelect(Result, V, Control);

        return Result;
    }
}

#[inline]
pub fn XMVector2Reflect(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
) -> XMVECTOR
{
    let mut Result: XMVECTOR;
    Result = XMVector2Dot(Incident, Normal);
    Result = XMVectorAdd(Result, Result);
    Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);
    return Result;
}
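
// Usage sketch (added): reflecting the incident ray (1, -1) off a floor with
// normal (0, 1) bounces it up to (1, 1); the arithmetic is exact for these inputs.
#[test]
fn test_XMVector2Reflect() {
    let incident = XMVectorSet(1.0, -1.0, 0.0, 0.0);
    let normal = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let r = XMVector2Reflect(incident, normal);
    assert!(XMVector2Equal(r, XMVectorSet(1.0, 1.0, 0.0, 0.0)));
}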

#[inline]
pub fn XMVector2Refract(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
    RefractionIndex: f32,
) -> XMVECTOR
{
    let Index: XMVECTOR = XMVectorReplicate(RefractionIndex);
    return XMVector2RefractV(Incident, Normal, Index);
}

#[inline]
pub fn XMVector2RefractV(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
    RefractionIndex: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let IDotN: f32 = (Incident.vector4_f32[0] * Normal.vector4_f32[0]) + (Incident.vector4_f32[1] * Normal.vector4_f32[1]);
        let mut RY: f32 = 1.0 - (IDotN * IDotN);
        let mut RX: f32 = 1.0 - (RY * RefractionIndex.vector4_f32[0] * RefractionIndex.vector4_f32[0]);
        RY = 1.0 - (RY * RefractionIndex.vector4_f32[1] * RefractionIndex.vector4_f32[1]);
        if (RX >= 0.0)
        {
            RX = (RefractionIndex.vector4_f32[0] * Incident.vector4_f32[0]) - (Normal.vector4_f32[0] * ((RefractionIndex.vector4_f32[0] * IDotN) + sqrtf(RX)));
        }
        else
        {
            RX = 0.0;
        }
        if (RY >= 0.0)
        {
            RY = (RefractionIndex.vector4_f32[1] * Incident.vector4_f32[1]) - (Normal.vector4_f32[1] * ((RefractionIndex.vector4_f32[1] * IDotN) + sqrtf(RY)));
        }
        else
        {
            RY = 0.0;
        }

        let mut vResult: XMVECTOR = crate::undefined();
        vResult.vector4_f32[0] = RX;
        vResult.vector4_f32[1] = RY;
        vResult.vector4_f32[2] = 0.0;
        vResult.vector4_f32[3] = 0.0;
        return vResult;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let IDotN: XMVECTOR = XMVector2Dot(Incident, Normal);
        let mut vTemp: XMVECTOR = XM_FNMADD_PS!(IDotN, IDotN, g_XMOne.v);
        vTemp = _mm_mul_ps(vTemp, RefractionIndex);
        vTemp = XM_FNMADD_PS!(vTemp, RefractionIndex, g_XMOne.v);
        let vMask: XMVECTOR = _mm_cmpgt_ps(vTemp, g_XMZero.v);
        vTemp = _mm_sqrt_ps(vTemp);
        vTemp = XM_FMADD_PS!(RefractionIndex, IDotN, vTemp);
        let mut vResult: XMVECTOR = _mm_mul_ps(RefractionIndex, Incident);
        vResult = XM_FNMADD_PS!(vTemp, Normal, vResult);
        vResult = _mm_and_ps(vResult, vMask);
        return vResult;
    }
}

#[inline]
pub fn XMVector2Orthogonal(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result: XMVECTORF32 = XMVECTORF32 { f: [
            -V.vector4_f32[1],
            V.vector4_f32[0],
            0.0,
            0.0
        ]};
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 2, 0, 1));
        vResult = _mm_mul_ps(vResult, g_XMNegateX.v);
        return vResult;
    }
}
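
// Usage sketch (added): the orthogonal of (x, y) is (-y, x), a quarter turn
// counter-clockwise, so its dot product with the input is zero.
#[test]
fn test_XMVector2Orthogonal() {
    let v = XMVectorSet(1.0, 2.0, 0.0, 0.0);
    let o = XMVector2Orthogonal(v);
    assert!(XMVector2Equal(o, XMVectorSet(-2.0, 1.0, 0.0, 0.0)));
    assert_eq!(XMVectorGetX(XMVector2Dot(v, o)), 0.0);
}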

#[inline]
pub fn XMVector2AngleBetweenNormalsEst(
    N1: FXMVECTOR,
    N2: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let mut Result: XMVECTOR = XMVector2Dot(N1, N2);
        Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
        Result = XMVectorACosEst(Result);
        return Result;
    }
}

#[inline]
pub fn XMVector2AngleBetweenNormals(
    N1: FXMVECTOR,
    N2: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let mut Result: XMVECTOR = XMVector2Dot(N1, N2);
        Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
        Result = XMVectorACos(Result);
        return Result;
    }
}

#[inline]
pub fn XMVector2AngleBetweenVectors(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let mut L1: XMVECTOR = XMVector2ReciprocalLength(V1);
        let L2: XMVECTOR = XMVector2ReciprocalLength(V2);

        let Dot: XMVECTOR = XMVector2Dot(V1, V2);

        L1 = XMVectorMultiply(L1, L2);

        let mut CosAngle: XMVECTOR = XMVectorMultiply(Dot, L1);
        CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);

        return XMVectorACos(CosAngle);
    }
}

#[inline]
pub fn XMVector2LinePointDistance(
    LinePoint1: FXMVECTOR,
    LinePoint2: FXMVECTOR,
    Point: FXMVECTOR,
) -> XMVECTOR
{
    let PointVector: XMVECTOR = XMVectorSubtract(Point, LinePoint1);
    let LineVector: XMVECTOR = XMVectorSubtract(LinePoint2, LinePoint1);

    let LengthSq: XMVECTOR = XMVector2LengthSq(LineVector);

    let mut PointProjectionScale: XMVECTOR = XMVector2Dot(PointVector, LineVector);
    PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq);

    let mut DistanceVector: XMVECTOR = XMVectorMultiply(LineVector, PointProjectionScale);
    DistanceVector = XMVectorSubtract(PointVector, DistanceVector);

    return XMVector2Length(DistanceVector);
}
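
// Usage sketch (added): the distance from the point (0, 1) to the line through
// (-1, 0) and (1, 0) is exactly 1; every step of the projection is exact in f32
// for these inputs.
#[test]
fn test_XMVector2LinePointDistance() {
    let lp1 = XMVectorSet(-1.0, 0.0, 0.0, 0.0);
    let lp2 = XMVectorSet(1.0, 0.0, 0.0, 0.0);
    let p = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let d = XMVector2LinePointDistance(lp1, lp2, p);
    assert_eq!(XMVectorGetX(d), 1.0);
}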

#[inline]
pub fn XMVector2IntersectLine(
    Line1Point1: FXMVECTOR,
    Line1Point2: FXMVECTOR,
    Line2Point1: FXMVECTOR,
    Line2Point2: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let V1: XMVECTOR = XMVectorSubtract(Line1Point2, Line1Point1);
        let V2: XMVECTOR = XMVectorSubtract(Line2Point2, Line2Point1);
        let V3: XMVECTOR = XMVectorSubtract(Line1Point1, Line2Point1);

        let C1: XMVECTOR = XMVector2Cross(V1, V2);
        let C2: XMVECTOR = XMVector2Cross(V2, V3);

        let Result: XMVECTOR;
        const Zero: XMVECTOR = unsafe { g_XMZero.v };
        if (XMVector2NearEqual(C1, Zero, g_XMEpsilon.v))
        {
            if (XMVector2NearEqual(C2, Zero, g_XMEpsilon.v))
            {
                Result = g_XMInfinity.v;
            }
            else
            {
                Result = g_XMQNaN.v;
            }
        }
        else
        {
            let mut Scale: XMVECTOR = XMVectorReciprocal(C1);
            Scale = XMVectorMultiply(C2, Scale);
            Result = XMVectorMultiplyAdd(V1, Scale, Line1Point1);
        }

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(all(_XM_SSE_INTRINSICS_))]
    unsafe {
        let V1: XMVECTOR = _mm_sub_ps(Line1Point2, Line1Point1);
        let V2: XMVECTOR = _mm_sub_ps(Line2Point2, Line2Point1);
        let V3: XMVECTOR = _mm_sub_ps(Line1Point1, Line2Point1);
        let C1: XMVECTOR = XMVector2Cross(V1, V2);
        let C2: XMVECTOR = XMVector2Cross(V2, V3);
        let mut vResultMask: XMVECTOR = _mm_setzero_ps();
        vResultMask = _mm_sub_ps(vResultMask, C1);
        vResultMask = _mm_max_ps(vResultMask, C1);
        vResultMask = _mm_cmpgt_ps(vResultMask, g_XMEpsilon.v);
        let mut vFailMask: XMVECTOR = _mm_setzero_ps();
        vFailMask = _mm_sub_ps(vFailMask, C2);
        vFailMask = _mm_max_ps(vFailMask, C2);
        vFailMask = _mm_cmple_ps(vFailMask, g_XMEpsilon.v);
        let mut vFail: XMVECTOR = _mm_and_ps(vFailMask, g_XMInfinity.v);
        vFailMask = _mm_andnot_ps(vFailMask, g_XMQNaN.v);
        vFail = _mm_or_ps(vFail, vFailMask);
        let mut vResult: XMVECTOR = _mm_div_ps(C2, C1);
        vResult = XM_FMADD_PS!(vResult, V1, Line1Point1);
        vResult = _mm_and_ps(vResult, vResultMask);
        vResultMask = _mm_andnot_ps(vResultMask, vFail);
        vResult = _mm_or_ps(vResult, vResultMask);
        return vResult;
    }
}

#[inline]
pub fn XMVector2Transform(
    V: FXMVECTOR,
    M: FXMMATRIX,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Y: XMVECTOR = XMVectorSplatY(V);
        let X: XMVECTOR = XMVectorSplatX(V);

        let mut Result: XMVECTOR = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]);
        Result = XMVectorMultiplyAdd(X, M.r[0], Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1));
        vResult = XM_FMADD_PS!(vResult, M.r[1], M.r[3]);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 0, 0, 0));
        vResult = XM_FMADD_PS!(vTemp, M.r[0], vResult);
        return vResult;
    }
}
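
// Usage sketch (added): transforming by the identity matrix keeps x and y and
// fills z = 0, w = 1 from the matrix rows. Assumes XMMatrixIdentity and
// XMVector4Equal behave as in DirectXMath.
#[test]
fn test_XMVector2Transform() {
    let v = XMVectorSet(1.0, 2.0, 0.0, 0.0);
    let m = XMMatrixIdentity();
    let r = XMVector2Transform(v, m);
    assert!(XMVector4Equal(r, XMVectorSet(1.0, 2.0, 0.0, 1.0)));
}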

#[inline]
pub fn XMVector2TransformCoord(
    V: FXMVECTOR,
    M: FXMMATRIX,
) -> XMVECTOR {
    unsafe {
        let Y: XMVECTOR = XMVectorSplatY(V);
        let X: XMVECTOR = XMVectorSplatX(V);

        let mut Result: XMVECTOR = XMVectorMultiplyAdd(Y, M.r[1], M.r[3]);
        Result = XMVectorMultiplyAdd(X, M.r[0], Result);

        let W: XMVECTOR = XMVectorSplatW(Result);
        return XMVectorDivide(Result, W);
    }
}

#[inline]
pub fn XMVector2TransformNormal(
    V: FXMVECTOR,
    M: FXMMATRIX,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Y: XMVECTOR = XMVectorSplatY(V);
        let X: XMVECTOR = XMVectorSplatX(V);

        let mut Result: XMVECTOR = XMVectorMultiply(Y, M.r[1]);
        Result = XMVectorMultiplyAdd(X, M.r[0], Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1));
        vResult = _mm_mul_ps(vResult, M.r[1]);
        let vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 0, 0, 0));
        vResult = XM_FMADD_PS!(vTemp, M.r[0], vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVector3Equal(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 7) == 7) != false);
    }
}

#[test]
fn test_XMVector3Equal() {
    let a = XMVectorReplicate(1.0);
    let b = XMVectorReplicate(1.0);

    assert!(XMVector3Equal(a, b));
    assert!(XMVector3Equal(a, XMVectorSetW(b, 2.0)));

    assert!(!XMVector3Equal(a, XMVectorSetX(b, 2.0)));
    assert!(!XMVector3Equal(a, XMVectorSetY(b, 2.0)));
    assert!(!XMVector3Equal(a, XMVectorSetZ(b, 2.0)));
}

#[inline]
pub fn XMVector3EqualR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if ((V1.vector4_f32[0] == V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] == V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] == V2.vector4_f32[2]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_f32[0] != V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] != V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] != V2.vector4_f32[2]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp) & 7;
        let mut CR: u32 = 0;
        if (iTest == 7)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[test]
fn test_XMVector3EqualR() {
    let a = XMVectorReplicate(1.0);
    let b = XMVectorReplicate(1.0);

    let r = XMVector3EqualR(a, b);
    assert!(XMComparisonAnyTrue(r));
    assert!(!XMComparisonAnyFalse(r));
    assert!(XMComparisonAllTrue(r));
    assert!(!XMComparisonAllFalse(r));

    let r = XMVector3EqualR(a, XMVectorReplicate(2.0));
    assert!(!XMComparisonAnyTrue(r));
    assert!(XMComparisonAnyFalse(r));
    assert!(!XMComparisonAllTrue(r));
    assert!(XMComparisonAllFalse(r));

    let r = XMVector3EqualR(a, XMVectorSetX(b, 2.0));
    assert!(XMComparisonAnyTrue(r));
    assert!(XMComparisonAnyFalse(r));
    assert!(!XMComparisonAllTrue(r));
    assert!(!XMComparisonAllFalse(r));
}

#[inline]
pub fn XMVector3EqualInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7) == 7) != false);
    }
}

#[inline]
pub fn XMVector3EqualIntR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if ((V1.vector4_u32[0] == V2.vector4_u32[0]) &&
            (V1.vector4_u32[1] == V2.vector4_u32[1]) &&
            (V1.vector4_u32[2] == V2.vector4_u32[2]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_u32[0] != V2.vector4_u32[0]) &&
            (V1.vector4_u32[1] != V2.vector4_u32[1]) &&
            (V1.vector4_u32[2] != V2.vector4_u32[2]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        let iTemp: i32 = _mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7;
        let mut CR: u32 = 0;
        if (iTemp == 7)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTemp)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector3NearEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    Epsilon: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let (dx, dy, dz): (f32, f32, f32);

        dx = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]);
        dy = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]);
        dz = fabsf(V1.vector4_f32[2] - V2.vector4_f32[2]);
        return (((dx <= Epsilon.vector4_f32[0]) &&
            (dy <= Epsilon.vector4_f32[1]) &&
            (dz <= Epsilon.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vDelta: XMVECTOR = _mm_sub_ps(V1, V2);
        let mut vTemp: XMVECTOR = _mm_setzero_ps();
        vTemp = _mm_sub_ps(vTemp, vDelta);
        vTemp = _mm_max_ps(vTemp, vDelta);
        vTemp = _mm_cmple_ps(vTemp, Epsilon);
        return (((_mm_movemask_ps(vTemp) & 7) == 0x7) != false);
    }
}

#[inline]
pub fn XMVector3NotEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 7) != 7) != false);
    }
}

#[inline]
pub fn XMVector3NotEqualInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return (((_mm_movemask_ps(_mm_castsi128_ps(vTemp)) & 7) != 7) != false);
    }
}

#[inline]
pub fn XMVector3Greater(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > V2.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 7) == 7) != false);
    }
}

#[inline]
pub fn XMVector3GreaterR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if ((V1.vector4_f32[0] > V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] > V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] > V2.vector4_f32[2]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_f32[0] <= V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] <= V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] <= V2.vector4_f32[2]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        let mut CR: u32 = 0;
        let iTest: i32 = _mm_movemask_ps(vTemp) & 7;
        if (iTest == 7)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector3GreaterOrEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 7) == 7) != false);
    }
}

#[inline]
pub fn XMVector3GreaterOrEqualR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if ((V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] >= V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] >= V2.vector4_f32[2]))
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if ((V1.vector4_f32[0] < V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] < V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] < V2.vector4_f32[2]))
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        let mut CR: u32 = 0;
        let iTest: i32 = _mm_movemask_ps(vTemp) & 7;
        if (iTest == 7)
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector3Less(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmplt_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 7) == 7) != false);
    }
}


#[inline]
pub fn XMVector3LessOrEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (((V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2])) != false);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmple_ps(V1, V2);
        return (((_mm_movemask_ps(vTemp) & 7) == 7) != false);
    }
}
9521
9522#[inline]
9548pub fn XMVector3InBounds(
9549 V: FXMVECTOR,
9550 Bounds: FXMVECTOR,
9551) -> bool
9552{
9553 #[cfg(_XM_NO_INTRINSICS_)]
9554 unsafe {
9555 return (((V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
9556 (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) &&
9557 (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2])) != false);
9558 }
9559
9560 #[cfg(_XM_ARM_NEON_INTRINSICS_)]
9561 {
9562 unimplemented!()
9563 }
9564
9565 #[cfg(_XM_SSE_INTRINSICS_)]
9566 unsafe {
9567 let mut vTemp1: XMVECTOR = _mm_cmple_ps(V, Bounds);
9569 let mut vTemp2: XMVECTOR = _mm_mul_ps(Bounds, g_XMNegativeOne.v);
9571 vTemp2 = _mm_cmple_ps(vTemp2, V);
9573 vTemp1 = _mm_and_ps(vTemp1, vTemp2);
9575 return (((_mm_movemask_ps(vTemp1) & 0x7) == 0x7) != false);
9577 }
9578
9579 }
9582
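// Illustrative check (added here, not part of the ported source): the 3D
// bounds test is symmetric about the origin and inclusive, so each of x, y
// and z must lie in [-Bounds, +Bounds]; the w component is ignored.
#[test]
fn test_xmvector3_in_bounds() {
    let bounds = XMVectorSet(1.0, 1.0, 1.0, 1.0);
    // On or inside the bounds; w is out of range but does not matter.
    assert!(XMVector3InBounds(XMVectorSet(0.5, -1.0, 1.0, 42.0), bounds));
    // x exceeds the bound.
    assert!(!XMVector3InBounds(XMVectorSet(1.5, 0.0, 0.0, 0.0), bounds));
}
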
#[inline]
pub fn XMVector3IsNaN(
    V: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return XMISNAN!(V.vector4_f32[0]) ||
            XMISNAN!(V.vector4_f32[1]) ||
            XMISNAN!(V.vector4_f32[2]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Test against itself. NaN is always not equal
        let vTempNan: XMVECTOR = _mm_cmpneq_ps(V, V);
        // If x or y or z are NaN, the mask is non-zero
        return (_mm_movemask_ps(vTempNan) & 7) != 0;
    }
}

#[inline]
pub fn XMVector3IsInfinite(
    V: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return XMISINF!(V.vector4_f32[0]) ||
            XMISINF!(V.vector4_f32[1]) ||
            XMISINF!(V.vector4_f32[2]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Mask off the sign bit
        let mut vTemp: __m128 = _mm_and_ps(V, g_XMAbsMask.v);
        // Compare to infinity
        vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity.v);
        // If x, y or z are infinity, the signs are true
        return (_mm_movemask_ps(vTemp) & 7) != 0;
    }
}

#[inline]
pub fn XMVector3Dot(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let fValue: f32 = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2];
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = fValue;
        vResult.f[1] = fValue;
        vResult.f[2] = fValue;
        vResult.f[3] = fValue;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        return _mm_dp_ps(V1, V2, 0x7f);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        // Perform the dot product
        let mut vDot: XMVECTOR = _mm_mul_ps(V1, V2);
        // x=Dot.vector4_f32[1], y=Dot.vector4_f32[2]
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vDot, _MM_SHUFFLE(2, 1, 2, 1));
        // Result.vector4_f32[0] = x+y
        vDot = _mm_add_ss(vDot, vTemp);
        // x=Dot.vector4_f32[2]
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        // Result.vector4_f32[0] = (x+y)+z
        vDot = _mm_add_ss(vDot, vTemp);
        // Splat x
        return XM_PERMUTE_PS!(vDot, _MM_SHUFFLE(0, 0, 0, 0));
    }
}

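// Worked example (an addition for illustration, not from the ported source):
// the dot product is replicated into all four lanes, so any component
// accessor returns it. (1,2,3) . (4,5,6) = 4 + 10 + 18 = 32.
#[test]
fn test_xmvector3_dot() {
    let d = XMVector3Dot(XMVectorSet(1.0, 2.0, 3.0, 0.0), XMVectorSet(4.0, 5.0, 6.0, 0.0));
    assert_eq!(32.0, XMVectorGetX(d));
    assert_eq!(32.0, XMVectorGetW(d));
}
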
#[inline]
pub fn XMVector3Cross(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult = XMVECTORF32 {
            f: [
                (V1.vector4_f32[1] * V2.vector4_f32[2]) - (V1.vector4_f32[2] * V2.vector4_f32[1]),
                (V1.vector4_f32[2] * V2.vector4_f32[0]) - (V1.vector4_f32[0] * V2.vector4_f32[2]),
                (V1.vector4_f32[0] * V2.vector4_f32[1]) - (V1.vector4_f32[1] * V2.vector4_f32[0]),
                0.0
            ]
        };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // y1, z1, x1, w1
        let mut vTemp1: XMVECTOR = XM_PERMUTE_PS!(V1, _MM_SHUFFLE(3, 0, 2, 1));
        // z2, x2, y2, w2
        let mut vTemp2: XMVECTOR = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(3, 1, 0, 2));
        // Perform the left operation
        let mut vResult: XMVECTOR = _mm_mul_ps(vTemp1, vTemp2);
        // z1, x1, y1, w1
        vTemp1 = XM_PERMUTE_PS!(vTemp1, _MM_SHUFFLE(3, 0, 2, 1));
        // y2, z2, x2, w2
        vTemp2 = XM_PERMUTE_PS!(vTemp2, _MM_SHUFFLE(3, 1, 0, 2));
        // Perform the right operation
        vResult = XM_FNMADD_PS!(vTemp1, vTemp2, vResult);
        // Set w to zero
        return _mm_and_ps(vResult, g_XMMask3.v);
    }
}

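// Illustrative check (added; not in the ported source): the cross product of
// the X and Y basis vectors is the Z basis vector, and the w lane is forced
// to zero by the g_XMMask3 mask. Every value here is exact, so exact
// comparisons are safe.
#[test]
fn test_xmvector3_cross_basis() {
    let x = XMVectorSet(1.0, 0.0, 0.0, 0.0);
    let y = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let z = XMVector3Cross(x, y);
    assert_eq!(0.0, XMVectorGetX(z));
    assert_eq!(0.0, XMVectorGetY(z));
    assert_eq!(1.0, XMVectorGetZ(z));
    assert_eq!(0.0, XMVectorGetW(z));
}
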
#[inline]
pub fn XMVector3LengthSq(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    return XMVector3Dot(V, V);
}

#[inline]
pub fn XMVector3ReciprocalLengthEst(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector3LengthSq(V);
        Result = XMVectorReciprocalSqrtEst(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x7f);
        return _mm_rsqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        // Perform the dot product on x, y and z
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        // vTemp has z and y
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2));
        // x+z, y
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        // y, y, y, y
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        // x+z+y, ??, ??, ??
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        // Splat the length squared
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        // Get the reciprocal
        vLengthSq = _mm_rsqrt_ps(vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector3ReciprocalLength(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector3LengthSq(V);
        Result = XMVectorReciprocalSqrt(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x7f);
        let vLengthSq: XMVECTOR = _mm_sqrt_ps(vTemp);
        return _mm_div_ps(g_XMOne.v, vLengthSq);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vDot: XMVECTOR = _mm_mul_ps(V, V);
        vDot = _mm_and_ps(vDot, g_XMMask3.v);
        vDot = _mm_hadd_ps(vDot, vDot);
        vDot = _mm_hadd_ps(vDot, vDot);
        vDot = _mm_sqrt_ps(vDot);
        vDot = _mm_div_ps(g_XMOne.v, vDot);
        return vDot;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        // Perform the dot product
        let mut vDot: XMVECTOR = _mm_mul_ps(V, V);
        // x=Dot.y, y=Dot.z
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vDot, _MM_SHUFFLE(2, 1, 2, 1));
        // Result.x = x+y
        vDot = _mm_add_ss(vDot, vTemp);
        // x=Dot.z
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        // Result.x = (x+y)+z
        vDot = _mm_add_ss(vDot, vTemp);
        // Splat x
        vDot = XM_PERMUTE_PS!(vDot, _MM_SHUFFLE(0, 0, 0, 0));
        // Get the reciprocal
        vDot = _mm_sqrt_ps(vDot);
        vDot = _mm_div_ps(g_XMOne.v, vDot);
        return vDot;
    }
}

#[inline]
pub fn XMVector3LengthEst(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector3LengthSq(V);
        Result = XMVectorSqrtEst(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x7f);
        return _mm_sqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3.v);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector3Length(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector3LengthSq(V);
        Result = XMVectorSqrt(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x7f);
        return _mm_sqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3.v);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 2, 1, 2));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector3NormalizeEst(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector3ReciprocalLength(V);
        Result = XMVectorMultiply(V, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0x7f);
        let vResult: XMVECTOR = _mm_rsqrt_ps(vTemp);
        return _mm_mul_ps(vResult, V);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vDot: XMVECTOR = _mm_mul_ps(V, V);
        vDot = _mm_and_ps(vDot, g_XMMask3.v);
        vDot = _mm_hadd_ps(vDot, vDot);
        vDot = _mm_hadd_ps(vDot, vDot);
        vDot = _mm_rsqrt_ps(vDot);
        vDot = _mm_mul_ps(vDot, V);
        return vDot;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vDot: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vDot, _MM_SHUFFLE(2, 1, 2, 1));
        vDot = _mm_add_ss(vDot, vTemp);
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        vDot = _mm_add_ss(vDot, vTemp);
        vDot = XM_PERMUTE_PS!(vDot, _MM_SHUFFLE(0, 0, 0, 0));
        vDot = _mm_rsqrt_ps(vDot);
        vDot = _mm_mul_ps(vDot, V);
        return vDot;
    }
}

#[inline]
pub fn XMVector3Normalize(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut fLength: f32;
        let mut vResult: XMVECTOR;

        vResult = XMVector3Length(V);
        fLength = vResult.vector4_f32[0];

        // Prevent divide by zero
        if fLength > 0.0
        {
            fLength = 1.0 / fLength;
        }

        vResult.vector4_f32[0] = V.vector4_f32[0] * fLength;
        vResult.vector4_f32[1] = V.vector4_f32[1] * fLength;
        vResult.vector4_f32[2] = V.vector4_f32[2] * fLength;
        vResult.vector4_f32[3] = V.vector4_f32[3] * fLength;

        return vResult;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        // Perform the dot product on x, y and z only
        let mut vLengthSq: XMVECTOR = _mm_dp_ps(V, V, 0x7f);
        // Prepare for the division
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        // Create zero with a single instruction
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        // Test for a divide by zero (Must be FP to detect -0.0)
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        // Failsafe on zero (Or epsilon) length planes
        // If the length is infinity, set the elements to zero
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        // Divide to perform the normalization
        vResult = _mm_div_ps(V, vResult);
        // Any that are infinity, set to zero
        vResult = _mm_and_ps(vResult, vZeroMask);
        // Select qnan or result based on infinite length
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        // Perform the dot product on x, y and z only
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_and_ps(vLengthSq, g_XMMask3.v);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        // Perform the dot product on x, y and z only
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 1, 2, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vTemp = XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(1, 1, 1, 1));
        vLengthSq = _mm_add_ss(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(0, 0, 0, 0));
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }
}

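// Illustrative check (added; not in the ported source): a 3-4-5 triangle
// normalizes to (0.6, 0.8, 0.0) and the result has unit length. A small
// tolerance covers rounding differences between the intrinsic paths.
#[test]
fn test_xmvector3_normalize() {
    let n = XMVector3Normalize(XMVectorSet(3.0, 4.0, 0.0, 0.0));
    assert!((XMVectorGetX(n) - 0.6).abs() < 1.0e-6);
    assert!((XMVectorGetY(n) - 0.8).abs() < 1.0e-6);
    assert!((XMVectorGetX(XMVector3Length(n)) - 1.0).abs() < 1.0e-6);
}
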
#[inline]
pub fn XMVector3ClampLength(
    V: FXMVECTOR,
    LengthMin: f32,
    LengthMax: f32,
) -> FXMVECTOR
{
    let ClampMax: XMVECTOR = XMVectorReplicate(LengthMax);
    let ClampMin: XMVECTOR = XMVectorReplicate(LengthMin);

    return XMVector3ClampLengthV(V, ClampMin, ClampMax);
}

#[inline]
pub fn XMVector3ClampLengthV(
    V: FXMVECTOR,
    LengthMin: FXMVECTOR,
    LengthMax: FXMVECTOR,
) -> FXMVECTOR
{
    let LengthSq: XMVECTOR = XMVector3LengthSq(V);

    let RcpLength: XMVECTOR = XMVectorReciprocalSqrt(LengthSq);

    let InfiniteLength: XMVECTOR = XMVectorEqualInt(LengthSq, unsafe { g_XMInfinity.v });
    let ZeroLength: XMVECTOR = XMVectorEqual(LengthSq, unsafe { g_XMZero.v });

    let mut Normal: XMVECTOR = XMVectorMultiply(V, RcpLength);

    let mut Length: XMVECTOR = XMVectorMultiply(LengthSq, RcpLength);

    let Select: XMVECTOR = XMVectorEqualInt(InfiniteLength, ZeroLength);
    Length = XMVectorSelect(LengthSq, Length, Select);
    Normal = XMVectorSelect(LengthSq, Normal, Select);

    let ControlMax: XMVECTOR = XMVectorGreater(Length, LengthMax);
    let ControlMin: XMVECTOR = XMVectorLess(Length, LengthMin);

    let mut ClampLength: XMVECTOR = XMVectorSelect(Length, LengthMax, ControlMax);
    ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);

    let mut Result: XMVECTOR = XMVectorMultiply(Normal, ClampLength);

    // Preserve the original vector (with no precision loss) if the length falls within the given range
    let Control: XMVECTOR = XMVectorEqualInt(ControlMax, ControlMin);
    Result = XMVectorSelect(Result, V, Control);

    return Result;
}

#[inline]
pub fn XMVector3Reflect(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR
) -> FXMVECTOR
{
    // Result = Incident - (2 * dot(Incident, Normal)) * Normal
    let mut Result: XMVECTOR = XMVector3Dot(Incident, Normal);
    Result = XMVectorAdd(Result, Result);
    Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);

    return Result;
}

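// Worked example (added; not in the ported source): reflecting the incident
// direction (1,-1,0) off a surface with normal (0,1,0) gives (1,1,0), per
// Result = Incident - 2 * dot(Incident, Normal) * Normal. All values here
// are exact.
#[test]
fn test_xmvector3_reflect() {
    let r = XMVector3Reflect(XMVectorSet(1.0, -1.0, 0.0, 0.0), XMVectorSet(0.0, 1.0, 0.0, 0.0));
    assert_eq!(1.0, XMVectorGetX(r));
    assert_eq!(1.0, XMVectorGetY(r));
    assert_eq!(0.0, XMVectorGetZ(r));
}
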
#[inline]
pub fn XMVector3Refract(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
    RefractionIndex: f32,
) -> FXMVECTOR
{
    let Index: XMVECTOR = XMVectorReplicate(RefractionIndex);
    return XMVector3RefractV(Incident, Normal, Index);
}

#[inline]
pub fn XMVector3RefractV(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
    RefractionIndex: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Zero: XMVECTOR = g_XMZero.v;

        let IDotN: XMVECTOR = XMVector3Dot(Incident, Normal);

        // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
        let mut R: XMVECTOR = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v);
        R = XMVectorMultiply(R, RefractionIndex);
        R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v);

        if XMVector4LessOrEqual(R, Zero)
        {
            // Total internal reflection
            return Zero;
        }
        else
        {
            // R = RefractionIndex * IDotN + sqrt(R)
            R = XMVectorSqrt(R);
            R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R);

            // Result = RefractionIndex * Incident - Normal * R
            let mut Result: XMVECTOR = XMVectorMultiply(RefractionIndex, Incident);
            Result = XMVectorNegativeMultiplySubtract(Normal, R, Result);

            return Result;
        }
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let IDotN: XMVECTOR = XMVector3Dot(Incident, Normal);
        // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
        let mut R: XMVECTOR = XM_FNMADD_PS!(IDotN, IDotN, g_XMOne.v);
        let R2: XMVECTOR = _mm_mul_ps(RefractionIndex, RefractionIndex);
        R = XM_FNMADD_PS!(R, R2, g_XMOne.v);

        let mut vResult: XMVECTOR = _mm_cmple_ps(R, g_XMZero.v);
        if _mm_movemask_ps(vResult) == 0x0f
        {
            // Total internal reflection
            vResult = g_XMZero.v;
        }
        else
        {
            // R = RefractionIndex * IDotN + sqrt(R)
            R = _mm_sqrt_ps(R);
            R = XM_FMADD_PS!(RefractionIndex, IDotN, R);
            // Result = RefractionIndex * Incident - Normal * R
            vResult = _mm_mul_ps(RefractionIndex, Incident);
            vResult = XM_FNMADD_PS!(R, Normal, vResult);
        }
        return vResult;
    }
}

#[inline]
pub fn XMVector3Orthogonal(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    let Zero: XMVECTOR = XMVectorZero();
    let Z: XMVECTOR = XMVectorSplatZ(V);
    let YZYY: XMVECTOR = <(XM_SWIZZLE_Y, XM_SWIZZLE_Z, XM_SWIZZLE_Y, XM_SWIZZLE_Y)>::XMVectorSwizzle(V);

    let NegativeV: XMVECTOR = XMVectorSubtract(Zero, V);

    let ZIsNegative: XMVECTOR = XMVectorLess(Z, Zero);
    let YZYYIsNegative: XMVECTOR = XMVectorLess(YZYY, Zero);

    let S: XMVECTOR = XMVectorAdd(YZYY, Z);
    let D: XMVECTOR = XMVectorSubtract(YZYY, Z);

    let Select: XMVECTOR = XMVectorEqualInt(ZIsNegative, YZYYIsNegative);

    let R0: XMVECTOR = <(XM_PERMUTE_1X, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_0X)>::XMVectorPermute(NegativeV, S);
    let R1: XMVECTOR = <(XM_PERMUTE_1X, XM_PERMUTE_0X, XM_PERMUTE_0X, XM_PERMUTE_0X)>::XMVectorPermute(V, D);

    return XMVectorSelect(R1, R0, Select);
}

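// Illustrative check (added; not in the ported source): whichever branch the
// sign logic selects, the returned vector is perpendicular to the input, so
// the 3D dot product with the input is zero.
#[test]
fn test_xmvector3_orthogonal() {
    let v = XMVectorSet(1.0, 2.0, 3.0, 0.0);
    let o = XMVector3Orthogonal(v);
    assert_eq!(0.0, XMVectorGetX(XMVector3Dot(v, o)));
}
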
#[inline]
pub fn XMVector3AngleBetweenNormalsEst(
    N1: FXMVECTOR,
    N2: FXMVECTOR,
) -> FXMVECTOR
{
    unsafe {
        let mut Result: XMVECTOR = XMVector3Dot(N1, N2);
        Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
        Result = XMVectorACosEst(Result);
        return Result;
    }
}

#[inline]
pub fn XMVector3AngleBetweenNormals(
    N1: FXMVECTOR,
    N2: FXMVECTOR,
) -> FXMVECTOR
{
    unsafe {
        let mut Result: XMVECTOR = XMVector3Dot(N1, N2);
        Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
        Result = XMVectorACos(Result);
        return Result;
    }
}

#[inline]
pub fn XMVector3AngleBetweenVectors(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> FXMVECTOR
{
    unsafe {
        let mut L1: XMVECTOR = XMVector3ReciprocalLength(V1);
        let L2: XMVECTOR = XMVector3ReciprocalLength(V2);

        let Dot: XMVECTOR = XMVector3Dot(V1, V2);

        L1 = XMVectorMultiply(L1, L2);

        let mut CosAngle: XMVECTOR = XMVectorMultiply(Dot, L1);
        CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);

        return XMVectorACos(CosAngle);
    }
}

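// Illustrative check (added; not in the ported source): perpendicular vectors
// are PI/2 apart. The loose tolerance allows for the polynomial approximation
// used by XMVectorACos.
#[test]
fn test_xmvector3_angle_between_vectors() {
    let angle = XMVector3AngleBetweenVectors(XMVectorSet(1.0, 0.0, 0.0, 0.0), XMVectorSet(0.0, 1.0, 0.0, 0.0));
    assert!((XMVectorGetX(angle) - core::f32::consts::FRAC_PI_2).abs() < 1.0e-3);
}
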
#[inline]
pub fn XMVector3LinePointDistance(
    LinePoint1: FXMVECTOR,
    LinePoint2: FXMVECTOR,
    Point: FXMVECTOR
) -> FXMVECTOR
{
    // Given a vector PointVector from LinePoint1 to Point and a vector
    // LineVector from LinePoint1 to LinePoint2, the scaled distance
    // PointProjectionScale from LinePoint1 to the perpendicular projection
    // of PointVector onto the line is defined as:
    //
    //     PointProjectionScale = dot(PointVector, LineVector) / LengthSq(LineVector)
    let PointVector: XMVECTOR = XMVectorSubtract(Point, LinePoint1);
    let LineVector: XMVECTOR = XMVectorSubtract(LinePoint2, LinePoint1);

    let LengthSq: XMVECTOR = XMVector3LengthSq(LineVector);

    let mut PointProjectionScale: XMVECTOR = XMVector3Dot(PointVector, LineVector);
    PointProjectionScale = XMVectorDivide(PointProjectionScale, LengthSq);

    let mut DistanceVector: XMVECTOR = XMVectorMultiply(LineVector, PointProjectionScale);
    DistanceVector = XMVectorSubtract(PointVector, DistanceVector);

    return XMVector3Length(DistanceVector);
}

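// Worked example (added; not in the ported source): the distance from the
// point (0,3,4) to the x axis (the line through the origin and (1,0,0)) is
// sqrt(3*3 + 4*4) = 5.
#[test]
fn test_xmvector3_line_point_distance() {
    let d = XMVector3LinePointDistance(
        XMVectorSet(0.0, 0.0, 0.0, 0.0),
        XMVectorSet(1.0, 0.0, 0.0, 0.0),
        XMVectorSet(0.0, 3.0, 4.0, 0.0),
    );
    assert!((XMVectorGetX(d) - 5.0).abs() < 1.0e-5);
}
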
#[inline]
pub fn XMVector3ComponentsFromNormal(
    pParallel: &mut XMVECTOR,
    pPerpendicular: &mut XMVECTOR,
    V: FXMVECTOR,
    Normal: FXMVECTOR
)
{
    let Scale: XMVECTOR = XMVector3Dot(V, Normal);

    let Parallel: XMVECTOR = XMVectorMultiply(Normal, Scale);

    *pParallel = Parallel;
    *pPerpendicular = XMVectorSubtract(V, Parallel);
}

#[inline]
pub fn XMVector3Rotate(
    V: FXMVECTOR,
    RotationQuaternion: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let A: XMVECTOR = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v);
        let Q: XMVECTOR = XMQuaternionConjugate(RotationQuaternion);
        let Result: XMVECTOR = XMQuaternionMultiply(Q, A);
        return XMQuaternionMultiply(Result, RotationQuaternion);
    }
}

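// Illustrative check (added; not in the ported source): a unit quaternion
// (0, 0, sin(t/2), cos(t/2)) rotates about the Z axis; for t = 90 degrees it
// carries the X axis onto the Y axis. The quaternion is built by hand here so
// that no additional quaternion constructors are assumed.
#[test]
fn test_xmvector3_rotate_quarter_turn() {
    let half_sqrt2 = 0.70710678_f32; // sin(45 deg) == cos(45 deg)
    let q = XMVectorSet(0.0, 0.0, half_sqrt2, half_sqrt2);
    let r = XMVector3Rotate(XMVectorSet(1.0, 0.0, 0.0, 0.0), q);
    assert!(XMVectorGetX(r).abs() < 1.0e-5);
    assert!((XMVectorGetY(r) - 1.0).abs() < 1.0e-5);
    assert!(XMVectorGetZ(r).abs() < 1.0e-5);
}
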
#[inline]
pub fn XMVector3InverseRotate(
    V: FXMVECTOR,
    RotationQuaternion: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let A: XMVECTOR = XMVectorSelect(g_XMSelect1110.v, V, g_XMSelect1110.v);
        let Result: XMVECTOR = XMQuaternionMultiply(RotationQuaternion, A);
        let Q: XMVECTOR = XMQuaternionConjugate(RotationQuaternion);
        return XMQuaternionMultiply(Result, Q);
    }
}

#[inline]
pub fn XMVector3Transform(
    V: FXMVECTOR,
    M: FXMMATRIX,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Z: XMVECTOR = XMVectorSplatZ(V);
        let Y: XMVECTOR = XMVectorSplatY(V);
        let X: XMVECTOR = XMVectorSplatX(V);

        let mut Result: XMVECTOR = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]);
        Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
        Result = XMVectorMultiplyAdd(X, M.r[0], Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z
        vResult = XM_FMADD_PS!(vResult, M.r[2], M.r[3]);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
        vResult = XM_FMADD_PS!(vTemp, M.r[1], vResult);
        vTemp = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
        vResult = XM_FMADD_PS!(vTemp, M.r[0], vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVector3TransformCoord(
    V: FXMVECTOR,
    M: FXMMATRIX,
) -> FXMVECTOR
{
    unsafe {
        let Z: XMVECTOR = XMVectorSplatZ(V);
        let Y: XMVECTOR = XMVectorSplatY(V);
        let X: XMVECTOR = XMVectorSplatX(V);

        let mut Result: XMVECTOR = XMVectorMultiplyAdd(Z, M.r[2], M.r[3]);
        Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
        Result = XMVectorMultiplyAdd(X, M.r[0], Result);

        let W: XMVECTOR = XMVectorSplatW(Result);
        return XMVectorDivide(Result, W);
    }
}

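// Illustrative usage (added; not in the ported source). This sketch assumes
// the crate exposes DirectXMath's XMMatrixTranslation; if it does not, any
// XMMATRIX holding a translation in its last row behaves the same way.
// TransformCoord divides by the resulting w, so a pure translation shifts
// the point directly.
#[test]
fn test_xmvector3_transform_coord_translation() {
    let m = XMMatrixTranslation(10.0, 20.0, 30.0);
    let p = XMVector3TransformCoord(XMVectorSet(1.0, 2.0, 3.0, 0.0), m);
    assert_eq!(11.0, XMVectorGetX(p));
    assert_eq!(22.0, XMVectorGetY(p));
    assert_eq!(33.0, XMVectorGetZ(p));
}
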
#[inline]
pub fn XMVector3TransformNormal(
    V: FXMVECTOR,
    M: FXMMATRIX,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Z: XMVECTOR = XMVectorSplatZ(V);
        let Y: XMVECTOR = XMVectorSplatY(V);
        let X: XMVECTOR = XMVectorSplatX(V);

        let mut Result: XMVECTOR = XMVectorMultiply(Z, M.r[2]);
        Result = XMVectorMultiplyAdd(Y, M.r[1], Result);
        Result = XMVectorMultiplyAdd(X, M.r[0], Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z
        vResult = _mm_mul_ps(vResult, M.r[2]);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
        vResult = XM_FMADD_PS!(vTemp, M.r[1], vResult);
        vTemp = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
        vResult = XM_FMADD_PS!(vTemp, M.r[0], vResult);
        return vResult;
    }
}

#[inline]
pub fn XMVector3Project(
    V: FXMVECTOR,
    ViewportX: f32,
    ViewportY: f32,
    ViewportWidth: f32,
    ViewportHeight: f32,
    ViewportMinZ: f32,
    ViewportMaxZ: f32,
    Projection: FXMMATRIX,
    View: CXMMATRIX,
    World: CXMMATRIX,
) -> FXMVECTOR
{
    let HalfViewportWidth: f32 = ViewportWidth * 0.5;
    let HalfViewportHeight: f32 = ViewportHeight * 0.5;

    let Scale: XMVECTOR = XMVectorSet(HalfViewportWidth, -HalfViewportHeight, ViewportMaxZ - ViewportMinZ, 0.0);
    let Offset: XMVECTOR = XMVectorSet(ViewportX + HalfViewportWidth, ViewportY + HalfViewportHeight, ViewportMinZ, 0.0);

    let mut Transform: XMMATRIX = XMMatrixMultiply(*World, View);
    Transform = XMMatrixMultiply(Transform, &Projection);

    let mut Result: XMVECTOR = XMVector3TransformCoord(V, Transform);

    Result = XMVectorMultiplyAdd(Result, Scale, Offset);

    return Result;
}

#[inline]
pub fn XMVector3Unproject(
    V: FXMVECTOR,
    ViewportX: f32,
    ViewportY: f32,
    ViewportWidth: f32,
    ViewportHeight: f32,
    ViewportMinZ: f32,
    ViewportMaxZ: f32,
    Projection: FXMMATRIX,
    View: CXMMATRIX,
    World: CXMMATRIX,
) -> FXMVECTOR
{
    const D: XMVECTORF32 = XMVECTORF32 { f: [-1.0, 1.0, 0.0, 0.0] };

    let mut Scale: XMVECTOR = XMVectorSet(ViewportWidth * 0.5, -ViewportHeight * 0.5, ViewportMaxZ - ViewportMinZ, 1.0);
    Scale = XMVectorReciprocal(Scale);

    let mut Offset: XMVECTOR = XMVectorSet(-ViewportX, -ViewportY, -ViewportMinZ, 0.0);
    Offset = XMVectorMultiplyAdd(Scale, Offset, unsafe { D.v });

    let mut Transform: XMMATRIX = XMMatrixMultiply(*World, View);
    Transform = XMMatrixMultiply(Transform, &Projection);
    let mut det = unsafe { crate::undefined() };
    Transform = XMMatrixInverse(Some(&mut det), Transform);

    let Result: XMVECTOR = XMVectorMultiplyAdd(V, Scale, Offset);

    return XMVector3TransformCoord(Result, Transform);
}

#[inline]
pub fn XMVector4Equal(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_f32[0] == V2.vector4_f32[0]) && (V1.vector4_f32[1] == V2.vector4_f32[1]) && (V1.vector4_f32[2] == V2.vector4_f32[2]) && (V1.vector4_f32[3] == V2.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        return _mm_movemask_ps(vTemp) == 0x0f;
    }
}

#[inline]
pub fn XMVector4EqualR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;

        if (V1.vector4_f32[0] == V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] == V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] == V2.vector4_f32[2]) &&
            (V1.vector4_f32[3] == V2.vector4_f32[3])
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if (V1.vector4_f32[0] != V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] != V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] != V2.vector4_f32[2]) &&
            (V1.vector4_f32[3] != V2.vector4_f32[3])
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpeq_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp);
        let mut CR: u32 = 0;
        if iTest == 0xf
        {
            // All equal
            CR = XM_CRMASK_CR6TRUE;
        }
        else if iTest == 0
        {
            // All not equal
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector4EqualInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_u32[0] == V2.vector4_u32[0]) && (V1.vector4_u32[1] == V2.vector4_u32[1]) && (V1.vector4_u32[2] == V2.vector4_u32[2]) && (V1.vector4_u32[3] == V2.vector4_u32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return _mm_movemask_ps(_mm_castsi128_ps(vTemp)) == 0xf;
    }
}

#[inline]
pub fn XMVector4EqualIntR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if V1.vector4_u32[0] == V2.vector4_u32[0] &&
            V1.vector4_u32[1] == V2.vector4_u32[1] &&
            V1.vector4_u32[2] == V2.vector4_u32[2] &&
            V1.vector4_u32[3] == V2.vector4_u32[3]
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if V1.vector4_u32[0] != V2.vector4_u32[0] &&
            V1.vector4_u32[1] != V2.vector4_u32[1] &&
            V1.vector4_u32[2] != V2.vector4_u32[2] &&
            V1.vector4_u32[3] != V2.vector4_u32[3]
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        let iTest: i32 = _mm_movemask_ps(_mm_castsi128_ps(vTemp));
        let mut CR: u32 = 0;
        if iTest == 0xf
        {
            // All equal
            CR = XM_CRMASK_CR6TRUE;
        }
        else if iTest == 0
        {
            // All not equal
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector4NearEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    Epsilon: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let dx: f32 = fabsf(V1.vector4_f32[0] - V2.vector4_f32[0]);
        let dy: f32 = fabsf(V1.vector4_f32[1] - V2.vector4_f32[1]);
        let dz: f32 = fabsf(V1.vector4_f32[2] - V2.vector4_f32[2]);
        let dw: f32 = fabsf(V1.vector4_f32[3] - V2.vector4_f32[3]);

        return (dx <= Epsilon.vector4_f32[0]) &&
            (dy <= Epsilon.vector4_f32[1]) &&
            (dz <= Epsilon.vector4_f32[2]) &&
            (dw <= Epsilon.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Get the difference
        let vDelta: XMVECTOR = _mm_sub_ps(V1, V2);
        // Get the absolute value of the difference
        let mut vTemp: XMVECTOR = _mm_setzero_ps();
        vTemp = _mm_sub_ps(vTemp, vDelta);
        vTemp = _mm_max_ps(vTemp, vDelta);
        vTemp = _mm_cmple_ps(vTemp, Epsilon);
        return _mm_movemask_ps(vTemp) == 0xf;
    }
}

#[inline]
pub fn XMVector4NotEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_f32[0] != V2.vector4_f32[0]) || (V1.vector4_f32[1] != V2.vector4_f32[1]) || (V1.vector4_f32[2] != V2.vector4_f32[2]) || (V1.vector4_f32[3] != V2.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpneq_ps(V1, V2);
        return _mm_movemask_ps(vTemp) != 0;
    }
}

#[inline]
pub fn XMVector4NotEqualInt(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_u32[0] != V2.vector4_u32[0]) || (V1.vector4_u32[1] != V2.vector4_u32[1]) || (V1.vector4_u32[2] != V2.vector4_u32[2]) || (V1.vector4_u32[3] != V2.vector4_u32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: __m128i = _mm_cmpeq_epi32(_mm_castps_si128(V1), _mm_castps_si128(V2));
        return _mm_movemask_ps(_mm_castsi128_ps(vTemp)) != 0xF;
    }
}

#[inline]
pub fn XMVector4Greater(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_f32[0] > V2.vector4_f32[0]) && (V1.vector4_f32[1] > V2.vector4_f32[1]) && (V1.vector4_f32[2] > V2.vector4_f32[2]) && (V1.vector4_f32[3] > V2.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        return _mm_movemask_ps(vTemp) == 0x0f;
    }
}

#[inline]
pub fn XMVector4GreaterR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if V1.vector4_f32[0] > V2.vector4_f32[0] &&
            V1.vector4_f32[1] > V2.vector4_f32[1] &&
            V1.vector4_f32[2] > V2.vector4_f32[2] &&
            V1.vector4_f32[3] > V2.vector4_f32[3]
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if V1.vector4_f32[0] <= V2.vector4_f32[0] &&
            V1.vector4_f32[1] <= V2.vector4_f32[1] &&
            V1.vector4_f32[2] <= V2.vector4_f32[2] &&
            V1.vector4_f32[3] <= V2.vector4_f32[3]
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        let vTemp: XMVECTOR = _mm_cmpgt_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp);
        if iTest == 0xf
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector4GreaterOrEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_f32[0] >= V2.vector4_f32[0]) && (V1.vector4_f32[1] >= V2.vector4_f32[1]) && (V1.vector4_f32[2] >= V2.vector4_f32[2]) && (V1.vector4_f32[3] >= V2.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        return _mm_movemask_ps(vTemp) == 0x0f;
    }
}

#[inline]
pub fn XMVector4GreaterOrEqualR(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> u32
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        if (V1.vector4_f32[0] >= V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] >= V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] >= V2.vector4_f32[2]) &&
            (V1.vector4_f32[3] >= V2.vector4_f32[3])
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if (V1.vector4_f32[0] < V2.vector4_f32[0]) &&
            (V1.vector4_f32[1] < V2.vector4_f32[1]) &&
            (V1.vector4_f32[2] < V2.vector4_f32[2]) &&
            (V1.vector4_f32[3] < V2.vector4_f32[3])
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut CR: u32 = 0;
        let vTemp: XMVECTOR = _mm_cmpge_ps(V1, V2);
        let iTest: i32 = _mm_movemask_ps(vTemp);
        if iTest == 0x0f
        {
            CR = XM_CRMASK_CR6TRUE;
        }
        else if !ibool(iTest)
        {
            CR = XM_CRMASK_CR6FALSE;
        }
        return CR;
    }
}

#[inline]
pub fn XMVector4Less(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_f32[0] < V2.vector4_f32[0]) && (V1.vector4_f32[1] < V2.vector4_f32[1]) && (V1.vector4_f32[2] < V2.vector4_f32[2]) && (V1.vector4_f32[3] < V2.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmplt_ps(V1, V2);
        return _mm_movemask_ps(vTemp) == 0x0f;
    }
}

#[inline]
pub fn XMVector4LessOrEqual(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V1.vector4_f32[0] <= V2.vector4_f32[0]) && (V1.vector4_f32[1] <= V2.vector4_f32[1]) && (V1.vector4_f32[2] <= V2.vector4_f32[2]) && (V1.vector4_f32[3] <= V2.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_cmple_ps(V1, V2);
        return _mm_movemask_ps(vTemp) == 0x0f;
    }
}

#[inline]
pub fn XMVector4InBounds(
    V: FXMVECTOR,
    Bounds: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return (V.vector4_f32[0] <= Bounds.vector4_f32[0] && V.vector4_f32[0] >= -Bounds.vector4_f32[0]) &&
            (V.vector4_f32[1] <= Bounds.vector4_f32[1] && V.vector4_f32[1] >= -Bounds.vector4_f32[1]) &&
            (V.vector4_f32[2] <= Bounds.vector4_f32[2] && V.vector4_f32[2] >= -Bounds.vector4_f32[2]) &&
            (V.vector4_f32[3] <= Bounds.vector4_f32[3] && V.vector4_f32[3] >= -Bounds.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Test if less than or equal
        let mut vTemp1: XMVECTOR = _mm_cmple_ps(V, Bounds);
        // Negate the bounds
        let mut vTemp2: XMVECTOR = _mm_mul_ps(Bounds, g_XMNegativeOne.v);
        // Test if greater or equal (Reversed)
        vTemp2 = _mm_cmple_ps(vTemp2, V);
        // Blend answers
        vTemp1 = _mm_and_ps(vTemp1, vTemp2);
        // All in bounds?
        return _mm_movemask_ps(vTemp1) == 0x0f;
    }
}

#[inline]
pub fn XMVector4IsNaN(
    V: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return XMISNAN!(V.vector4_f32[0]) ||
            XMISNAN!(V.vector4_f32[1]) ||
            XMISNAN!(V.vector4_f32[2]) ||
            XMISNAN!(V.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Test against itself. NaN is always not equal
        let vTempNan: XMVECTOR = _mm_cmpneq_ps(V, V);
        // If any are NaN, the mask is non-zero
        return _mm_movemask_ps(vTempNan) != 0;
    }
}

#[inline]
pub fn XMVector4IsInfinite(
    V: FXMVECTOR,
) -> bool
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        return XMISINF!(V.vector4_f32[0]) ||
            XMISINF!(V.vector4_f32[1]) ||
            XMISINF!(V.vector4_f32[2]) ||
            XMISINF!(V.vector4_f32[3]);
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Mask off the sign bit
        let mut vTemp: XMVECTOR = _mm_and_ps(V, g_XMAbsMask.v);
        // Compare to infinity
        vTemp = _mm_cmpeq_ps(vTemp, g_XMInfinity.v);
        // If any are infinity, the signs are true
        return _mm_movemask_ps(vTemp) != 0;
    }
}

#[inline]
pub fn XMVector4Dot(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Value = V1.vector4_f32[0] * V2.vector4_f32[0] + V1.vector4_f32[1] * V2.vector4_f32[1] + V1.vector4_f32[2] * V2.vector4_f32[2] + V1.vector4_f32[3] * V2.vector4_f32[3];
        let mut vResult: XMVECTORF32 = crate::undefined();
        vResult.f[0] = Value;
        vResult.f[1] = Value;
        vResult.f[2] = Value;
        vResult.f[3] = Value;
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        return _mm_dp_ps(V1, V2, 0xff);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vTemp: XMVECTOR = _mm_mul_ps(V1, V2);
        vTemp = _mm_hadd_ps(vTemp, vTemp);
        return _mm_hadd_ps(vTemp, vTemp);
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vTemp2: XMVECTOR = V2;
        let mut vTemp: XMVECTOR = _mm_mul_ps(V1, vTemp2);
        // Copy X to the Z position and Y to the W position
        vTemp2 = _mm_shuffle_ps(vTemp2, vTemp, _MM_SHUFFLE(1, 0, 0, 0));
        // Add Z = X+Z; W = Y+W
        vTemp2 = _mm_add_ps(vTemp2, vTemp);
        // Copy W to the Z position
        vTemp = _mm_shuffle_ps(vTemp, vTemp2, _MM_SHUFFLE(0, 3, 0, 0));
        // Add Z and W together
        vTemp = _mm_add_ps(vTemp, vTemp2);
        // Splat Z and return
        return XM_PERMUTE_PS!(vTemp, _MM_SHUFFLE(2, 2, 2, 2));
    }
}

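// Worked example (added; not in the ported source): the 4D dot product also
// includes the w components, and the result is splatted across all lanes.
// (1,2,3,4) . (5,6,7,8) = 5 + 12 + 21 + 32 = 70.
#[test]
fn test_xmvector4_dot() {
    let d = XMVector4Dot(XMVectorSet(1.0, 2.0, 3.0, 4.0), XMVectorSet(5.0, 6.0, 7.0, 8.0));
    assert_eq!(70.0, XMVectorGetX(d));
    assert_eq!(70.0, XMVectorGetW(d));
}
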
#[inline]
pub fn XMVector4Cross(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
    V3: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let vResult: XMVECTORF32 = XMVECTORF32 {
            f: [
                (((V2.vector4_f32[2] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[2])) * V1.vector4_f32[1]) - (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1])) * V1.vector4_f32[2]) + (((V2.vector4_f32[1] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[1])) * V1.vector4_f32[3]),
                (((V2.vector4_f32[3] * V3.vector4_f32[2]) - (V2.vector4_f32[2] * V3.vector4_f32[3])) * V1.vector4_f32[0]) - (((V2.vector4_f32[3] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[3])) * V1.vector4_f32[2]) + (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2])) * V1.vector4_f32[3]),
                (((V2.vector4_f32[1] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[1])) * V1.vector4_f32[0]) - (((V2.vector4_f32[0] * V3.vector4_f32[3]) - (V2.vector4_f32[3] * V3.vector4_f32[0])) * V1.vector4_f32[1]) + (((V2.vector4_f32[0] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[0])) * V1.vector4_f32[3]),
                (((V2.vector4_f32[2] * V3.vector4_f32[1]) - (V2.vector4_f32[1] * V3.vector4_f32[2])) * V1.vector4_f32[0]) - (((V2.vector4_f32[2] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[2])) * V1.vector4_f32[1]) + (((V2.vector4_f32[1] * V3.vector4_f32[0]) - (V2.vector4_f32[0] * V3.vector4_f32[1])) * V1.vector4_f32[2]),
            ]
        };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(2, 1, 3, 2));
        let mut vTemp3: XMVECTOR = XM_PERMUTE_PS!(V3, _MM_SHUFFLE(1, 3, 2, 3));
        vResult = _mm_mul_ps(vResult, vTemp3);

        let mut vTemp2: XMVECTOR = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(1, 3, 2, 3));
        vTemp3 = XM_PERMUTE_PS!(vTemp3, _MM_SHUFFLE(1, 3, 0, 1));
        vResult = XM_FNMADD_PS!(vTemp2, vTemp3, vResult);

        let mut vTemp1: XMVECTOR = XM_PERMUTE_PS!(V1, _MM_SHUFFLE(0, 0, 0, 1));
        vResult = _mm_mul_ps(vResult, vTemp1);

        vTemp2 = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(2, 0, 3, 1));
        vTemp3 = XM_PERMUTE_PS!(V3, _MM_SHUFFLE(0, 3, 0, 3));
        vTemp3 = _mm_mul_ps(vTemp3, vTemp2);
        vTemp2 = XM_PERMUTE_PS!(vTemp2, _MM_SHUFFLE(2, 1, 2, 1));
        vTemp1 = XM_PERMUTE_PS!(V3, _MM_SHUFFLE(2, 0, 3, 1));
        vTemp3 = XM_FNMADD_PS!(vTemp2, vTemp1, vTemp3);
        vTemp1 = XM_PERMUTE_PS!(V1, _MM_SHUFFLE(1, 1, 2, 2));
        vResult = XM_FNMADD_PS!(vTemp1, vTemp3, vResult);

        vTemp2 = XM_PERMUTE_PS!(V2, _MM_SHUFFLE(1, 0, 2, 1));
        vTemp3 = XM_PERMUTE_PS!(V3, _MM_SHUFFLE(0, 1, 0, 2));
        vTemp3 = _mm_mul_ps(vTemp3, vTemp2);
        vTemp2 = XM_PERMUTE_PS!(vTemp2, _MM_SHUFFLE(2, 0, 2, 1));
        vTemp1 = XM_PERMUTE_PS!(V3, _MM_SHUFFLE(1, 0, 2, 1));
        vTemp3 = XM_FNMADD_PS!(vTemp1, vTemp2, vTemp3);
        vTemp1 = XM_PERMUTE_PS!(V1, _MM_SHUFFLE(2, 3, 3, 3));
        vResult = XM_FMADD_PS!(vTemp3, vTemp1, vResult);
        return vResult;
    }
}

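// Illustrative check (added; not in the ported source): the 4D cross product
// of three vectors is orthogonal to all three of them, which is easy to see
// with the X, Y and Z basis vectors (the result lies entirely in w).
#[test]
fn test_xmvector4_cross_orthogonality() {
    let a = XMVectorSet(1.0, 0.0, 0.0, 0.0);
    let b = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let c = XMVectorSet(0.0, 0.0, 1.0, 0.0);
    let r = XMVector4Cross(a, b, c);
    assert_eq!(0.0, XMVectorGetX(XMVector4Dot(r, a)));
    assert_eq!(0.0, XMVectorGetX(XMVector4Dot(r, b)));
    assert_eq!(0.0, XMVectorGetX(XMVector4Dot(r, c)));
}
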
#[inline]
pub fn XMVector4LengthSq(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    return XMVector4Dot(V, V);
}

#[inline]
pub fn XMVector4ReciprocalLengthEst(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector4LengthSq(V);
        Result = XMVectorReciprocalSqrtEst(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0xff);
        return _mm_rsqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_rsqrt_ps(vLengthSq);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        // Perform the dot product on x, y, z and w
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        // vTemp has z and w
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
        // x+z, y+w
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        // x+z, x+z, x+z, y+w
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
        // ??, ??, y+w, y+w
        vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
        // ??, ??, x+z+y+w, ??
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        // Splat the length
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
        // Get the reciprocal
        vLengthSq = _mm_rsqrt_ps(vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector4ReciprocalLength(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector4LengthSq(V);
        Result = XMVectorReciprocalSqrt(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0xff);
        let vLengthSq: XMVECTOR = _mm_sqrt_ps(vTemp);
        return _mm_div_ps(g_XMOne.v, vLengthSq);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        vLengthSq = _mm_div_ps(g_XMOne.v, vLengthSq);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
        vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        vLengthSq = _mm_div_ps(g_XMOne.v, vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector4LengthEst(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector4LengthSq(V);
        Result = XMVectorSqrtEst(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0xff);
        return _mm_sqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
        vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector4Length(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector4LengthSq(V);
        Result = XMVectorSqrt(Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0xff);
        return _mm_sqrt_ps(vTemp);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
        vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
        vLengthSq = _mm_sqrt_ps(vLengthSq);
        return vLengthSq;
    }
}

#[inline]
pub fn XMVector4NormalizeEst(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    {
        let mut Result: XMVECTOR;

        Result = XMVector4ReciprocalLength(V);
        Result = XMVectorMultiply(V, Result);

        return Result;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let vTemp: XMVECTOR = _mm_dp_ps(V, V, 0xff);
        let vResult: XMVECTOR = _mm_rsqrt_ps(vTemp);
        return _mm_mul_ps(vResult, V);
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vDot: XMVECTOR = _mm_mul_ps(V, V);
        vDot = _mm_hadd_ps(vDot, vDot);
        vDot = _mm_hadd_ps(vDot, vDot);
        vDot = _mm_rsqrt_ps(vDot);
        vDot = _mm_mul_ps(vDot, V);
        return vDot;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
        vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
        let mut vResult: XMVECTOR = _mm_rsqrt_ps(vLengthSq);
        vResult = _mm_mul_ps(vResult, V);
        return vResult;
    }
}

#[inline]
pub fn XMVector4Normalize(
    V: FXMVECTOR,
) -> FXMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let mut fLength: f32;
        let mut vResult: XMVECTOR;

        vResult = XMVector4Length(V);
        fLength = vResult.vector4_f32[0];

        // Prevent divide by zero
        if fLength > 0.0
        {
            fLength = 1.0 / fLength;
        }

        vResult.vector4_f32[0] = V.vector4_f32[0] * fLength;
        vResult.vector4_f32[1] = V.vector4_f32[1] * fLength;
        vResult.vector4_f32[2] = V.vector4_f32[2] * fLength;
        vResult.vector4_f32[3] = V.vector4_f32[3] * fLength;
        return vResult;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE4_INTRINSICS_)]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_dp_ps(V, V, 0xff);
        // Prepare for the division
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        // Create zero with a single instruction
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        // Test for a divide by zero (Must be FP to detect -0.0)
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        // Failsafe on zero (Or epsilon) length planes
        // If the length is infinity, set the elements to zero
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        // Divide to perform the normalization
        vResult = _mm_div_ps(V, vResult);
        // Any that are infinity, set to zero
        vResult = _mm_and_ps(vResult, vZeroMask);
        // Select qnan or result based on infinite length
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }

    #[cfg(all(_XM_SSE3_INTRINSICS_, not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        vLengthSq = _mm_hadd_ps(vLengthSq, vLengthSq);
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }

    #[cfg(all(_XM_SSE_INTRINSICS_, not(_XM_SSE3_INTRINSICS_), not(_XM_SSE4_INTRINSICS_)))]
    unsafe {
        let mut vLengthSq: XMVECTOR = _mm_mul_ps(V, V);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(3, 2, 3, 2));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(1, 0, 0, 0));
        vTemp = _mm_shuffle_ps(vTemp, vLengthSq, _MM_SHUFFLE(3, 3, 0, 0));
        vLengthSq = _mm_add_ps(vLengthSq, vTemp);
        vLengthSq = XM_PERMUTE_PS!(vLengthSq, _MM_SHUFFLE(2, 2, 2, 2));
        let mut vResult: XMVECTOR = _mm_sqrt_ps(vLengthSq);
        let mut vZeroMask: XMVECTOR = _mm_setzero_ps();
        vZeroMask = _mm_cmpneq_ps(vZeroMask, vResult);
        vLengthSq = _mm_cmpneq_ps(vLengthSq, g_XMInfinity.v);
        vResult = _mm_div_ps(V, vResult);
        vResult = _mm_and_ps(vResult, vZeroMask);
        let vTemp1: XMVECTOR = _mm_andnot_ps(vLengthSq, g_XMQNaN.v);
        let vTemp2: XMVECTOR = _mm_and_ps(vResult, vLengthSq);
        vResult = _mm_or_ps(vTemp1, vTemp2);
        return vResult;
    }
}

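// Illustrative check (added; not in the ported source): (1,1,1,1) has length
// 2, so it normalizes to (0.5, 0.5, 0.5, 0.5); every intermediate value here
// is exactly representable, so exact comparisons are safe.
#[test]
fn test_xmvector4_normalize() {
    let n = XMVector4Normalize(XMVectorSet(1.0, 1.0, 1.0, 1.0));
    assert_eq!(0.5, XMVectorGetX(n));
    assert_eq!(0.5, XMVectorGetW(n));
    assert_eq!(1.0, XMVectorGetX(XMVector4Length(n)));
}
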
/// Clamps the length of a 4D vector to a given range, with the range
/// bounds given as scalars.
#[inline]
pub fn XMVector4ClampLength(
    V: FXMVECTOR,
    LengthMin: f32,
    LengthMax: f32,
) -> XMVECTOR
{
    let ClampMax: XMVECTOR = XMVectorReplicate(LengthMax);
    let ClampMin: XMVECTOR = XMVectorReplicate(LengthMin);

    return XMVector4ClampLengthV(V, ClampMin, ClampMax);
}

/// Clamps the length of a 4D vector to a given range, with the range
/// bounds given as vectors whose components must all be the same value.
#[inline]
pub fn XMVector4ClampLengthV(
    V: FXMVECTOR,
    LengthMin: FXMVECTOR,
    LengthMax: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        debug_assert!((XMVectorGetY(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetZ(LengthMin) == XMVectorGetX(LengthMin)) && (XMVectorGetW(LengthMin) == XMVectorGetX(LengthMin)));
        debug_assert!((XMVectorGetY(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetZ(LengthMax) == XMVectorGetX(LengthMax)) && (XMVectorGetW(LengthMax) == XMVectorGetX(LengthMax)));
        debug_assert!(XMVector4GreaterOrEqual(LengthMin, XMVectorZero()));
        debug_assert!(XMVector4GreaterOrEqual(LengthMax, XMVectorZero()));
        debug_assert!(XMVector4GreaterOrEqual(LengthMax, LengthMin));

        let LengthSq: XMVECTOR = XMVector4LengthSq(V);

        const Zero: XMVECTOR = unsafe { g_XMZero.v };

        let RcpLength: XMVECTOR = XMVectorReciprocalSqrt(LengthSq);

        let InfiniteLength: XMVECTOR = XMVectorEqualInt(LengthSq, g_XMInfinity.v);
        let ZeroLength: XMVECTOR = XMVectorEqual(LengthSq, Zero);

        let mut Normal: XMVECTOR = XMVectorMultiply(V, RcpLength);

        let mut Length: XMVECTOR = XMVectorMultiply(LengthSq, RcpLength);

        // Fall back to the squared length (and unnormalized vector) when the
        // length is zero or infinite
        let Select: XMVECTOR = XMVectorEqualInt(InfiniteLength, ZeroLength);
        Length = XMVectorSelect(LengthSq, Length, Select);
        Normal = XMVectorSelect(LengthSq, Normal, Select);

        let ControlMax: XMVECTOR = XMVectorGreater(Length, LengthMax);
        let ControlMin: XMVECTOR = XMVectorLess(Length, LengthMin);

        let mut ClampLength: XMVECTOR = XMVectorSelect(Length, LengthMax, ControlMax);
        ClampLength = XMVectorSelect(ClampLength, LengthMin, ControlMin);

        let mut Result: XMVECTOR = XMVectorMultiply(Normal, ClampLength);

        // Preserve the original vector (with no precision loss) if the length
        // falls within the given range
        let Control: XMVECTOR = XMVectorEqualInt(ControlMax, ControlMin);
        Result = XMVectorSelect(Result, V, Control);

        return Result;
    }
}

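// Illustrative check (not from the original sources): a vector of length 2
// clamped to the range [0.5, 1.0] should be scaled down to length 1.
#[test]
fn test_vector4_clamp_length() {
    let v = XMVectorSet(2.0, 0.0, 0.0, 0.0);
    let clamped = XMVector4ClampLength(v, 0.5, 1.0);
    let len = XMVectorGetX(XMVector4Length(clamped));
    assert!((len - 1.0).abs() < 1.0e-6);
}
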
/// Reflects an incident 4D vector across a 4D normal vector.
#[inline]
pub fn XMVector4Reflect(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
) -> XMVECTOR
{
    // Result = Incident - (2 * dot(Incident, Normal)) * Normal
    let mut Result: XMVECTOR = XMVector4Dot(Incident, Normal);
    Result = XMVectorAdd(Result, Result);
    Result = XMVectorNegativeMultiplySubtract(Result, Normal, Incident);

    return Result;
}

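// Illustrative check (not from the original sources): reflecting
// (1, -1, 0, 0) off the plane with normal (0, 1, 0, 0) flips only the
// y component, per Result = Incident - 2 * dot(Incident, Normal) * Normal.
#[test]
fn test_vector4_reflect() {
    let incident = XMVectorSet(1.0, -1.0, 0.0, 0.0);
    let normal = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let reflected = XMVector4Reflect(incident, normal);
    assert!(XMVector4Equal(reflected, XMVectorSet(1.0, 1.0, 0.0, 0.0)));
}
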
/// Refracts an incident 4D vector across a 4D normal vector, with a scalar
/// index of refraction.
#[inline]
pub fn XMVector4Refract(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
    RefractionIndex: f32,
) -> XMVECTOR
{
    let Index: XMVECTOR = XMVectorReplicate(RefractionIndex);
    return XMVector4RefractV(Incident, Normal, Index);
}

/// Refracts an incident 4D vector across a 4D normal vector, with the
/// index of refraction given as a vector.
#[inline]
pub fn XMVector4RefractV(
    Incident: FXMVECTOR,
    Normal: FXMVECTOR,
    RefractionIndex: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let IDotN: XMVECTOR;
        let mut R: XMVECTOR;
        const Zero: XMVECTOR = unsafe { g_XMZero.v };

        // Result = RefractionIndex * Incident - Normal * (RefractionIndex * dot(Incident, Normal) +
        // sqrt(1 - RefractionIndex * RefractionIndex * (1 - dot(Incident, Normal) * dot(Incident, Normal))))

        IDotN = XMVector4Dot(Incident, Normal);

        // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
        R = XMVectorNegativeMultiplySubtract(IDotN, IDotN, g_XMOne.v);
        R = XMVectorMultiply(R, RefractionIndex);
        R = XMVectorNegativeMultiplySubtract(R, RefractionIndex, g_XMOne.v);

        if XMVector4LessOrEqual(R, Zero)
        {
            // Total internal reflection
            return Zero;
        }
        else
        {
            let mut Result: XMVECTOR;

            // R = RefractionIndex * IDotN + sqrt(R)
            R = XMVectorSqrt(R);
            R = XMVectorMultiplyAdd(RefractionIndex, IDotN, R);

            // Result = RefractionIndex * Incident - Normal * R
            Result = XMVectorMultiply(RefractionIndex, Incident);
            Result = XMVectorNegativeMultiplySubtract(Normal, R, Result);

            return Result;
        }
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        let IDotN: XMVECTOR = XMVector4Dot(Incident, Normal);

        // R = 1.0f - RefractionIndex * RefractionIndex * (1.0f - IDotN * IDotN)
        let mut R: XMVECTOR = XM_FNMADD_PS!(IDotN, IDotN, g_XMOne.v);
        let R2: XMVECTOR = _mm_mul_ps(RefractionIndex, RefractionIndex);
        R = XM_FNMADD_PS!(R, R2, g_XMOne.v);

        let mut vResult: XMVECTOR = _mm_cmple_ps(R, g_XMZero.v);
        if _mm_movemask_ps(vResult) == 0x0f
        {
            // Total internal reflection
            vResult = g_XMZero.v;
        }
        else
        {
            // R = RefractionIndex * IDotN + sqrt(R)
            R = _mm_sqrt_ps(R);
            R = XM_FMADD_PS!(RefractionIndex, IDotN, R);
            // Result = RefractionIndex * Incident - Normal * R
            vResult = _mm_mul_ps(RefractionIndex, Incident);
            vResult = XM_FNMADD_PS!(R, Normal, vResult);
        }
        return vResult;
    }
}

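// Illustrative check (not from the original sources): a grazing incident
// vector with a refraction index of 2.0 makes the term under the square
// root negative, so total internal reflection yields a zero vector.
#[test]
fn test_vector4_refract_total_internal_reflection() {
    let incident = XMVectorSet(1.0, 0.0, 0.0, 0.0);
    let normal = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let refracted = XMVector4Refract(incident, normal, 2.0);
    assert!(XMVector4Equal(refracted, XMVectorZero()));
}
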
/// Computes a 4D vector orthogonal to a given 4D vector.
#[inline]
pub fn XMVector4Orthogonal(
    V: FXMVECTOR,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let Result = XMVECTORF32 { f: [
            V.vector4_f32[2],
            V.vector4_f32[3],
            -V.vector4_f32[0],
            -V.vector4_f32[1]
        ]};
        return Result.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        const FlipZW: XMVECTORF32 = XMVECTORF32 { f: [1.0, 1.0, -1.0, -1.0] };
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 0, 3, 2));
        vResult = _mm_mul_ps(vResult, FlipZW.v);
        return vResult;
    }
}

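// Illustrative check (not from the original sources): the returned vector
// (z, w, -x, -y) always has a zero 4D dot product with the input.
#[test]
fn test_vector4_orthogonal() {
    let v = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let ortho = XMVector4Orthogonal(v);
    let dot = XMVectorGetX(XMVector4Dot(v, ortho));
    assert!(dot.abs() < 1.0e-6);
}
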
/// Estimates the radian angle between two normalized 4D vectors, trading
/// accuracy for speed via XMVectorACosEst.
#[inline]
pub fn XMVector4AngleBetweenNormalsEst(
    N1: FXMVECTOR,
    N2: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let mut Result: XMVECTOR = XMVector4Dot(N1, N2);
        Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
        Result = XMVectorACosEst(Result);
        return Result;
    }
}

/// Computes the radian angle between two normalized 4D vectors.
#[inline]
pub fn XMVector4AngleBetweenNormals(
    N1: FXMVECTOR,
    N2: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let mut Result: XMVECTOR = XMVector4Dot(N1, N2);
        Result = XMVectorClamp(Result, g_XMNegativeOne.v, g_XMOne.v);
        Result = XMVectorACos(Result);
        return Result;
    }
}

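// Illustrative check (not from the original sources): two orthogonal unit
// vectors are pi/2 radians apart. The tolerance is loose because
// XMVectorACos is a polynomial approximation.
#[test]
fn test_vector4_angle_between_normals() {
    let n1 = XMVectorSet(1.0, 0.0, 0.0, 0.0);
    let n2 = XMVectorSet(0.0, 1.0, 0.0, 0.0);
    let angle = XMVectorGetX(XMVector4AngleBetweenNormals(n1, n2));
    assert!((angle - std::f32::consts::FRAC_PI_2).abs() < 1.0e-3);
}
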
/// Computes the radian angle between two 4D vectors of arbitrary length.
#[inline]
pub fn XMVector4AngleBetweenVectors(
    V1: FXMVECTOR,
    V2: FXMVECTOR,
) -> XMVECTOR
{
    unsafe {
        let mut L1: XMVECTOR = XMVector4ReciprocalLength(V1);
        let L2: XMVECTOR = XMVector4ReciprocalLength(V2);

        let Dot: XMVECTOR = XMVector4Dot(V1, V2);

        L1 = XMVectorMultiply(L1, L2);

        let mut CosAngle: XMVECTOR = XMVectorMultiply(Dot, L1);
        CosAngle = XMVectorClamp(CosAngle, g_XMNegativeOne.v, g_XMOne.v);

        return XMVectorACos(CosAngle);
    }
}

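// Illustrative check (not from the original sources): unlike the normals
// variant, this handles non-unit inputs; perpendicular vectors of any
// magnitude still yield pi/2 radians.
#[test]
fn test_vector4_angle_between_vectors() {
    let v1 = XMVectorSet(2.0, 0.0, 0.0, 0.0);
    let v2 = XMVectorSet(0.0, 3.0, 0.0, 0.0);
    let angle = XMVectorGetX(XMVector4AngleBetweenVectors(v1, v2));
    assert!((angle - std::f32::consts::FRAC_PI_2).abs() < 1.0e-3);
}
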
/// Transforms a 4D vector by a matrix.
#[inline]
pub fn XMVector4Transform(
    V: FXMVECTOR,
    M: XMMATRIX,
) -> XMVECTOR
{
    #[cfg(_XM_NO_INTRINSICS_)]
    unsafe {
        let fX: f32 = (M.m[0][0] * V.vector4_f32[0]) + (M.m[1][0] * V.vector4_f32[1]) + (M.m[2][0] * V.vector4_f32[2]) + (M.m[3][0] * V.vector4_f32[3]);
        let fY: f32 = (M.m[0][1] * V.vector4_f32[0]) + (M.m[1][1] * V.vector4_f32[1]) + (M.m[2][1] * V.vector4_f32[2]) + (M.m[3][1] * V.vector4_f32[3]);
        let fZ: f32 = (M.m[0][2] * V.vector4_f32[0]) + (M.m[1][2] * V.vector4_f32[1]) + (M.m[2][2] * V.vector4_f32[2]) + (M.m[3][2] * V.vector4_f32[3]);
        let fW: f32 = (M.m[0][3] * V.vector4_f32[0]) + (M.m[1][3] * V.vector4_f32[1]) + (M.m[2][3] * V.vector4_f32[2]) + (M.m[3][3] * V.vector4_f32[3]);
        let vResult = XMVECTORF32 { f: [fX, fY, fZ, fW] };
        return vResult.v;
    }

    #[cfg(_XM_ARM_NEON_INTRINSICS_)]
    {
        unimplemented!()
    }

    #[cfg(_XM_SSE_INTRINSICS_)]
    unsafe {
        // Splat each component in turn, multiplying it by the matching
        // matrix row and accumulating into the result
        let mut vResult: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(3, 3, 3, 3)); // W
        vResult = _mm_mul_ps(vResult, M.r[3]);
        let mut vTemp: XMVECTOR = XM_PERMUTE_PS!(V, _MM_SHUFFLE(2, 2, 2, 2)); // Z
        vResult = XM_FMADD_PS!(vTemp, M.r[2], vResult);
        vTemp = XM_PERMUTE_PS!(V, _MM_SHUFFLE(1, 1, 1, 1)); // Y
        vResult = XM_FMADD_PS!(vTemp, M.r[1], vResult);
        vTemp = XM_PERMUTE_PS!(V, _MM_SHUFFLE(0, 0, 0, 0)); // X
        vResult = XM_FMADD_PS!(vTemp, M.r[0], vResult);
        return vResult;
    }
}

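// Illustrative sketch (not from the original sources): transforming by the
// identity matrix should return the input unchanged. This assumes the crate
// provides XMMatrixIdentity, mirroring DirectXMath.
#[test]
fn test_vector4_transform_identity() {
    let v = XMVectorSet(1.0, 2.0, 3.0, 4.0);
    let transformed = XMVector4Transform(v, XMMatrixIdentity());
    assert!(XMVector4Equal(transformed, v));
}
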
impl From<&[f32; 4]> for XMVector {
    #[inline]
    fn from(v: &[f32; 4]) -> XMVector {
        XMVector(XMLoadFloat4(v.into()))
    }
}

impl std::ops::Deref for XMVector {
    type Target = XMVECTOR;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl std::ops::DerefMut for XMVector {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl XMVector {
    #[inline(always)]
    pub fn set(x: f32, y: f32, z: f32, w: f32) -> XMVector {
        XMVector(XMVectorSet(x, y, z, w))
    }
}

impl std::ops::Add for XMVector {
    type Output = XMVector;
    #[inline]
    fn add(self, V2: XMVector) -> Self::Output {
        XMVector(XMVectorAdd(self.0, V2.0))
    }
}

impl std::ops::AddAssign for XMVector {
    #[inline]
    fn add_assign(&mut self, V2: XMVector) {
        self.0 = XMVectorAdd(self.0, V2.0);
    }
}

impl std::ops::Sub for XMVector {
    type Output = XMVector;
    #[inline]
    fn sub(self, V2: XMVector) -> Self::Output {
        XMVector(XMVectorSubtract(self.0, V2.0))
    }
}

impl std::ops::SubAssign for XMVector {
    #[inline]
    fn sub_assign(&mut self, V2: XMVector) {
        self.0 = XMVectorSubtract(self.0, V2.0);
    }
}

impl std::ops::Mul for XMVector {
    type Output = XMVector;
    #[inline]
    fn mul(self, V2: XMVector) -> Self::Output {
        XMVector(XMVectorMultiply(self.0, V2.0))
    }
}

impl std::ops::MulAssign for XMVector {
    #[inline]
    fn mul_assign(&mut self, V2: XMVector) {
        self.0 = XMVectorMultiply(self.0, V2.0);
    }
}

impl std::ops::Div for XMVector {
    type Output = XMVector;
    #[inline]
    fn div(self, V2: XMVector) -> Self::Output {
        XMVector(XMVectorDivide(self.0, V2.0))
    }
}

impl std::ops::DivAssign for XMVector {
    #[inline]
    fn div_assign(&mut self, V2: XMVector) {
        self.0 = XMVectorDivide(self.0, V2.0);
    }
}

impl std::ops::Mul<XMVector> for f32 {
    type Output = XMVector;
    #[inline]
    fn mul(self, V: XMVector) -> Self::Output {
        let S = self;
        XMVector(XMVectorScale(V.0, S))
    }
}

impl std::ops::Mul<f32> for XMVector {
    type Output = XMVector;
    #[inline]
    fn mul(self, S: f32) -> Self::Output {
        XMVector(XMVectorScale(self.0, S))
    }
}

impl std::ops::MulAssign<f32> for XMVector {
    #[inline]
    fn mul_assign(&mut self, S: f32) {
        self.0 = XMVectorScale(self.0, S);
    }
}

impl std::ops::Div<f32> for XMVector {
    type Output = XMVector;
    #[inline]
    fn div(self, S: f32) -> Self::Output {
        let vS = XMVectorReplicate(S);
        XMVector(XMVectorDivide(self.0, vS))
    }
}

impl std::ops::DivAssign<f32> for XMVector {
    #[inline]
    fn div_assign(&mut self, S: f32) {
        let vS = XMVectorReplicate(S);
        self.0 = XMVectorDivide(self.0, vS);
    }
}

impl std::ops::Neg for XMVector {
    type Output = XMVector;
    #[inline]
    fn neg(self) -> Self::Output {
        XMVector(XMVectorNegate(*self))
    }
}

impl std::cmp::PartialEq for XMVector {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        XMVector4Equal(self.0, rhs.0)
    }
}

impl std::fmt::Debug for XMVector {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_list()
            .entry(&XMVectorGetX(self.0))
            .entry(&XMVectorGetY(self.0))
            .entry(&XMVectorGetZ(self.0))
            .entry(&XMVectorGetW(self.0))
            .finish()
    }
}

#[test]
fn test_debug() {
    #[rustfmt::skip]
    let m = XMVector::from(&[1.0, 2.0, 3.0, 4.0]);
    let s = format!("{:?}", m);
    assert_eq!("[1.0, 2.0, 3.0, 4.0]", s);
}
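
// Illustrative check (not from the original sources): exercise the operator
// overloads defined above, using the PartialEq and Debug impls for assert_eq!.
#[test]
fn test_operators() {
    assert_eq!(XMVector::set(5.0, 5.0, 5.0, 5.0), XMVector::set(1.0, 2.0, 3.0, 4.0) + XMVector::set(4.0, 3.0, 2.0, 1.0));
    assert_eq!(XMVector::set(2.0, 4.0, 6.0, 8.0), XMVector::set(1.0, 2.0, 3.0, 4.0) * 2.0);
    assert_eq!(XMVector::set(-1.0, -2.0, -3.0, -4.0), -XMVector::set(1.0, 2.0, 3.0, 4.0));
}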