1use crate::{BVec2, I16Vec2, I64Vec3, I8Vec2, IVec2, U16Vec2, U64Vec2, U8Vec2, USizeVec2, UVec2};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[inline(always)]
11#[must_use]
12pub const fn i64vec2(x: i64, y: i64) -> I64Vec2 {
13 I64Vec2::new(x, y)
14}
15
/// A 2-dimensional vector with `i64` components.
#[cfg_attr(not(target_arch = "spirv"), derive(Hash))]
#[derive(Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(feature = "cuda", repr(align(16)))]
#[cfg_attr(not(target_arch = "spirv"), repr(C))]
#[cfg_attr(target_arch = "spirv", repr(simd))]
pub struct I64Vec2 {
    pub x: i64,
    pub y: i64,
}
27
28impl I64Vec2 {
29 pub const ZERO: Self = Self::splat(0);
31
32 pub const ONE: Self = Self::splat(1);
34
35 pub const NEG_ONE: Self = Self::splat(-1);
37
38 pub const MIN: Self = Self::splat(i64::MIN);
40
41 pub const MAX: Self = Self::splat(i64::MAX);
43
44 pub const X: Self = Self::new(1, 0);
46
47 pub const Y: Self = Self::new(0, 1);
49
50 pub const NEG_X: Self = Self::new(-1, 0);
52
53 pub const NEG_Y: Self = Self::new(0, -1);
55
56 pub const AXES: [Self; 2] = [Self::X, Self::Y];
58
59 #[inline(always)]
61 #[must_use]
62 pub const fn new(x: i64, y: i64) -> Self {
63 Self { x, y }
64 }
65
66 #[inline]
68 #[must_use]
69 pub const fn splat(v: i64) -> Self {
70 Self { x: v, y: v }
71 }
72
73 #[inline]
75 #[must_use]
76 pub fn map<F>(self, f: F) -> Self
77 where
78 F: Fn(i64) -> i64,
79 {
80 Self::new(f(self.x), f(self.y))
81 }
82
83 #[inline]
89 #[must_use]
90 pub fn select(mask: BVec2, if_true: Self, if_false: Self) -> Self {
91 Self {
92 x: if mask.test(0) { if_true.x } else { if_false.x },
93 y: if mask.test(1) { if_true.y } else { if_false.y },
94 }
95 }
96
97 #[inline]
99 #[must_use]
100 pub const fn from_array(a: [i64; 2]) -> Self {
101 Self::new(a[0], a[1])
102 }
103
104 #[inline]
106 #[must_use]
107 pub const fn to_array(&self) -> [i64; 2] {
108 [self.x, self.y]
109 }
110
111 #[inline]
117 #[must_use]
118 pub const fn from_slice(slice: &[i64]) -> Self {
119 assert!(slice.len() >= 2);
120 Self::new(slice[0], slice[1])
121 }
122
123 #[inline]
129 pub fn write_to_slice(self, slice: &mut [i64]) {
130 slice[..2].copy_from_slice(&self.to_array());
131 }
132
133 #[inline]
135 #[must_use]
136 pub const fn extend(self, z: i64) -> I64Vec3 {
137 I64Vec3::new(self.x, self.y, z)
138 }
139
140 #[inline]
142 #[must_use]
143 pub fn with_x(mut self, x: i64) -> Self {
144 self.x = x;
145 self
146 }
147
148 #[inline]
150 #[must_use]
151 pub fn with_y(mut self, y: i64) -> Self {
152 self.y = y;
153 self
154 }
155
156 #[inline]
158 #[must_use]
159 pub fn dot(self, rhs: Self) -> i64 {
160 (self.x * rhs.x) + (self.y * rhs.y)
161 }
162
163 #[inline]
165 #[must_use]
166 pub fn dot_into_vec(self, rhs: Self) -> Self {
167 Self::splat(self.dot(rhs))
168 }
169
170 #[inline]
174 #[must_use]
175 pub fn min(self, rhs: Self) -> Self {
176 Self {
177 x: if self.x < rhs.x { self.x } else { rhs.x },
178 y: if self.y < rhs.y { self.y } else { rhs.y },
179 }
180 }
181
182 #[inline]
186 #[must_use]
187 pub fn max(self, rhs: Self) -> Self {
188 Self {
189 x: if self.x > rhs.x { self.x } else { rhs.x },
190 y: if self.y > rhs.y { self.y } else { rhs.y },
191 }
192 }
193
194 #[inline]
202 #[must_use]
203 pub fn clamp(self, min: Self, max: Self) -> Self {
204 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
205 self.max(min).min(max)
206 }
207
208 #[inline]
212 #[must_use]
213 pub fn min_element(self) -> i64 {
214 let min = |a, b| if a < b { a } else { b };
215 min(self.x, self.y)
216 }
217
218 #[inline]
222 #[must_use]
223 pub fn max_element(self) -> i64 {
224 let max = |a, b| if a > b { a } else { b };
225 max(self.x, self.y)
226 }
227
228 #[doc(alias = "argmin")]
230 #[inline]
231 #[must_use]
232 pub fn min_position(self) -> usize {
233 if self.x <= self.y {
234 0
235 } else {
236 1
237 }
238 }
239
240 #[doc(alias = "argmax")]
242 #[inline]
243 #[must_use]
244 pub fn max_position(self) -> usize {
245 if self.x >= self.y {
246 0
247 } else {
248 1
249 }
250 }
251
252 #[inline]
256 #[must_use]
257 pub fn element_sum(self) -> i64 {
258 self.x + self.y
259 }
260
261 #[inline]
265 #[must_use]
266 pub fn element_product(self) -> i64 {
267 self.x * self.y
268 }
269
270 #[inline]
276 #[must_use]
277 pub fn cmpeq(self, rhs: Self) -> BVec2 {
278 BVec2::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y))
279 }
280
281 #[inline]
287 #[must_use]
288 pub fn cmpne(self, rhs: Self) -> BVec2 {
289 BVec2::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y))
290 }
291
292 #[inline]
298 #[must_use]
299 pub fn cmpge(self, rhs: Self) -> BVec2 {
300 BVec2::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y))
301 }
302
303 #[inline]
309 #[must_use]
310 pub fn cmpgt(self, rhs: Self) -> BVec2 {
311 BVec2::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y))
312 }
313
314 #[inline]
320 #[must_use]
321 pub fn cmple(self, rhs: Self) -> BVec2 {
322 BVec2::new(self.x.le(&rhs.x), self.y.le(&rhs.y))
323 }
324
325 #[inline]
331 #[must_use]
332 pub fn cmplt(self, rhs: Self) -> BVec2 {
333 BVec2::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y))
334 }
335
336 #[inline]
338 #[must_use]
339 pub fn abs(self) -> Self {
340 Self {
341 x: self.x.abs(),
342 y: self.y.abs(),
343 }
344 }
345
346 #[inline]
352 #[must_use]
353 pub fn signum(self) -> Self {
354 Self {
355 x: self.x.signum(),
356 y: self.y.signum(),
357 }
358 }
359
360 #[inline]
368 #[must_use]
369 pub fn is_negative_bitmask(self) -> u32 {
370 (self.x.is_negative() as u32) | ((self.y.is_negative() as u32) << 1)
371 }
372
373 #[doc(alias = "magnitude2")]
375 #[inline]
376 #[must_use]
377 pub fn length_squared(self) -> i64 {
378 self.dot(self)
379 }
380
381 #[inline]
383 #[must_use]
384 pub fn distance_squared(self, rhs: Self) -> i64 {
385 (self - rhs).length_squared()
386 }
387
388 #[inline]
393 #[must_use]
394 pub fn div_euclid(self, rhs: Self) -> Self {
395 Self::new(self.x.div_euclid(rhs.x), self.y.div_euclid(rhs.y))
396 }
397
398 #[inline]
405 #[must_use]
406 pub fn rem_euclid(self, rhs: Self) -> Self {
407 Self::new(self.x.rem_euclid(rhs.x), self.y.rem_euclid(rhs.y))
408 }
409
410 #[inline]
419 #[must_use]
420 pub fn manhattan_distance(self, rhs: Self) -> u64 {
421 self.x.abs_diff(rhs.x) + self.y.abs_diff(rhs.y)
422 }
423
424 #[inline]
430 #[must_use]
431 pub fn checked_manhattan_distance(self, rhs: Self) -> Option<u64> {
432 let d = self.x.abs_diff(rhs.x);
433 d.checked_add(self.y.abs_diff(rhs.y))
434 }
435
436 #[inline]
440 #[must_use]
441 pub fn chebyshev_distance(self, rhs: Self) -> u64 {
442 [self.x.abs_diff(rhs.x), self.y.abs_diff(rhs.y)]
444 .into_iter()
445 .max()
446 .unwrap()
447 }
448
449 #[inline]
451 #[must_use]
452 pub fn perp(self) -> Self {
453 Self {
454 x: -self.y,
455 y: self.x,
456 }
457 }
458
459 #[doc(alias = "wedge")]
462 #[doc(alias = "cross")]
463 #[doc(alias = "determinant")]
464 #[inline]
465 #[must_use]
466 pub fn perp_dot(self, rhs: Self) -> i64 {
467 (self.x * rhs.y) - (self.y * rhs.x)
468 }
469
470 #[inline]
474 #[must_use]
475 pub fn rotate(self, rhs: Self) -> Self {
476 Self {
477 x: self.x * rhs.x - self.y * rhs.y,
478 y: self.y * rhs.x + self.x * rhs.y,
479 }
480 }
481
482 #[inline]
484 #[must_use]
485 pub fn as_vec2(&self) -> crate::Vec2 {
486 crate::Vec2::new(self.x as f32, self.y as f32)
487 }
488
489 #[inline]
491 #[must_use]
492 pub fn as_dvec2(&self) -> crate::DVec2 {
493 crate::DVec2::new(self.x as f64, self.y as f64)
494 }
495
496 #[inline]
498 #[must_use]
499 pub fn as_i8vec2(&self) -> crate::I8Vec2 {
500 crate::I8Vec2::new(self.x as i8, self.y as i8)
501 }
502
503 #[inline]
505 #[must_use]
506 pub fn as_u8vec2(&self) -> crate::U8Vec2 {
507 crate::U8Vec2::new(self.x as u8, self.y as u8)
508 }
509
510 #[inline]
512 #[must_use]
513 pub fn as_i16vec2(&self) -> crate::I16Vec2 {
514 crate::I16Vec2::new(self.x as i16, self.y as i16)
515 }
516
517 #[inline]
519 #[must_use]
520 pub fn as_u16vec2(&self) -> crate::U16Vec2 {
521 crate::U16Vec2::new(self.x as u16, self.y as u16)
522 }
523
524 #[inline]
526 #[must_use]
527 pub fn as_ivec2(&self) -> crate::IVec2 {
528 crate::IVec2::new(self.x as i32, self.y as i32)
529 }
530
531 #[inline]
533 #[must_use]
534 pub fn as_uvec2(&self) -> crate::UVec2 {
535 crate::UVec2::new(self.x as u32, self.y as u32)
536 }
537
538 #[inline]
540 #[must_use]
541 pub fn as_u64vec2(&self) -> crate::U64Vec2 {
542 crate::U64Vec2::new(self.x as u64, self.y as u64)
543 }
544
545 #[inline]
547 #[must_use]
548 pub fn as_usizevec2(&self) -> crate::USizeVec2 {
549 crate::USizeVec2::new(self.x as usize, self.y as usize)
550 }
551
552 #[inline]
556 #[must_use]
557 pub const fn checked_add(self, rhs: Self) -> Option<Self> {
558 let x = match self.x.checked_add(rhs.x) {
559 Some(v) => v,
560 None => return None,
561 };
562 let y = match self.y.checked_add(rhs.y) {
563 Some(v) => v,
564 None => return None,
565 };
566
567 Some(Self { x, y })
568 }
569
570 #[inline]
574 #[must_use]
575 pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
576 let x = match self.x.checked_sub(rhs.x) {
577 Some(v) => v,
578 None => return None,
579 };
580 let y = match self.y.checked_sub(rhs.y) {
581 Some(v) => v,
582 None => return None,
583 };
584
585 Some(Self { x, y })
586 }
587
588 #[inline]
592 #[must_use]
593 pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
594 let x = match self.x.checked_mul(rhs.x) {
595 Some(v) => v,
596 None => return None,
597 };
598 let y = match self.y.checked_mul(rhs.y) {
599 Some(v) => v,
600 None => return None,
601 };
602
603 Some(Self { x, y })
604 }
605
606 #[inline]
610 #[must_use]
611 pub const fn checked_div(self, rhs: Self) -> Option<Self> {
612 let x = match self.x.checked_div(rhs.x) {
613 Some(v) => v,
614 None => return None,
615 };
616 let y = match self.y.checked_div(rhs.y) {
617 Some(v) => v,
618 None => return None,
619 };
620
621 Some(Self { x, y })
622 }
623
624 #[inline]
628 #[must_use]
629 pub const fn wrapping_add(self, rhs: Self) -> Self {
630 Self {
631 x: self.x.wrapping_add(rhs.x),
632 y: self.y.wrapping_add(rhs.y),
633 }
634 }
635
636 #[inline]
640 #[must_use]
641 pub const fn wrapping_sub(self, rhs: Self) -> Self {
642 Self {
643 x: self.x.wrapping_sub(rhs.x),
644 y: self.y.wrapping_sub(rhs.y),
645 }
646 }
647
648 #[inline]
652 #[must_use]
653 pub const fn wrapping_mul(self, rhs: Self) -> Self {
654 Self {
655 x: self.x.wrapping_mul(rhs.x),
656 y: self.y.wrapping_mul(rhs.y),
657 }
658 }
659
660 #[inline]
664 #[must_use]
665 pub const fn wrapping_div(self, rhs: Self) -> Self {
666 Self {
667 x: self.x.wrapping_div(rhs.x),
668 y: self.y.wrapping_div(rhs.y),
669 }
670 }
671
672 #[inline]
676 #[must_use]
677 pub const fn saturating_add(self, rhs: Self) -> Self {
678 Self {
679 x: self.x.saturating_add(rhs.x),
680 y: self.y.saturating_add(rhs.y),
681 }
682 }
683
684 #[inline]
688 #[must_use]
689 pub const fn saturating_sub(self, rhs: Self) -> Self {
690 Self {
691 x: self.x.saturating_sub(rhs.x),
692 y: self.y.saturating_sub(rhs.y),
693 }
694 }
695
696 #[inline]
700 #[must_use]
701 pub const fn saturating_mul(self, rhs: Self) -> Self {
702 Self {
703 x: self.x.saturating_mul(rhs.x),
704 y: self.y.saturating_mul(rhs.y),
705 }
706 }
707
708 #[inline]
712 #[must_use]
713 pub const fn saturating_div(self, rhs: Self) -> Self {
714 Self {
715 x: self.x.saturating_div(rhs.x),
716 y: self.y.saturating_div(rhs.y),
717 }
718 }
719
720 #[inline]
724 #[must_use]
725 pub const fn checked_add_unsigned(self, rhs: U64Vec2) -> Option<Self> {
726 let x = match self.x.checked_add_unsigned(rhs.x) {
727 Some(v) => v,
728 None => return None,
729 };
730 let y = match self.y.checked_add_unsigned(rhs.y) {
731 Some(v) => v,
732 None => return None,
733 };
734
735 Some(Self { x, y })
736 }
737
738 #[inline]
742 #[must_use]
743 pub const fn checked_sub_unsigned(self, rhs: U64Vec2) -> Option<Self> {
744 let x = match self.x.checked_sub_unsigned(rhs.x) {
745 Some(v) => v,
746 None => return None,
747 };
748 let y = match self.y.checked_sub_unsigned(rhs.y) {
749 Some(v) => v,
750 None => return None,
751 };
752
753 Some(Self { x, y })
754 }
755
756 #[inline]
760 #[must_use]
761 pub const fn wrapping_add_unsigned(self, rhs: U64Vec2) -> Self {
762 Self {
763 x: self.x.wrapping_add_unsigned(rhs.x),
764 y: self.y.wrapping_add_unsigned(rhs.y),
765 }
766 }
767
768 #[inline]
772 #[must_use]
773 pub const fn wrapping_sub_unsigned(self, rhs: U64Vec2) -> Self {
774 Self {
775 x: self.x.wrapping_sub_unsigned(rhs.x),
776 y: self.y.wrapping_sub_unsigned(rhs.y),
777 }
778 }
779
780 #[inline]
784 #[must_use]
785 pub const fn saturating_add_unsigned(self, rhs: U64Vec2) -> Self {
786 Self {
787 x: self.x.saturating_add_unsigned(rhs.x),
788 y: self.y.saturating_add_unsigned(rhs.y),
789 }
790 }
791
792 #[inline]
796 #[must_use]
797 pub const fn saturating_sub_unsigned(self, rhs: U64Vec2) -> Self {
798 Self {
799 x: self.x.saturating_sub_unsigned(rhs.x),
800 y: self.y.saturating_sub_unsigned(rhs.y),
801 }
802 }
803}
804
805impl Default for I64Vec2 {
806 #[inline(always)]
807 fn default() -> Self {
808 Self::ZERO
809 }
810}
811
812impl Div for I64Vec2 {
813 type Output = Self;
814 #[inline]
815 fn div(self, rhs: Self) -> Self {
816 Self {
817 x: self.x.div(rhs.x),
818 y: self.y.div(rhs.y),
819 }
820 }
821}
822
823impl Div<&Self> for I64Vec2 {
824 type Output = Self;
825 #[inline]
826 fn div(self, rhs: &Self) -> Self {
827 self.div(*rhs)
828 }
829}
830
831impl Div<&I64Vec2> for &I64Vec2 {
832 type Output = I64Vec2;
833 #[inline]
834 fn div(self, rhs: &I64Vec2) -> I64Vec2 {
835 (*self).div(*rhs)
836 }
837}
838
839impl Div<I64Vec2> for &I64Vec2 {
840 type Output = I64Vec2;
841 #[inline]
842 fn div(self, rhs: I64Vec2) -> I64Vec2 {
843 (*self).div(rhs)
844 }
845}
846
847impl DivAssign for I64Vec2 {
848 #[inline]
849 fn div_assign(&mut self, rhs: Self) {
850 self.x.div_assign(rhs.x);
851 self.y.div_assign(rhs.y);
852 }
853}
854
855impl DivAssign<&Self> for I64Vec2 {
856 #[inline]
857 fn div_assign(&mut self, rhs: &Self) {
858 self.div_assign(*rhs);
859 }
860}
861
862impl Div<i64> for I64Vec2 {
863 type Output = Self;
864 #[inline]
865 fn div(self, rhs: i64) -> Self {
866 Self {
867 x: self.x.div(rhs),
868 y: self.y.div(rhs),
869 }
870 }
871}
872
873impl Div<&i64> for I64Vec2 {
874 type Output = Self;
875 #[inline]
876 fn div(self, rhs: &i64) -> Self {
877 self.div(*rhs)
878 }
879}
880
881impl Div<&i64> for &I64Vec2 {
882 type Output = I64Vec2;
883 #[inline]
884 fn div(self, rhs: &i64) -> I64Vec2 {
885 (*self).div(*rhs)
886 }
887}
888
889impl Div<i64> for &I64Vec2 {
890 type Output = I64Vec2;
891 #[inline]
892 fn div(self, rhs: i64) -> I64Vec2 {
893 (*self).div(rhs)
894 }
895}
896
897impl DivAssign<i64> for I64Vec2 {
898 #[inline]
899 fn div_assign(&mut self, rhs: i64) {
900 self.x.div_assign(rhs);
901 self.y.div_assign(rhs);
902 }
903}
904
905impl DivAssign<&i64> for I64Vec2 {
906 #[inline]
907 fn div_assign(&mut self, rhs: &i64) {
908 self.div_assign(*rhs);
909 }
910}
911
912impl Div<I64Vec2> for i64 {
913 type Output = I64Vec2;
914 #[inline]
915 fn div(self, rhs: I64Vec2) -> I64Vec2 {
916 I64Vec2 {
917 x: self.div(rhs.x),
918 y: self.div(rhs.y),
919 }
920 }
921}
922
923impl Div<&I64Vec2> for i64 {
924 type Output = I64Vec2;
925 #[inline]
926 fn div(self, rhs: &I64Vec2) -> I64Vec2 {
927 self.div(*rhs)
928 }
929}
930
931impl Div<&I64Vec2> for &i64 {
932 type Output = I64Vec2;
933 #[inline]
934 fn div(self, rhs: &I64Vec2) -> I64Vec2 {
935 (*self).div(*rhs)
936 }
937}
938
939impl Div<I64Vec2> for &i64 {
940 type Output = I64Vec2;
941 #[inline]
942 fn div(self, rhs: I64Vec2) -> I64Vec2 {
943 (*self).div(rhs)
944 }
945}
946
947impl Mul for I64Vec2 {
948 type Output = Self;
949 #[inline]
950 fn mul(self, rhs: Self) -> Self {
951 Self {
952 x: self.x.mul(rhs.x),
953 y: self.y.mul(rhs.y),
954 }
955 }
956}
957
958impl Mul<&Self> for I64Vec2 {
959 type Output = Self;
960 #[inline]
961 fn mul(self, rhs: &Self) -> Self {
962 self.mul(*rhs)
963 }
964}
965
966impl Mul<&I64Vec2> for &I64Vec2 {
967 type Output = I64Vec2;
968 #[inline]
969 fn mul(self, rhs: &I64Vec2) -> I64Vec2 {
970 (*self).mul(*rhs)
971 }
972}
973
974impl Mul<I64Vec2> for &I64Vec2 {
975 type Output = I64Vec2;
976 #[inline]
977 fn mul(self, rhs: I64Vec2) -> I64Vec2 {
978 (*self).mul(rhs)
979 }
980}
981
982impl MulAssign for I64Vec2 {
983 #[inline]
984 fn mul_assign(&mut self, rhs: Self) {
985 self.x.mul_assign(rhs.x);
986 self.y.mul_assign(rhs.y);
987 }
988}
989
990impl MulAssign<&Self> for I64Vec2 {
991 #[inline]
992 fn mul_assign(&mut self, rhs: &Self) {
993 self.mul_assign(*rhs);
994 }
995}
996
997impl Mul<i64> for I64Vec2 {
998 type Output = Self;
999 #[inline]
1000 fn mul(self, rhs: i64) -> Self {
1001 Self {
1002 x: self.x.mul(rhs),
1003 y: self.y.mul(rhs),
1004 }
1005 }
1006}
1007
1008impl Mul<&i64> for I64Vec2 {
1009 type Output = Self;
1010 #[inline]
1011 fn mul(self, rhs: &i64) -> Self {
1012 self.mul(*rhs)
1013 }
1014}
1015
1016impl Mul<&i64> for &I64Vec2 {
1017 type Output = I64Vec2;
1018 #[inline]
1019 fn mul(self, rhs: &i64) -> I64Vec2 {
1020 (*self).mul(*rhs)
1021 }
1022}
1023
1024impl Mul<i64> for &I64Vec2 {
1025 type Output = I64Vec2;
1026 #[inline]
1027 fn mul(self, rhs: i64) -> I64Vec2 {
1028 (*self).mul(rhs)
1029 }
1030}
1031
1032impl MulAssign<i64> for I64Vec2 {
1033 #[inline]
1034 fn mul_assign(&mut self, rhs: i64) {
1035 self.x.mul_assign(rhs);
1036 self.y.mul_assign(rhs);
1037 }
1038}
1039
1040impl MulAssign<&i64> for I64Vec2 {
1041 #[inline]
1042 fn mul_assign(&mut self, rhs: &i64) {
1043 self.mul_assign(*rhs);
1044 }
1045}
1046
1047impl Mul<I64Vec2> for i64 {
1048 type Output = I64Vec2;
1049 #[inline]
1050 fn mul(self, rhs: I64Vec2) -> I64Vec2 {
1051 I64Vec2 {
1052 x: self.mul(rhs.x),
1053 y: self.mul(rhs.y),
1054 }
1055 }
1056}
1057
1058impl Mul<&I64Vec2> for i64 {
1059 type Output = I64Vec2;
1060 #[inline]
1061 fn mul(self, rhs: &I64Vec2) -> I64Vec2 {
1062 self.mul(*rhs)
1063 }
1064}
1065
1066impl Mul<&I64Vec2> for &i64 {
1067 type Output = I64Vec2;
1068 #[inline]
1069 fn mul(self, rhs: &I64Vec2) -> I64Vec2 {
1070 (*self).mul(*rhs)
1071 }
1072}
1073
1074impl Mul<I64Vec2> for &i64 {
1075 type Output = I64Vec2;
1076 #[inline]
1077 fn mul(self, rhs: I64Vec2) -> I64Vec2 {
1078 (*self).mul(rhs)
1079 }
1080}
1081
1082impl Add for I64Vec2 {
1083 type Output = Self;
1084 #[inline]
1085 fn add(self, rhs: Self) -> Self {
1086 Self {
1087 x: self.x.add(rhs.x),
1088 y: self.y.add(rhs.y),
1089 }
1090 }
1091}
1092
1093impl Add<&Self> for I64Vec2 {
1094 type Output = Self;
1095 #[inline]
1096 fn add(self, rhs: &Self) -> Self {
1097 self.add(*rhs)
1098 }
1099}
1100
1101impl Add<&I64Vec2> for &I64Vec2 {
1102 type Output = I64Vec2;
1103 #[inline]
1104 fn add(self, rhs: &I64Vec2) -> I64Vec2 {
1105 (*self).add(*rhs)
1106 }
1107}
1108
1109impl Add<I64Vec2> for &I64Vec2 {
1110 type Output = I64Vec2;
1111 #[inline]
1112 fn add(self, rhs: I64Vec2) -> I64Vec2 {
1113 (*self).add(rhs)
1114 }
1115}
1116
1117impl AddAssign for I64Vec2 {
1118 #[inline]
1119 fn add_assign(&mut self, rhs: Self) {
1120 self.x.add_assign(rhs.x);
1121 self.y.add_assign(rhs.y);
1122 }
1123}
1124
1125impl AddAssign<&Self> for I64Vec2 {
1126 #[inline]
1127 fn add_assign(&mut self, rhs: &Self) {
1128 self.add_assign(*rhs);
1129 }
1130}
1131
1132impl Add<i64> for I64Vec2 {
1133 type Output = Self;
1134 #[inline]
1135 fn add(self, rhs: i64) -> Self {
1136 Self {
1137 x: self.x.add(rhs),
1138 y: self.y.add(rhs),
1139 }
1140 }
1141}
1142
1143impl Add<&i64> for I64Vec2 {
1144 type Output = Self;
1145 #[inline]
1146 fn add(self, rhs: &i64) -> Self {
1147 self.add(*rhs)
1148 }
1149}
1150
1151impl Add<&i64> for &I64Vec2 {
1152 type Output = I64Vec2;
1153 #[inline]
1154 fn add(self, rhs: &i64) -> I64Vec2 {
1155 (*self).add(*rhs)
1156 }
1157}
1158
1159impl Add<i64> for &I64Vec2 {
1160 type Output = I64Vec2;
1161 #[inline]
1162 fn add(self, rhs: i64) -> I64Vec2 {
1163 (*self).add(rhs)
1164 }
1165}
1166
1167impl AddAssign<i64> for I64Vec2 {
1168 #[inline]
1169 fn add_assign(&mut self, rhs: i64) {
1170 self.x.add_assign(rhs);
1171 self.y.add_assign(rhs);
1172 }
1173}
1174
1175impl AddAssign<&i64> for I64Vec2 {
1176 #[inline]
1177 fn add_assign(&mut self, rhs: &i64) {
1178 self.add_assign(*rhs);
1179 }
1180}
1181
1182impl Add<I64Vec2> for i64 {
1183 type Output = I64Vec2;
1184 #[inline]
1185 fn add(self, rhs: I64Vec2) -> I64Vec2 {
1186 I64Vec2 {
1187 x: self.add(rhs.x),
1188 y: self.add(rhs.y),
1189 }
1190 }
1191}
1192
1193impl Add<&I64Vec2> for i64 {
1194 type Output = I64Vec2;
1195 #[inline]
1196 fn add(self, rhs: &I64Vec2) -> I64Vec2 {
1197 self.add(*rhs)
1198 }
1199}
1200
1201impl Add<&I64Vec2> for &i64 {
1202 type Output = I64Vec2;
1203 #[inline]
1204 fn add(self, rhs: &I64Vec2) -> I64Vec2 {
1205 (*self).add(*rhs)
1206 }
1207}
1208
1209impl Add<I64Vec2> for &i64 {
1210 type Output = I64Vec2;
1211 #[inline]
1212 fn add(self, rhs: I64Vec2) -> I64Vec2 {
1213 (*self).add(rhs)
1214 }
1215}
1216
1217impl Sub for I64Vec2 {
1218 type Output = Self;
1219 #[inline]
1220 fn sub(self, rhs: Self) -> Self {
1221 Self {
1222 x: self.x.sub(rhs.x),
1223 y: self.y.sub(rhs.y),
1224 }
1225 }
1226}
1227
1228impl Sub<&Self> for I64Vec2 {
1229 type Output = Self;
1230 #[inline]
1231 fn sub(self, rhs: &Self) -> Self {
1232 self.sub(*rhs)
1233 }
1234}
1235
1236impl Sub<&I64Vec2> for &I64Vec2 {
1237 type Output = I64Vec2;
1238 #[inline]
1239 fn sub(self, rhs: &I64Vec2) -> I64Vec2 {
1240 (*self).sub(*rhs)
1241 }
1242}
1243
1244impl Sub<I64Vec2> for &I64Vec2 {
1245 type Output = I64Vec2;
1246 #[inline]
1247 fn sub(self, rhs: I64Vec2) -> I64Vec2 {
1248 (*self).sub(rhs)
1249 }
1250}
1251
1252impl SubAssign for I64Vec2 {
1253 #[inline]
1254 fn sub_assign(&mut self, rhs: Self) {
1255 self.x.sub_assign(rhs.x);
1256 self.y.sub_assign(rhs.y);
1257 }
1258}
1259
1260impl SubAssign<&Self> for I64Vec2 {
1261 #[inline]
1262 fn sub_assign(&mut self, rhs: &Self) {
1263 self.sub_assign(*rhs);
1264 }
1265}
1266
1267impl Sub<i64> for I64Vec2 {
1268 type Output = Self;
1269 #[inline]
1270 fn sub(self, rhs: i64) -> Self {
1271 Self {
1272 x: self.x.sub(rhs),
1273 y: self.y.sub(rhs),
1274 }
1275 }
1276}
1277
1278impl Sub<&i64> for I64Vec2 {
1279 type Output = Self;
1280 #[inline]
1281 fn sub(self, rhs: &i64) -> Self {
1282 self.sub(*rhs)
1283 }
1284}
1285
1286impl Sub<&i64> for &I64Vec2 {
1287 type Output = I64Vec2;
1288 #[inline]
1289 fn sub(self, rhs: &i64) -> I64Vec2 {
1290 (*self).sub(*rhs)
1291 }
1292}
1293
1294impl Sub<i64> for &I64Vec2 {
1295 type Output = I64Vec2;
1296 #[inline]
1297 fn sub(self, rhs: i64) -> I64Vec2 {
1298 (*self).sub(rhs)
1299 }
1300}
1301
1302impl SubAssign<i64> for I64Vec2 {
1303 #[inline]
1304 fn sub_assign(&mut self, rhs: i64) {
1305 self.x.sub_assign(rhs);
1306 self.y.sub_assign(rhs);
1307 }
1308}
1309
1310impl SubAssign<&i64> for I64Vec2 {
1311 #[inline]
1312 fn sub_assign(&mut self, rhs: &i64) {
1313 self.sub_assign(*rhs);
1314 }
1315}
1316
1317impl Sub<I64Vec2> for i64 {
1318 type Output = I64Vec2;
1319 #[inline]
1320 fn sub(self, rhs: I64Vec2) -> I64Vec2 {
1321 I64Vec2 {
1322 x: self.sub(rhs.x),
1323 y: self.sub(rhs.y),
1324 }
1325 }
1326}
1327
1328impl Sub<&I64Vec2> for i64 {
1329 type Output = I64Vec2;
1330 #[inline]
1331 fn sub(self, rhs: &I64Vec2) -> I64Vec2 {
1332 self.sub(*rhs)
1333 }
1334}
1335
1336impl Sub<&I64Vec2> for &i64 {
1337 type Output = I64Vec2;
1338 #[inline]
1339 fn sub(self, rhs: &I64Vec2) -> I64Vec2 {
1340 (*self).sub(*rhs)
1341 }
1342}
1343
1344impl Sub<I64Vec2> for &i64 {
1345 type Output = I64Vec2;
1346 #[inline]
1347 fn sub(self, rhs: I64Vec2) -> I64Vec2 {
1348 (*self).sub(rhs)
1349 }
1350}
1351
1352impl Rem for I64Vec2 {
1353 type Output = Self;
1354 #[inline]
1355 fn rem(self, rhs: Self) -> Self {
1356 Self {
1357 x: self.x.rem(rhs.x),
1358 y: self.y.rem(rhs.y),
1359 }
1360 }
1361}
1362
1363impl Rem<&Self> for I64Vec2 {
1364 type Output = Self;
1365 #[inline]
1366 fn rem(self, rhs: &Self) -> Self {
1367 self.rem(*rhs)
1368 }
1369}
1370
1371impl Rem<&I64Vec2> for &I64Vec2 {
1372 type Output = I64Vec2;
1373 #[inline]
1374 fn rem(self, rhs: &I64Vec2) -> I64Vec2 {
1375 (*self).rem(*rhs)
1376 }
1377}
1378
1379impl Rem<I64Vec2> for &I64Vec2 {
1380 type Output = I64Vec2;
1381 #[inline]
1382 fn rem(self, rhs: I64Vec2) -> I64Vec2 {
1383 (*self).rem(rhs)
1384 }
1385}
1386
1387impl RemAssign for I64Vec2 {
1388 #[inline]
1389 fn rem_assign(&mut self, rhs: Self) {
1390 self.x.rem_assign(rhs.x);
1391 self.y.rem_assign(rhs.y);
1392 }
1393}
1394
1395impl RemAssign<&Self> for I64Vec2 {
1396 #[inline]
1397 fn rem_assign(&mut self, rhs: &Self) {
1398 self.rem_assign(*rhs);
1399 }
1400}
1401
1402impl Rem<i64> for I64Vec2 {
1403 type Output = Self;
1404 #[inline]
1405 fn rem(self, rhs: i64) -> Self {
1406 Self {
1407 x: self.x.rem(rhs),
1408 y: self.y.rem(rhs),
1409 }
1410 }
1411}
1412
1413impl Rem<&i64> for I64Vec2 {
1414 type Output = Self;
1415 #[inline]
1416 fn rem(self, rhs: &i64) -> Self {
1417 self.rem(*rhs)
1418 }
1419}
1420
1421impl Rem<&i64> for &I64Vec2 {
1422 type Output = I64Vec2;
1423 #[inline]
1424 fn rem(self, rhs: &i64) -> I64Vec2 {
1425 (*self).rem(*rhs)
1426 }
1427}
1428
1429impl Rem<i64> for &I64Vec2 {
1430 type Output = I64Vec2;
1431 #[inline]
1432 fn rem(self, rhs: i64) -> I64Vec2 {
1433 (*self).rem(rhs)
1434 }
1435}
1436
1437impl RemAssign<i64> for I64Vec2 {
1438 #[inline]
1439 fn rem_assign(&mut self, rhs: i64) {
1440 self.x.rem_assign(rhs);
1441 self.y.rem_assign(rhs);
1442 }
1443}
1444
1445impl RemAssign<&i64> for I64Vec2 {
1446 #[inline]
1447 fn rem_assign(&mut self, rhs: &i64) {
1448 self.rem_assign(*rhs);
1449 }
1450}
1451
1452impl Rem<I64Vec2> for i64 {
1453 type Output = I64Vec2;
1454 #[inline]
1455 fn rem(self, rhs: I64Vec2) -> I64Vec2 {
1456 I64Vec2 {
1457 x: self.rem(rhs.x),
1458 y: self.rem(rhs.y),
1459 }
1460 }
1461}
1462
1463impl Rem<&I64Vec2> for i64 {
1464 type Output = I64Vec2;
1465 #[inline]
1466 fn rem(self, rhs: &I64Vec2) -> I64Vec2 {
1467 self.rem(*rhs)
1468 }
1469}
1470
1471impl Rem<&I64Vec2> for &i64 {
1472 type Output = I64Vec2;
1473 #[inline]
1474 fn rem(self, rhs: &I64Vec2) -> I64Vec2 {
1475 (*self).rem(*rhs)
1476 }
1477}
1478
1479impl Rem<I64Vec2> for &i64 {
1480 type Output = I64Vec2;
1481 #[inline]
1482 fn rem(self, rhs: I64Vec2) -> I64Vec2 {
1483 (*self).rem(rhs)
1484 }
1485}
1486
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[i64; 2]> for I64Vec2 {
    /// Reinterprets `self` as a borrowed `[i64; 2]`.
    #[inline]
    fn as_ref(&self) -> &[i64; 2] {
        // SAFETY: on non-spirv targets `I64Vec2` is `repr(C)` with exactly two
        // `i64` fields and no padding, so its layout matches `[i64; 2]`, and the
        // lifetime of the returned reference is tied to `self`.
        unsafe { &*(self as *const Self as *const [i64; 2]) }
    }
}
1494
#[cfg(not(target_arch = "spirv"))]
impl AsMut<[i64; 2]> for I64Vec2 {
    /// Reinterprets `self` as a mutably borrowed `[i64; 2]`.
    #[inline]
    fn as_mut(&mut self) -> &mut [i64; 2] {
        // SAFETY: on non-spirv targets `I64Vec2` is `repr(C)` with exactly two
        // `i64` fields and no padding, so its layout matches `[i64; 2]`; the
        // exclusive borrow of `self` guarantees unique access.
        unsafe { &mut *(self as *mut Self as *mut [i64; 2]) }
    }
}
1502
1503impl Sum for I64Vec2 {
1504 #[inline]
1505 fn sum<I>(iter: I) -> Self
1506 where
1507 I: Iterator<Item = Self>,
1508 {
1509 iter.fold(Self::ZERO, Self::add)
1510 }
1511}
1512
1513impl<'a> Sum<&'a Self> for I64Vec2 {
1514 #[inline]
1515 fn sum<I>(iter: I) -> Self
1516 where
1517 I: Iterator<Item = &'a Self>,
1518 {
1519 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1520 }
1521}
1522
1523impl Product for I64Vec2 {
1524 #[inline]
1525 fn product<I>(iter: I) -> Self
1526 where
1527 I: Iterator<Item = Self>,
1528 {
1529 iter.fold(Self::ONE, Self::mul)
1530 }
1531}
1532
1533impl<'a> Product<&'a Self> for I64Vec2 {
1534 #[inline]
1535 fn product<I>(iter: I) -> Self
1536 where
1537 I: Iterator<Item = &'a Self>,
1538 {
1539 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1540 }
1541}
1542
1543impl Neg for I64Vec2 {
1544 type Output = Self;
1545 #[inline]
1546 fn neg(self) -> Self {
1547 Self {
1548 x: self.x.neg(),
1549 y: self.y.neg(),
1550 }
1551 }
1552}
1553
1554impl Neg for &I64Vec2 {
1555 type Output = I64Vec2;
1556 #[inline]
1557 fn neg(self) -> I64Vec2 {
1558 (*self).neg()
1559 }
1560}
1561
1562impl Not for I64Vec2 {
1563 type Output = Self;
1564 #[inline]
1565 fn not(self) -> Self {
1566 Self {
1567 x: self.x.not(),
1568 y: self.y.not(),
1569 }
1570 }
1571}
1572
1573impl Not for &I64Vec2 {
1574 type Output = I64Vec2;
1575 #[inline]
1576 fn not(self) -> I64Vec2 {
1577 (*self).not()
1578 }
1579}
1580
1581impl BitAnd for I64Vec2 {
1582 type Output = Self;
1583 #[inline]
1584 fn bitand(self, rhs: Self) -> Self::Output {
1585 Self {
1586 x: self.x.bitand(rhs.x),
1587 y: self.y.bitand(rhs.y),
1588 }
1589 }
1590}
1591
1592impl BitAnd<&Self> for I64Vec2 {
1593 type Output = Self;
1594 #[inline]
1595 fn bitand(self, rhs: &Self) -> Self {
1596 self.bitand(*rhs)
1597 }
1598}
1599
1600impl BitAnd<&I64Vec2> for &I64Vec2 {
1601 type Output = I64Vec2;
1602 #[inline]
1603 fn bitand(self, rhs: &I64Vec2) -> I64Vec2 {
1604 (*self).bitand(*rhs)
1605 }
1606}
1607
1608impl BitAnd<I64Vec2> for &I64Vec2 {
1609 type Output = I64Vec2;
1610 #[inline]
1611 fn bitand(self, rhs: I64Vec2) -> I64Vec2 {
1612 (*self).bitand(rhs)
1613 }
1614}
1615
1616impl BitAndAssign for I64Vec2 {
1617 #[inline]
1618 fn bitand_assign(&mut self, rhs: Self) {
1619 *self = self.bitand(rhs);
1620 }
1621}
1622
1623impl BitAndAssign<&Self> for I64Vec2 {
1624 #[inline]
1625 fn bitand_assign(&mut self, rhs: &Self) {
1626 self.bitand_assign(*rhs);
1627 }
1628}
1629
1630impl BitOr for I64Vec2 {
1631 type Output = Self;
1632 #[inline]
1633 fn bitor(self, rhs: Self) -> Self::Output {
1634 Self {
1635 x: self.x.bitor(rhs.x),
1636 y: self.y.bitor(rhs.y),
1637 }
1638 }
1639}
1640
1641impl BitOr<&Self> for I64Vec2 {
1642 type Output = Self;
1643 #[inline]
1644 fn bitor(self, rhs: &Self) -> Self {
1645 self.bitor(*rhs)
1646 }
1647}
1648
1649impl BitOr<&I64Vec2> for &I64Vec2 {
1650 type Output = I64Vec2;
1651 #[inline]
1652 fn bitor(self, rhs: &I64Vec2) -> I64Vec2 {
1653 (*self).bitor(*rhs)
1654 }
1655}
1656
1657impl BitOr<I64Vec2> for &I64Vec2 {
1658 type Output = I64Vec2;
1659 #[inline]
1660 fn bitor(self, rhs: I64Vec2) -> I64Vec2 {
1661 (*self).bitor(rhs)
1662 }
1663}
1664
1665impl BitOrAssign for I64Vec2 {
1666 #[inline]
1667 fn bitor_assign(&mut self, rhs: Self) {
1668 *self = self.bitor(rhs);
1669 }
1670}
1671
1672impl BitOrAssign<&Self> for I64Vec2 {
1673 #[inline]
1674 fn bitor_assign(&mut self, rhs: &Self) {
1675 self.bitor_assign(*rhs);
1676 }
1677}
1678
1679impl BitXor for I64Vec2 {
1680 type Output = Self;
1681 #[inline]
1682 fn bitxor(self, rhs: Self) -> Self::Output {
1683 Self {
1684 x: self.x.bitxor(rhs.x),
1685 y: self.y.bitxor(rhs.y),
1686 }
1687 }
1688}
1689
1690impl BitXor<&Self> for I64Vec2 {
1691 type Output = Self;
1692 #[inline]
1693 fn bitxor(self, rhs: &Self) -> Self {
1694 self.bitxor(*rhs)
1695 }
1696}
1697
1698impl BitXor<&I64Vec2> for &I64Vec2 {
1699 type Output = I64Vec2;
1700 #[inline]
1701 fn bitxor(self, rhs: &I64Vec2) -> I64Vec2 {
1702 (*self).bitxor(*rhs)
1703 }
1704}
1705
1706impl BitXor<I64Vec2> for &I64Vec2 {
1707 type Output = I64Vec2;
1708 #[inline]
1709 fn bitxor(self, rhs: I64Vec2) -> I64Vec2 {
1710 (*self).bitxor(rhs)
1711 }
1712}
1713
1714impl BitXorAssign for I64Vec2 {
1715 #[inline]
1716 fn bitxor_assign(&mut self, rhs: Self) {
1717 *self = self.bitxor(rhs);
1718 }
1719}
1720
1721impl BitXorAssign<&Self> for I64Vec2 {
1722 #[inline]
1723 fn bitxor_assign(&mut self, rhs: &Self) {
1724 self.bitxor_assign(*rhs);
1725 }
1726}
1727
1728impl BitAnd<i64> for I64Vec2 {
1729 type Output = Self;
1730 #[inline]
1731 fn bitand(self, rhs: i64) -> Self::Output {
1732 Self {
1733 x: self.x.bitand(rhs),
1734 y: self.y.bitand(rhs),
1735 }
1736 }
1737}
1738
1739impl BitAnd<&i64> for I64Vec2 {
1740 type Output = Self;
1741 #[inline]
1742 fn bitand(self, rhs: &i64) -> Self {
1743 self.bitand(*rhs)
1744 }
1745}
1746
1747impl BitAnd<&i64> for &I64Vec2 {
1748 type Output = I64Vec2;
1749 #[inline]
1750 fn bitand(self, rhs: &i64) -> I64Vec2 {
1751 (*self).bitand(*rhs)
1752 }
1753}
1754
1755impl BitAnd<i64> for &I64Vec2 {
1756 type Output = I64Vec2;
1757 #[inline]
1758 fn bitand(self, rhs: i64) -> I64Vec2 {
1759 (*self).bitand(rhs)
1760 }
1761}
1762
1763impl BitAndAssign<i64> for I64Vec2 {
1764 #[inline]
1765 fn bitand_assign(&mut self, rhs: i64) {
1766 *self = self.bitand(rhs);
1767 }
1768}
1769
1770impl BitAndAssign<&i64> for I64Vec2 {
1771 #[inline]
1772 fn bitand_assign(&mut self, rhs: &i64) {
1773 self.bitand_assign(*rhs);
1774 }
1775}
1776
1777impl BitOr<i64> for I64Vec2 {
1778 type Output = Self;
1779 #[inline]
1780 fn bitor(self, rhs: i64) -> Self::Output {
1781 Self {
1782 x: self.x.bitor(rhs),
1783 y: self.y.bitor(rhs),
1784 }
1785 }
1786}
1787
1788impl BitOr<&i64> for I64Vec2 {
1789 type Output = Self;
1790 #[inline]
1791 fn bitor(self, rhs: &i64) -> Self {
1792 self.bitor(*rhs)
1793 }
1794}
1795
1796impl BitOr<&i64> for &I64Vec2 {
1797 type Output = I64Vec2;
1798 #[inline]
1799 fn bitor(self, rhs: &i64) -> I64Vec2 {
1800 (*self).bitor(*rhs)
1801 }
1802}
1803
1804impl BitOr<i64> for &I64Vec2 {
1805 type Output = I64Vec2;
1806 #[inline]
1807 fn bitor(self, rhs: i64) -> I64Vec2 {
1808 (*self).bitor(rhs)
1809 }
1810}
1811
1812impl BitOrAssign<i64> for I64Vec2 {
1813 #[inline]
1814 fn bitor_assign(&mut self, rhs: i64) {
1815 *self = self.bitor(rhs);
1816 }
1817}
1818
1819impl BitOrAssign<&i64> for I64Vec2 {
1820 #[inline]
1821 fn bitor_assign(&mut self, rhs: &i64) {
1822 self.bitor_assign(*rhs);
1823 }
1824}
1825
1826impl BitXor<i64> for I64Vec2 {
1827 type Output = Self;
1828 #[inline]
1829 fn bitxor(self, rhs: i64) -> Self::Output {
1830 Self {
1831 x: self.x.bitxor(rhs),
1832 y: self.y.bitxor(rhs),
1833 }
1834 }
1835}
1836
1837impl BitXor<&i64> for I64Vec2 {
1838 type Output = Self;
1839 #[inline]
1840 fn bitxor(self, rhs: &i64) -> Self {
1841 self.bitxor(*rhs)
1842 }
1843}
1844
1845impl BitXor<&i64> for &I64Vec2 {
1846 type Output = I64Vec2;
1847 #[inline]
1848 fn bitxor(self, rhs: &i64) -> I64Vec2 {
1849 (*self).bitxor(*rhs)
1850 }
1851}
1852
1853impl BitXor<i64> for &I64Vec2 {
1854 type Output = I64Vec2;
1855 #[inline]
1856 fn bitxor(self, rhs: i64) -> I64Vec2 {
1857 (*self).bitxor(rhs)
1858 }
1859}
1860
1861impl BitXorAssign<i64> for I64Vec2 {
1862 #[inline]
1863 fn bitxor_assign(&mut self, rhs: i64) {
1864 *self = self.bitxor(rhs);
1865 }
1866}
1867
1868impl BitXorAssign<&i64> for I64Vec2 {
1869 #[inline]
1870 fn bitxor_assign(&mut self, rhs: &i64) {
1871 self.bitxor_assign(*rhs);
1872 }
1873}
1874
1875impl Shl<i8> for I64Vec2 {
1876 type Output = Self;
1877 #[inline]
1878 fn shl(self, rhs: i8) -> Self::Output {
1879 Self {
1880 x: self.x.shl(rhs),
1881 y: self.y.shl(rhs),
1882 }
1883 }
1884}
1885
1886impl Shl<&i8> for I64Vec2 {
1887 type Output = Self;
1888 #[inline]
1889 fn shl(self, rhs: &i8) -> Self {
1890 self.shl(*rhs)
1891 }
1892}
1893
1894impl Shl<&i8> for &I64Vec2 {
1895 type Output = I64Vec2;
1896 #[inline]
1897 fn shl(self, rhs: &i8) -> I64Vec2 {
1898 (*self).shl(*rhs)
1899 }
1900}
1901
1902impl Shl<i8> for &I64Vec2 {
1903 type Output = I64Vec2;
1904 #[inline]
1905 fn shl(self, rhs: i8) -> I64Vec2 {
1906 (*self).shl(rhs)
1907 }
1908}
1909
1910impl ShlAssign<i8> for I64Vec2 {
1911 #[inline]
1912 fn shl_assign(&mut self, rhs: i8) {
1913 *self = self.shl(rhs);
1914 }
1915}
1916
1917impl ShlAssign<&i8> for I64Vec2 {
1918 #[inline]
1919 fn shl_assign(&mut self, rhs: &i8) {
1920 self.shl_assign(*rhs);
1921 }
1922}
1923
1924impl Shr<i8> for I64Vec2 {
1925 type Output = Self;
1926 #[inline]
1927 fn shr(self, rhs: i8) -> Self::Output {
1928 Self {
1929 x: self.x.shr(rhs),
1930 y: self.y.shr(rhs),
1931 }
1932 }
1933}
1934
1935impl Shr<&i8> for I64Vec2 {
1936 type Output = Self;
1937 #[inline]
1938 fn shr(self, rhs: &i8) -> Self {
1939 self.shr(*rhs)
1940 }
1941}
1942
1943impl Shr<&i8> for &I64Vec2 {
1944 type Output = I64Vec2;
1945 #[inline]
1946 fn shr(self, rhs: &i8) -> I64Vec2 {
1947 (*self).shr(*rhs)
1948 }
1949}
1950
1951impl Shr<i8> for &I64Vec2 {
1952 type Output = I64Vec2;
1953 #[inline]
1954 fn shr(self, rhs: i8) -> I64Vec2 {
1955 (*self).shr(rhs)
1956 }
1957}
1958
1959impl ShrAssign<i8> for I64Vec2 {
1960 #[inline]
1961 fn shr_assign(&mut self, rhs: i8) {
1962 *self = self.shr(rhs);
1963 }
1964}
1965
1966impl ShrAssign<&i8> for I64Vec2 {
1967 #[inline]
1968 fn shr_assign(&mut self, rhs: &i8) {
1969 self.shr_assign(*rhs);
1970 }
1971}
1972
1973impl Shl<i16> for I64Vec2 {
1974 type Output = Self;
1975 #[inline]
1976 fn shl(self, rhs: i16) -> Self::Output {
1977 Self {
1978 x: self.x.shl(rhs),
1979 y: self.y.shl(rhs),
1980 }
1981 }
1982}
1983
1984impl Shl<&i16> for I64Vec2 {
1985 type Output = Self;
1986 #[inline]
1987 fn shl(self, rhs: &i16) -> Self {
1988 self.shl(*rhs)
1989 }
1990}
1991
1992impl Shl<&i16> for &I64Vec2 {
1993 type Output = I64Vec2;
1994 #[inline]
1995 fn shl(self, rhs: &i16) -> I64Vec2 {
1996 (*self).shl(*rhs)
1997 }
1998}
1999
2000impl Shl<i16> for &I64Vec2 {
2001 type Output = I64Vec2;
2002 #[inline]
2003 fn shl(self, rhs: i16) -> I64Vec2 {
2004 (*self).shl(rhs)
2005 }
2006}
2007
2008impl ShlAssign<i16> for I64Vec2 {
2009 #[inline]
2010 fn shl_assign(&mut self, rhs: i16) {
2011 *self = self.shl(rhs);
2012 }
2013}
2014
2015impl ShlAssign<&i16> for I64Vec2 {
2016 #[inline]
2017 fn shl_assign(&mut self, rhs: &i16) {
2018 self.shl_assign(*rhs);
2019 }
2020}
2021
2022impl Shr<i16> for I64Vec2 {
2023 type Output = Self;
2024 #[inline]
2025 fn shr(self, rhs: i16) -> Self::Output {
2026 Self {
2027 x: self.x.shr(rhs),
2028 y: self.y.shr(rhs),
2029 }
2030 }
2031}
2032
2033impl Shr<&i16> for I64Vec2 {
2034 type Output = Self;
2035 #[inline]
2036 fn shr(self, rhs: &i16) -> Self {
2037 self.shr(*rhs)
2038 }
2039}
2040
2041impl Shr<&i16> for &I64Vec2 {
2042 type Output = I64Vec2;
2043 #[inline]
2044 fn shr(self, rhs: &i16) -> I64Vec2 {
2045 (*self).shr(*rhs)
2046 }
2047}
2048
2049impl Shr<i16> for &I64Vec2 {
2050 type Output = I64Vec2;
2051 #[inline]
2052 fn shr(self, rhs: i16) -> I64Vec2 {
2053 (*self).shr(rhs)
2054 }
2055}
2056
2057impl ShrAssign<i16> for I64Vec2 {
2058 #[inline]
2059 fn shr_assign(&mut self, rhs: i16) {
2060 *self = self.shr(rhs);
2061 }
2062}
2063
2064impl ShrAssign<&i16> for I64Vec2 {
2065 #[inline]
2066 fn shr_assign(&mut self, rhs: &i16) {
2067 self.shr_assign(*rhs);
2068 }
2069}
2070
2071impl Shl<i32> for I64Vec2 {
2072 type Output = Self;
2073 #[inline]
2074 fn shl(self, rhs: i32) -> Self::Output {
2075 Self {
2076 x: self.x.shl(rhs),
2077 y: self.y.shl(rhs),
2078 }
2079 }
2080}
2081
2082impl Shl<&i32> for I64Vec2 {
2083 type Output = Self;
2084 #[inline]
2085 fn shl(self, rhs: &i32) -> Self {
2086 self.shl(*rhs)
2087 }
2088}
2089
2090impl Shl<&i32> for &I64Vec2 {
2091 type Output = I64Vec2;
2092 #[inline]
2093 fn shl(self, rhs: &i32) -> I64Vec2 {
2094 (*self).shl(*rhs)
2095 }
2096}
2097
2098impl Shl<i32> for &I64Vec2 {
2099 type Output = I64Vec2;
2100 #[inline]
2101 fn shl(self, rhs: i32) -> I64Vec2 {
2102 (*self).shl(rhs)
2103 }
2104}
2105
2106impl ShlAssign<i32> for I64Vec2 {
2107 #[inline]
2108 fn shl_assign(&mut self, rhs: i32) {
2109 *self = self.shl(rhs);
2110 }
2111}
2112
2113impl ShlAssign<&i32> for I64Vec2 {
2114 #[inline]
2115 fn shl_assign(&mut self, rhs: &i32) {
2116 self.shl_assign(*rhs);
2117 }
2118}
2119
2120impl Shr<i32> for I64Vec2 {
2121 type Output = Self;
2122 #[inline]
2123 fn shr(self, rhs: i32) -> Self::Output {
2124 Self {
2125 x: self.x.shr(rhs),
2126 y: self.y.shr(rhs),
2127 }
2128 }
2129}
2130
2131impl Shr<&i32> for I64Vec2 {
2132 type Output = Self;
2133 #[inline]
2134 fn shr(self, rhs: &i32) -> Self {
2135 self.shr(*rhs)
2136 }
2137}
2138
2139impl Shr<&i32> for &I64Vec2 {
2140 type Output = I64Vec2;
2141 #[inline]
2142 fn shr(self, rhs: &i32) -> I64Vec2 {
2143 (*self).shr(*rhs)
2144 }
2145}
2146
2147impl Shr<i32> for &I64Vec2 {
2148 type Output = I64Vec2;
2149 #[inline]
2150 fn shr(self, rhs: i32) -> I64Vec2 {
2151 (*self).shr(rhs)
2152 }
2153}
2154
2155impl ShrAssign<i32> for I64Vec2 {
2156 #[inline]
2157 fn shr_assign(&mut self, rhs: i32) {
2158 *self = self.shr(rhs);
2159 }
2160}
2161
2162impl ShrAssign<&i32> for I64Vec2 {
2163 #[inline]
2164 fn shr_assign(&mut self, rhs: &i32) {
2165 self.shr_assign(*rhs);
2166 }
2167}
2168
2169impl Shl<i64> for I64Vec2 {
2170 type Output = Self;
2171 #[inline]
2172 fn shl(self, rhs: i64) -> Self::Output {
2173 Self {
2174 x: self.x.shl(rhs),
2175 y: self.y.shl(rhs),
2176 }
2177 }
2178}
2179
2180impl Shl<&i64> for I64Vec2 {
2181 type Output = Self;
2182 #[inline]
2183 fn shl(self, rhs: &i64) -> Self {
2184 self.shl(*rhs)
2185 }
2186}
2187
2188impl Shl<&i64> for &I64Vec2 {
2189 type Output = I64Vec2;
2190 #[inline]
2191 fn shl(self, rhs: &i64) -> I64Vec2 {
2192 (*self).shl(*rhs)
2193 }
2194}
2195
2196impl Shl<i64> for &I64Vec2 {
2197 type Output = I64Vec2;
2198 #[inline]
2199 fn shl(self, rhs: i64) -> I64Vec2 {
2200 (*self).shl(rhs)
2201 }
2202}
2203
2204impl ShlAssign<i64> for I64Vec2 {
2205 #[inline]
2206 fn shl_assign(&mut self, rhs: i64) {
2207 *self = self.shl(rhs);
2208 }
2209}
2210
2211impl ShlAssign<&i64> for I64Vec2 {
2212 #[inline]
2213 fn shl_assign(&mut self, rhs: &i64) {
2214 self.shl_assign(*rhs);
2215 }
2216}
2217
2218impl Shr<i64> for I64Vec2 {
2219 type Output = Self;
2220 #[inline]
2221 fn shr(self, rhs: i64) -> Self::Output {
2222 Self {
2223 x: self.x.shr(rhs),
2224 y: self.y.shr(rhs),
2225 }
2226 }
2227}
2228
2229impl Shr<&i64> for I64Vec2 {
2230 type Output = Self;
2231 #[inline]
2232 fn shr(self, rhs: &i64) -> Self {
2233 self.shr(*rhs)
2234 }
2235}
2236
2237impl Shr<&i64> for &I64Vec2 {
2238 type Output = I64Vec2;
2239 #[inline]
2240 fn shr(self, rhs: &i64) -> I64Vec2 {
2241 (*self).shr(*rhs)
2242 }
2243}
2244
2245impl Shr<i64> for &I64Vec2 {
2246 type Output = I64Vec2;
2247 #[inline]
2248 fn shr(self, rhs: i64) -> I64Vec2 {
2249 (*self).shr(rhs)
2250 }
2251}
2252
2253impl ShrAssign<i64> for I64Vec2 {
2254 #[inline]
2255 fn shr_assign(&mut self, rhs: i64) {
2256 *self = self.shr(rhs);
2257 }
2258}
2259
2260impl ShrAssign<&i64> for I64Vec2 {
2261 #[inline]
2262 fn shr_assign(&mut self, rhs: &i64) {
2263 self.shr_assign(*rhs);
2264 }
2265}
2266
2267impl Shl<u8> for I64Vec2 {
2268 type Output = Self;
2269 #[inline]
2270 fn shl(self, rhs: u8) -> Self::Output {
2271 Self {
2272 x: self.x.shl(rhs),
2273 y: self.y.shl(rhs),
2274 }
2275 }
2276}
2277
2278impl Shl<&u8> for I64Vec2 {
2279 type Output = Self;
2280 #[inline]
2281 fn shl(self, rhs: &u8) -> Self {
2282 self.shl(*rhs)
2283 }
2284}
2285
2286impl Shl<&u8> for &I64Vec2 {
2287 type Output = I64Vec2;
2288 #[inline]
2289 fn shl(self, rhs: &u8) -> I64Vec2 {
2290 (*self).shl(*rhs)
2291 }
2292}
2293
2294impl Shl<u8> for &I64Vec2 {
2295 type Output = I64Vec2;
2296 #[inline]
2297 fn shl(self, rhs: u8) -> I64Vec2 {
2298 (*self).shl(rhs)
2299 }
2300}
2301
2302impl ShlAssign<u8> for I64Vec2 {
2303 #[inline]
2304 fn shl_assign(&mut self, rhs: u8) {
2305 *self = self.shl(rhs);
2306 }
2307}
2308
2309impl ShlAssign<&u8> for I64Vec2 {
2310 #[inline]
2311 fn shl_assign(&mut self, rhs: &u8) {
2312 self.shl_assign(*rhs);
2313 }
2314}
2315
2316impl Shr<u8> for I64Vec2 {
2317 type Output = Self;
2318 #[inline]
2319 fn shr(self, rhs: u8) -> Self::Output {
2320 Self {
2321 x: self.x.shr(rhs),
2322 y: self.y.shr(rhs),
2323 }
2324 }
2325}
2326
2327impl Shr<&u8> for I64Vec2 {
2328 type Output = Self;
2329 #[inline]
2330 fn shr(self, rhs: &u8) -> Self {
2331 self.shr(*rhs)
2332 }
2333}
2334
2335impl Shr<&u8> for &I64Vec2 {
2336 type Output = I64Vec2;
2337 #[inline]
2338 fn shr(self, rhs: &u8) -> I64Vec2 {
2339 (*self).shr(*rhs)
2340 }
2341}
2342
2343impl Shr<u8> for &I64Vec2 {
2344 type Output = I64Vec2;
2345 #[inline]
2346 fn shr(self, rhs: u8) -> I64Vec2 {
2347 (*self).shr(rhs)
2348 }
2349}
2350
2351impl ShrAssign<u8> for I64Vec2 {
2352 #[inline]
2353 fn shr_assign(&mut self, rhs: u8) {
2354 *self = self.shr(rhs);
2355 }
2356}
2357
2358impl ShrAssign<&u8> for I64Vec2 {
2359 #[inline]
2360 fn shr_assign(&mut self, rhs: &u8) {
2361 self.shr_assign(*rhs);
2362 }
2363}
2364
2365impl Shl<u16> for I64Vec2 {
2366 type Output = Self;
2367 #[inline]
2368 fn shl(self, rhs: u16) -> Self::Output {
2369 Self {
2370 x: self.x.shl(rhs),
2371 y: self.y.shl(rhs),
2372 }
2373 }
2374}
2375
2376impl Shl<&u16> for I64Vec2 {
2377 type Output = Self;
2378 #[inline]
2379 fn shl(self, rhs: &u16) -> Self {
2380 self.shl(*rhs)
2381 }
2382}
2383
2384impl Shl<&u16> for &I64Vec2 {
2385 type Output = I64Vec2;
2386 #[inline]
2387 fn shl(self, rhs: &u16) -> I64Vec2 {
2388 (*self).shl(*rhs)
2389 }
2390}
2391
2392impl Shl<u16> for &I64Vec2 {
2393 type Output = I64Vec2;
2394 #[inline]
2395 fn shl(self, rhs: u16) -> I64Vec2 {
2396 (*self).shl(rhs)
2397 }
2398}
2399
2400impl ShlAssign<u16> for I64Vec2 {
2401 #[inline]
2402 fn shl_assign(&mut self, rhs: u16) {
2403 *self = self.shl(rhs);
2404 }
2405}
2406
2407impl ShlAssign<&u16> for I64Vec2 {
2408 #[inline]
2409 fn shl_assign(&mut self, rhs: &u16) {
2410 self.shl_assign(*rhs);
2411 }
2412}
2413
2414impl Shr<u16> for I64Vec2 {
2415 type Output = Self;
2416 #[inline]
2417 fn shr(self, rhs: u16) -> Self::Output {
2418 Self {
2419 x: self.x.shr(rhs),
2420 y: self.y.shr(rhs),
2421 }
2422 }
2423}
2424
2425impl Shr<&u16> for I64Vec2 {
2426 type Output = Self;
2427 #[inline]
2428 fn shr(self, rhs: &u16) -> Self {
2429 self.shr(*rhs)
2430 }
2431}
2432
2433impl Shr<&u16> for &I64Vec2 {
2434 type Output = I64Vec2;
2435 #[inline]
2436 fn shr(self, rhs: &u16) -> I64Vec2 {
2437 (*self).shr(*rhs)
2438 }
2439}
2440
2441impl Shr<u16> for &I64Vec2 {
2442 type Output = I64Vec2;
2443 #[inline]
2444 fn shr(self, rhs: u16) -> I64Vec2 {
2445 (*self).shr(rhs)
2446 }
2447}
2448
2449impl ShrAssign<u16> for I64Vec2 {
2450 #[inline]
2451 fn shr_assign(&mut self, rhs: u16) {
2452 *self = self.shr(rhs);
2453 }
2454}
2455
2456impl ShrAssign<&u16> for I64Vec2 {
2457 #[inline]
2458 fn shr_assign(&mut self, rhs: &u16) {
2459 self.shr_assign(*rhs);
2460 }
2461}
2462
2463impl Shl<u32> for I64Vec2 {
2464 type Output = Self;
2465 #[inline]
2466 fn shl(self, rhs: u32) -> Self::Output {
2467 Self {
2468 x: self.x.shl(rhs),
2469 y: self.y.shl(rhs),
2470 }
2471 }
2472}
2473
2474impl Shl<&u32> for I64Vec2 {
2475 type Output = Self;
2476 #[inline]
2477 fn shl(self, rhs: &u32) -> Self {
2478 self.shl(*rhs)
2479 }
2480}
2481
2482impl Shl<&u32> for &I64Vec2 {
2483 type Output = I64Vec2;
2484 #[inline]
2485 fn shl(self, rhs: &u32) -> I64Vec2 {
2486 (*self).shl(*rhs)
2487 }
2488}
2489
2490impl Shl<u32> for &I64Vec2 {
2491 type Output = I64Vec2;
2492 #[inline]
2493 fn shl(self, rhs: u32) -> I64Vec2 {
2494 (*self).shl(rhs)
2495 }
2496}
2497
2498impl ShlAssign<u32> for I64Vec2 {
2499 #[inline]
2500 fn shl_assign(&mut self, rhs: u32) {
2501 *self = self.shl(rhs);
2502 }
2503}
2504
2505impl ShlAssign<&u32> for I64Vec2 {
2506 #[inline]
2507 fn shl_assign(&mut self, rhs: &u32) {
2508 self.shl_assign(*rhs);
2509 }
2510}
2511
2512impl Shr<u32> for I64Vec2 {
2513 type Output = Self;
2514 #[inline]
2515 fn shr(self, rhs: u32) -> Self::Output {
2516 Self {
2517 x: self.x.shr(rhs),
2518 y: self.y.shr(rhs),
2519 }
2520 }
2521}
2522
2523impl Shr<&u32> for I64Vec2 {
2524 type Output = Self;
2525 #[inline]
2526 fn shr(self, rhs: &u32) -> Self {
2527 self.shr(*rhs)
2528 }
2529}
2530
2531impl Shr<&u32> for &I64Vec2 {
2532 type Output = I64Vec2;
2533 #[inline]
2534 fn shr(self, rhs: &u32) -> I64Vec2 {
2535 (*self).shr(*rhs)
2536 }
2537}
2538
2539impl Shr<u32> for &I64Vec2 {
2540 type Output = I64Vec2;
2541 #[inline]
2542 fn shr(self, rhs: u32) -> I64Vec2 {
2543 (*self).shr(rhs)
2544 }
2545}
2546
2547impl ShrAssign<u32> for I64Vec2 {
2548 #[inline]
2549 fn shr_assign(&mut self, rhs: u32) {
2550 *self = self.shr(rhs);
2551 }
2552}
2553
2554impl ShrAssign<&u32> for I64Vec2 {
2555 #[inline]
2556 fn shr_assign(&mut self, rhs: &u32) {
2557 self.shr_assign(*rhs);
2558 }
2559}
2560
2561impl Shl<u64> for I64Vec2 {
2562 type Output = Self;
2563 #[inline]
2564 fn shl(self, rhs: u64) -> Self::Output {
2565 Self {
2566 x: self.x.shl(rhs),
2567 y: self.y.shl(rhs),
2568 }
2569 }
2570}
2571
2572impl Shl<&u64> for I64Vec2 {
2573 type Output = Self;
2574 #[inline]
2575 fn shl(self, rhs: &u64) -> Self {
2576 self.shl(*rhs)
2577 }
2578}
2579
2580impl Shl<&u64> for &I64Vec2 {
2581 type Output = I64Vec2;
2582 #[inline]
2583 fn shl(self, rhs: &u64) -> I64Vec2 {
2584 (*self).shl(*rhs)
2585 }
2586}
2587
2588impl Shl<u64> for &I64Vec2 {
2589 type Output = I64Vec2;
2590 #[inline]
2591 fn shl(self, rhs: u64) -> I64Vec2 {
2592 (*self).shl(rhs)
2593 }
2594}
2595
2596impl ShlAssign<u64> for I64Vec2 {
2597 #[inline]
2598 fn shl_assign(&mut self, rhs: u64) {
2599 *self = self.shl(rhs);
2600 }
2601}
2602
2603impl ShlAssign<&u64> for I64Vec2 {
2604 #[inline]
2605 fn shl_assign(&mut self, rhs: &u64) {
2606 self.shl_assign(*rhs);
2607 }
2608}
2609
2610impl Shr<u64> for I64Vec2 {
2611 type Output = Self;
2612 #[inline]
2613 fn shr(self, rhs: u64) -> Self::Output {
2614 Self {
2615 x: self.x.shr(rhs),
2616 y: self.y.shr(rhs),
2617 }
2618 }
2619}
2620
2621impl Shr<&u64> for I64Vec2 {
2622 type Output = Self;
2623 #[inline]
2624 fn shr(self, rhs: &u64) -> Self {
2625 self.shr(*rhs)
2626 }
2627}
2628
2629impl Shr<&u64> for &I64Vec2 {
2630 type Output = I64Vec2;
2631 #[inline]
2632 fn shr(self, rhs: &u64) -> I64Vec2 {
2633 (*self).shr(*rhs)
2634 }
2635}
2636
2637impl Shr<u64> for &I64Vec2 {
2638 type Output = I64Vec2;
2639 #[inline]
2640 fn shr(self, rhs: u64) -> I64Vec2 {
2641 (*self).shr(rhs)
2642 }
2643}
2644
2645impl ShrAssign<u64> for I64Vec2 {
2646 #[inline]
2647 fn shr_assign(&mut self, rhs: u64) {
2648 *self = self.shr(rhs);
2649 }
2650}
2651
2652impl ShrAssign<&u64> for I64Vec2 {
2653 #[inline]
2654 fn shr_assign(&mut self, rhs: &u64) {
2655 self.shr_assign(*rhs);
2656 }
2657}
2658
2659impl Shl<IVec2> for I64Vec2 {
2660 type Output = Self;
2661 #[inline]
2662 fn shl(self, rhs: IVec2) -> Self {
2663 Self {
2664 x: self.x.shl(rhs.x),
2665 y: self.y.shl(rhs.y),
2666 }
2667 }
2668}
2669
2670impl Shl<&IVec2> for I64Vec2 {
2671 type Output = Self;
2672 #[inline]
2673 fn shl(self, rhs: &IVec2) -> Self {
2674 self.shl(*rhs)
2675 }
2676}
2677
2678impl Shl<&IVec2> for &I64Vec2 {
2679 type Output = I64Vec2;
2680 #[inline]
2681 fn shl(self, rhs: &IVec2) -> I64Vec2 {
2682 (*self).shl(*rhs)
2683 }
2684}
2685
2686impl Shl<IVec2> for &I64Vec2 {
2687 type Output = I64Vec2;
2688 #[inline]
2689 fn shl(self, rhs: IVec2) -> I64Vec2 {
2690 (*self).shl(rhs)
2691 }
2692}
2693
2694impl Shr<IVec2> for I64Vec2 {
2695 type Output = Self;
2696 #[inline]
2697 fn shr(self, rhs: IVec2) -> Self {
2698 Self {
2699 x: self.x.shr(rhs.x),
2700 y: self.y.shr(rhs.y),
2701 }
2702 }
2703}
2704
2705impl Shr<&IVec2> for I64Vec2 {
2706 type Output = Self;
2707 #[inline]
2708 fn shr(self, rhs: &IVec2) -> Self {
2709 self.shr(*rhs)
2710 }
2711}
2712
2713impl Shr<&IVec2> for &I64Vec2 {
2714 type Output = I64Vec2;
2715 #[inline]
2716 fn shr(self, rhs: &IVec2) -> I64Vec2 {
2717 (*self).shr(*rhs)
2718 }
2719}
2720
2721impl Shr<IVec2> for &I64Vec2 {
2722 type Output = I64Vec2;
2723 #[inline]
2724 fn shr(self, rhs: IVec2) -> I64Vec2 {
2725 (*self).shr(rhs)
2726 }
2727}
2728
2729impl Shl<UVec2> for I64Vec2 {
2730 type Output = Self;
2731 #[inline]
2732 fn shl(self, rhs: UVec2) -> Self {
2733 Self {
2734 x: self.x.shl(rhs.x),
2735 y: self.y.shl(rhs.y),
2736 }
2737 }
2738}
2739
2740impl Shl<&UVec2> for I64Vec2 {
2741 type Output = Self;
2742 #[inline]
2743 fn shl(self, rhs: &UVec2) -> Self {
2744 self.shl(*rhs)
2745 }
2746}
2747
2748impl Shl<&UVec2> for &I64Vec2 {
2749 type Output = I64Vec2;
2750 #[inline]
2751 fn shl(self, rhs: &UVec2) -> I64Vec2 {
2752 (*self).shl(*rhs)
2753 }
2754}
2755
2756impl Shl<UVec2> for &I64Vec2 {
2757 type Output = I64Vec2;
2758 #[inline]
2759 fn shl(self, rhs: UVec2) -> I64Vec2 {
2760 (*self).shl(rhs)
2761 }
2762}
2763
2764impl Shr<UVec2> for I64Vec2 {
2765 type Output = Self;
2766 #[inline]
2767 fn shr(self, rhs: UVec2) -> Self {
2768 Self {
2769 x: self.x.shr(rhs.x),
2770 y: self.y.shr(rhs.y),
2771 }
2772 }
2773}
2774
2775impl Shr<&UVec2> for I64Vec2 {
2776 type Output = Self;
2777 #[inline]
2778 fn shr(self, rhs: &UVec2) -> Self {
2779 self.shr(*rhs)
2780 }
2781}
2782
2783impl Shr<&UVec2> for &I64Vec2 {
2784 type Output = I64Vec2;
2785 #[inline]
2786 fn shr(self, rhs: &UVec2) -> I64Vec2 {
2787 (*self).shr(*rhs)
2788 }
2789}
2790
2791impl Shr<UVec2> for &I64Vec2 {
2792 type Output = I64Vec2;
2793 #[inline]
2794 fn shr(self, rhs: UVec2) -> I64Vec2 {
2795 (*self).shr(rhs)
2796 }
2797}
2798
2799impl Index<usize> for I64Vec2 {
2800 type Output = i64;
2801 #[inline]
2802 fn index(&self, index: usize) -> &Self::Output {
2803 match index {
2804 0 => &self.x,
2805 1 => &self.y,
2806 _ => panic!("index out of bounds"),
2807 }
2808 }
2809}
2810
2811impl IndexMut<usize> for I64Vec2 {
2812 #[inline]
2813 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
2814 match index {
2815 0 => &mut self.x,
2816 1 => &mut self.y,
2817 _ => panic!("index out of bounds"),
2818 }
2819 }
2820}
2821
2822impl fmt::Display for I64Vec2 {
2823 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2824 write!(f, "[{}, {}]", self.x, self.y)
2825 }
2826}
2827
2828impl fmt::Debug for I64Vec2 {
2829 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2830 fmt.debug_tuple(stringify!(I64Vec2))
2831 .field(&self.x)
2832 .field(&self.y)
2833 .finish()
2834 }
2835}
2836
2837impl From<[i64; 2]> for I64Vec2 {
2838 #[inline]
2839 fn from(a: [i64; 2]) -> Self {
2840 Self::new(a[0], a[1])
2841 }
2842}
2843
2844impl From<I64Vec2> for [i64; 2] {
2845 #[inline]
2846 fn from(v: I64Vec2) -> Self {
2847 [v.x, v.y]
2848 }
2849}
2850
2851impl From<(i64, i64)> for I64Vec2 {
2852 #[inline]
2853 fn from(t: (i64, i64)) -> Self {
2854 Self::new(t.0, t.1)
2855 }
2856}
2857
2858impl From<I64Vec2> for (i64, i64) {
2859 #[inline]
2860 fn from(v: I64Vec2) -> Self {
2861 (v.x, v.y)
2862 }
2863}
2864
2865impl From<I8Vec2> for I64Vec2 {
2866 #[inline]
2867 fn from(v: I8Vec2) -> Self {
2868 Self::new(i64::from(v.x), i64::from(v.y))
2869 }
2870}
2871
2872impl From<U8Vec2> for I64Vec2 {
2873 #[inline]
2874 fn from(v: U8Vec2) -> Self {
2875 Self::new(i64::from(v.x), i64::from(v.y))
2876 }
2877}
2878
2879impl From<I16Vec2> for I64Vec2 {
2880 #[inline]
2881 fn from(v: I16Vec2) -> Self {
2882 Self::new(i64::from(v.x), i64::from(v.y))
2883 }
2884}
2885
2886impl From<U16Vec2> for I64Vec2 {
2887 #[inline]
2888 fn from(v: U16Vec2) -> Self {
2889 Self::new(i64::from(v.x), i64::from(v.y))
2890 }
2891}
2892
2893impl From<IVec2> for I64Vec2 {
2894 #[inline]
2895 fn from(v: IVec2) -> Self {
2896 Self::new(i64::from(v.x), i64::from(v.y))
2897 }
2898}
2899
2900impl From<UVec2> for I64Vec2 {
2901 #[inline]
2902 fn from(v: UVec2) -> Self {
2903 Self::new(i64::from(v.x), i64::from(v.y))
2904 }
2905}
2906
2907impl TryFrom<U64Vec2> for I64Vec2 {
2908 type Error = core::num::TryFromIntError;
2909
2910 #[inline]
2911 fn try_from(v: U64Vec2) -> Result<Self, Self::Error> {
2912 Ok(Self::new(i64::try_from(v.x)?, i64::try_from(v.y)?))
2913 }
2914}
2915
2916impl TryFrom<USizeVec2> for I64Vec2 {
2917 type Error = core::num::TryFromIntError;
2918
2919 #[inline]
2920 fn try_from(v: USizeVec2) -> Result<Self, Self::Error> {
2921 Ok(Self::new(i64::try_from(v.x)?, i64::try_from(v.y)?))
2922 }
2923}
2924
2925impl From<BVec2> for I64Vec2 {
2926 #[inline]
2927 fn from(v: BVec2) -> Self {
2928 Self::new(i64::from(v.x), i64::from(v.y))
2929 }
2930}