// orion/util/u64x4.rs
// MIT License

// Copyright (c) 2019-2026 The orion Developers

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/// Four `u64` words treated as independent lanes; the operator impls and
/// methods in this file apply to all four lanes at once.
#[derive(Clone, Copy, Default)]
pub(crate) struct U64x4(
    pub(crate) u64,
    pub(crate) u64,
    pub(crate) u64,
    pub(crate) u64,
);
30
31impl core::ops::BitXor for U64x4 {
32    type Output = Self;
33
34    fn bitxor(self, _rhs: Self) -> Self::Output {
35        Self(
36            self.0 ^ _rhs.0,
37            self.1 ^ _rhs.1,
38            self.2 ^ _rhs.2,
39            self.3 ^ _rhs.3,
40        )
41    }
42}
43
44impl core::ops::BitXorAssign for U64x4 {
45    fn bitxor_assign(&mut self, _rhs: Self) {
46        self.0 ^= _rhs.0;
47        self.1 ^= _rhs.1;
48        self.2 ^= _rhs.2;
49        self.3 ^= _rhs.3;
50    }
51}
52
// Only compiled when the optional `zeroize` feature is enabled.
#[cfg(feature = "zeroize")]
impl zeroize::Zeroize for U64x4 {
    /// Wipe all four lanes by delegating to `u64`'s `Zeroize` impl
    /// for each word.
    fn zeroize(&mut self) {
        self.0.zeroize();
        self.1.zeroize();
        self.2.zeroize();
        self.3.zeroize();
    }
}
62
// Equality is only needed by the test suite, so it is gated to test builds.
#[cfg(test)]
impl PartialEq<U64x4> for U64x4 {
    /// Two vectors compare equal exactly when every lane matches.
    fn eq(&self, other: &Self) -> bool {
        (self.0, self.1, self.2, self.3) == (other.0, other.1, other.2, other.3)
    }
}
69
70impl U64x4 {
71    #[must_use]
72    pub(crate) const fn wrapping_add(self, _rhs: Self) -> Self {
73        Self(
74            self.0.wrapping_add(_rhs.0),
75            self.1.wrapping_add(_rhs.1),
76            self.2.wrapping_add(_rhs.2),
77            self.3.wrapping_add(_rhs.3),
78        )
79    }
80
81    #[must_use]
82    pub(crate) const fn shl_1(self) -> Self {
83        Self(self.1, self.2, self.3, self.0)
84    }
85
86    #[must_use]
87    pub(crate) const fn shl_2(self) -> Self {
88        Self(self.2, self.3, self.0, self.1)
89    }
90
91    #[must_use]
92    pub(crate) const fn shl_3(self) -> Self {
93        Self(self.3, self.0, self.1, self.2)
94    }
95
96    #[must_use]
97    pub(crate) const fn rotate_right(self, n: u32) -> Self {
98        Self(
99            self.0.rotate_right(n),
100            self.1.rotate_right(n),
101            self.2.rotate_right(n),
102            self.3.rotate_right(n),
103        )
104    }
105
106    pub(crate) fn store_into_le(self, slice_in: &mut [u8]) {
107        debug_assert_eq!(slice_in.len(), size_of::<u64>() * 4);
108        let mut iter = slice_in.chunks_exact_mut(size_of::<u64>());
109        iter.next().unwrap().copy_from_slice(&self.0.to_le_bytes());
110        iter.next().unwrap().copy_from_slice(&self.1.to_le_bytes());
111        iter.next().unwrap().copy_from_slice(&self.2.to_le_bytes());
112        iter.next().unwrap().copy_from_slice(&self.3.to_le_bytes());
113    }
114}