1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
//! Bit-granularity right shift with zero-fill.
use super::U256;
impl U256 {
    /// Returns `self` shifted right by `n` bits, zero-filling the high bits.
    ///
    /// A shift of 0 is the identity; a shift of 256 or more yields zero.
    /// Both whole-limb moves and bit carries across 64-bit limb boundaries
    /// are handled.
    ///
    /// # Examples
    ///
    /// ```
    /// use cnfy_uint::u256::U256;
    ///
    /// let a = U256::from_be_limbs([0, 0, 0, 8]);
    /// assert_eq!(a.shr_bits(3), U256::from_be_limbs([0, 0, 0, 1]));
    ///
    /// let b = U256::from_be_limbs([1, 0, 0, 0]);
    /// assert_eq!(b.shr_bits(192), U256::from_be_limbs([0, 0, 0, 1]));
    /// ```
    #[inline]
    pub const fn shr_bits(&self, n: u32) -> U256 {
        if n >= 256 {
            return U256([0, 0, 0, 0]);
        }
        if n == 0 {
            return *self;
        }
        // Split the shift into a whole-limb part and a sub-limb part.
        let word = (n >> 6) as usize;
        let bits = n & 63;
        // Limbs are little-endian: index 0 is least significant, so a right
        // shift pulls data from higher indices into lower ones. Build the
        // result in one pass, merging the carry from the next-higher limb.
        let mut out = [0u64; 4];
        let mut k = 0;
        while k + word < 4 {
            let src = k + word;
            let mut limb = self.0[src] >> bits;
            // When bits == 0 there is no cross-limb carry; otherwise pull the
            // low `bits` bits of the next limb into our high end. The guard
            // also keeps `64 - bits` in 1..=63, so the shift cannot overflow.
            if bits != 0 && src + 1 < 4 {
                limb |= self.0[src + 1] << (64 - bits);
            }
            out[k] = limb;
            k += 1;
        }
        U256(out)
    }
}
#[cfg(test)]
mod ai_tests {
    use super::*;

    /// Shift by 0 is identity.
    #[test]
    fn identity() {
        let value = U256::from_be_limbs([0x1234, 0x5678, 0x9ABC, 0xDEF0]);
        assert_eq!(value.shr_bits(0), value);
    }

    /// Shift by 256 or more produces zero.
    #[test]
    fn full_shift() {
        let all_ones = U256::from_be_limbs([u64::MAX; 4]);
        for n in [256, 300] {
            assert_eq!(all_ones.shr_bits(n), U256::ZERO);
        }
    }

    /// Shift right by 1 halves the value.
    #[test]
    fn shift_one() {
        let eight = U256::from_be_limbs([0, 0, 0, 8]);
        let four = U256::from_be_limbs([0, 0, 0, 4]);
        assert_eq!(eight.shr_bits(1), four);
    }

    /// Shift by exactly 64 moves one limb.
    #[test]
    fn one_limb() {
        let input = U256::from_be_limbs([0, 0, 1, 0]);
        let expected = U256::from_be_limbs([0, 0, 0, 1]);
        assert_eq!(input.shr_bits(64), expected);
    }

    /// Shift by 192 moves from MSB limb to LSB.
    #[test]
    fn three_limbs() {
        let input = U256::from_be_limbs([0x42, 0, 0, 0]);
        let expected = U256::from_be_limbs([0, 0, 0, 0x42]);
        assert_eq!(input.shr_bits(192), expected);
    }

    /// Cross-limb bit carry works.
    #[test]
    fn cross_limb_carry() {
        let input = U256::from_be_limbs([0, 0, 1, 0]);
        let expected = U256::from_be_limbs([0, 0, 0, 1 << 63]);
        assert_eq!(input.shr_bits(1), expected);
    }

    /// Combined limb and bit shift.
    #[test]
    fn combined_shift() {
        let input = U256::from_be_limbs([0, 0, 0x100, 0]);
        let expected = U256::from_be_limbs([0, 0, 0, 0x10]);
        assert_eq!(input.shr_bits(68), expected);
    }

    /// Shifting zero always produces zero.
    #[test]
    fn shift_zero() {
        assert_eq!(U256::ZERO.shr_bits(42), U256::ZERO);
    }

    /// Shifting max right by 255 leaves 1.
    #[test]
    fn max_shift_255() {
        let all_ones = U256::from_be_limbs([u64::MAX; 4]);
        assert_eq!(all_ones.shr_bits(255), U256::ONE);
    }

    /// Round-trip with shl_bits for small shifts.
    #[test]
    fn round_trip() {
        let value = U256::from_be_limbs([0, 0x1234, 0x5678, 0x9ABC]);
        assert_eq!(value.shl_bits(17).shr_bits(17), value);
    }
}