/// Default 64-bit LCG multiplier used by the PCG family of generators
/// (Knuth's MMIX constant, 6364136223846793005).
pub const PCG_MUL_64: u64 = 6364136223846793005;
5
/// Advances a 32-bit linear congruential generator by one step.
///
/// Returns `state * mul + add`, with both operations wrapping on overflow.
#[inline]
#[must_use]
pub const fn lcg32_step(mul: u32, add: u32, state: u32) -> u32 {
    let scaled = state.wrapping_mul(mul);
    scaled.wrapping_add(add)
}
12
/// Advances a 32-bit LCG by `delta` steps in O(log delta) time.
///
/// Uses the standard jump-ahead technique: the one-step affine map
/// `x -> mul * x + add` is repeatedly squared, and the squares selected by
/// the set bits of `delta` are composed together. `delta == 0` leaves
/// `state` unchanged (the accumulated map starts as the identity).
#[inline]
#[must_use]
pub const fn lcg32_jump(mul: u32, add: u32, state: u32, mut delta: u32) -> u32 {
    // Current power-of-two map (starts at one step).
    let mut sq_mul: u32 = mul;
    let mut sq_add: u32 = add;
    // Accumulated map for the delta bits consumed so far: x -> 1*x + 0.
    let mut out_mul: u32 = 1;
    let mut out_add: u32 = 0;
    while delta != 0 {
        if delta & 1 == 1 {
            // Compose the accumulated map with the current square.
            out_mul = out_mul.wrapping_mul(sq_mul);
            out_add = out_add.wrapping_mul(sq_mul).wrapping_add(sq_add);
        }
        // Square the map: (m, a) -> (m * m, (m + 1) * a).
        sq_add = sq_mul.wrapping_add(1).wrapping_mul(sq_add);
        sq_mul = sq_mul.wrapping_mul(sq_mul);
        delta >>= 1;
    }
    state.wrapping_mul(out_mul).wrapping_add(out_add)
}
32
/// Advances a 64-bit linear congruential generator by one step.
///
/// Returns `state * mul + add`, with both operations wrapping on overflow.
#[inline]
#[must_use]
pub const fn lcg64_step(mul: u64, add: u64, state: u64) -> u64 {
    let scaled = state.wrapping_mul(mul);
    scaled.wrapping_add(add)
}
39
/// Advances a 64-bit LCG by `delta` steps in O(log delta) time.
///
/// Same jump-ahead construction as [`lcg32_jump`]: square the affine map
/// `x -> mul * x + add` and compose the squares selected by the set bits of
/// `delta`. `delta == 0` leaves `state` unchanged.
#[inline]
#[must_use]
pub const fn lcg64_jump(mul: u64, add: u64, state: u64, mut delta: u64) -> u64 {
    // Current power-of-two map (starts at one step).
    let mut sq_mul: u64 = mul;
    let mut sq_add: u64 = add;
    // Accumulated map: starts as the identity x -> 1*x + 0.
    let mut out_mul: u64 = 1;
    let mut out_add: u64 = 0;
    while delta != 0 {
        if delta & 1 == 1 {
            // Compose the accumulated map with the current square.
            out_mul = out_mul.wrapping_mul(sq_mul);
            out_add = out_add.wrapping_mul(sq_mul).wrapping_add(sq_add);
        }
        // Square the map: (m, a) -> (m * m, (m + 1) * a).
        sq_add = sq_mul.wrapping_add(1).wrapping_mul(sq_add);
        sq_mul = sq_mul.wrapping_mul(sq_mul);
        delta >>= 1;
    }
    state.wrapping_mul(out_mul).wrapping_add(out_add)
}
59
/// Advances a 128-bit linear congruential generator by one step.
///
/// Returns `state * mul + add`, with both operations wrapping on overflow.
#[inline]
#[must_use]
pub const fn lcg128_step(mul: u128, add: u128, state: u128) -> u128 {
    let scaled = state.wrapping_mul(mul);
    scaled.wrapping_add(add)
}
66
/// Advances a 128-bit LCG by `delta` steps in O(log delta) time.
///
/// Same jump-ahead construction as [`lcg32_jump`]: square the affine map
/// `x -> mul * x + add` and compose the squares selected by the set bits of
/// `delta`. `delta == 0` leaves `state` unchanged.
#[inline]
#[must_use]
pub const fn lcg128_jump(mul: u128, add: u128, state: u128, mut delta: u128) -> u128 {
    // Current power-of-two map (starts at one step).
    let mut sq_mul: u128 = mul;
    let mut sq_add: u128 = add;
    // Accumulated map: starts as the identity x -> 1*x + 0.
    let mut out_mul: u128 = 1;
    let mut out_add: u128 = 0;
    while delta != 0 {
        if delta & 1 == 1 {
            // Compose the accumulated map with the current square.
            out_mul = out_mul.wrapping_mul(sq_mul);
            out_add = out_add.wrapping_mul(sq_mul).wrapping_add(sq_add);
        }
        // Square the map: (m, a) -> (m * m, (m + 1) * a).
        sq_add = sq_mul.wrapping_add(1).wrapping_mul(sq_add);
        sq_mul = sq_mul.wrapping_mul(sq_mul);
        delta >>= 1;
    }
    state.wrapping_mul(out_mul).wrapping_add(out_add)
}
86
/// PCG "XSH-RR" output permutation: xorshift-high followed by a
/// state-selected random rotation, compressing 64 state bits to 32.
///
/// The top 5 bits of `state` pick the rotation amount (0..=31); the
/// remaining mixing is `(state ^ (state >> 18)) >> 27`.
#[inline]
#[must_use]
pub const fn xsh_rr_u64_to_u32(state: u64) -> u32 {
    // Rotation comes from the 5 highest bits, so it is always in 0..=31.
    let rotation = (state >> 59) as u32;
    // Xorshift by 18 = (32 + 5) / 2, then keep bits 27..=58 (the cast to
    // u32 truncates to the low 32 of the shifted word).
    let mixed = state ^ (state >> 18);
    let word = (mixed >> 27) as u32;
    word.rotate_right(rotation)
}
100
/// PCG "XSL-RR" output permutation: xor-fold the two 64-bit halves of the
/// state, then apply a state-selected random rotation.
///
/// The top 6 bits of `state` pick the rotation amount (0..=63).
#[inline]
#[must_use]
pub const fn xsl_rr_u128_to_u64(state: u128) -> u64 {
    // Rotation comes from the 6 highest bits, so it is always in 0..=63.
    let rotation = (state >> 122) as u32;
    // Fold the high half into the low half with XOR.
    let folded = (state ^ (state >> 64)) as u64;
    folded.rotate_right(rotation)
}
114
/// Samples the number of leading zero bits produced by the bit source `f`,
/// i.e. a binary-exponential (geometric-like) value.
///
/// Draws 32-bit words from `f` until a non-zero word appears; each all-zero
/// word contributes 32 to the result, and the trailing zeros of the first
/// non-zero word are added on top. Loops forever if `f` only ever yields 0.
#[inline]
#[must_use]
pub fn next_binary_exp_distr32<F: FnMut() -> u32>(mut f: F) -> u32 {
    let mut skipped: u32 = 0;
    loop {
        let word = f();
        if word != 0 {
            return skipped + word.trailing_zeros();
        }
        // Entire word was zero: account for its 32 bits and draw again.
        skipped += 32;
    }
}
133
#[inline]
#[must_use]
/// Builds a random `f32` from raw bits drawn from `f`.
///
/// The 32-bit sample is partitioned into mantissa, optional sign, a
/// "round-up" bit, and `num_rest_bits` exponent-seed bits; the exponent is
/// derived from trailing zeros of those bits (extended via
/// `next_binary_exp_distr32` when they are all zero), which weights each
/// exponent geometrically. NOTE(review): this appears to sample uniformly
/// from [0, 1) (or (-1, 1) when `signed`) including subnormals — confirm
/// against the caller's expectations.
///
/// `signed` controls whether one bit of the sample is spent on the sign.
/// May call `f` again when extra exponent bits are needed; loops forever if
/// `f` only ever yields 0 in that path.
pub fn ieee754_random_f32<F: FnMut() -> u32>(mut f: F, signed: bool) -> f32 {
    // IEEE-754 binary32 layout: 1 sign bit, 8 exponent bits (bias 127),
    // 23 mantissa bits.
    let bit_width = 32;
    let exponent_bias = 127;
    let num_mantissa_bits = 23;
    // Bits left over after mantissa, round-up bit, and optional sign bit.
    let num_rest_bits = bit_width - num_mantissa_bits - 1 - signed as i32;
    let r: u32 = f();

    debug_assert!(num_rest_bits >= 0);
    // Sanity check: the sample type really is `bit_width` bits wide.
    debug_assert!(core::mem::size_of::<u32>() * 8 == bit_width as _);

    // Mantissa comes from the top 23 bits of the sample.
    let mantissa = r >> (bit_width - num_mantissa_bits);
    let (sign_mask, rand_bit, rest_bits);
    if signed {
        // Bit 0 becomes the sign (shifted to the top), bit 1 the round-up
        // bit, bits 2..(2 + num_rest_bits) the exponent seed.
        sign_mask = r << (bit_width - 1);
        rand_bit = (r & 2) != 0;
        rest_bits = (r >> 2) & ((1 << num_rest_bits) - 1);
    } else {
        // No sign bit: bit 0 is the round-up bit, bits 1.. the seed.
        sign_mask = 0;
        rand_bit = (r & 1) != 0;
        rest_bits = (r >> 1) & ((1 << num_rest_bits) - 1);
    }

    // A zero mantissa sits exactly on an exponent boundary; the random bit
    // decides whether it is attributed to the next binade up.
    let increment_exponent = (mantissa == 0 && rand_bit) as i32;

    // Geometric exponent offset: trailing zeros of the seed bits, extended
    // with further random words when the seed is all zero.
    let computed_rest_bits: i32 = if rest_bits > 0 {
        rest_bits.trailing_zeros() as i32
    } else {
        num_rest_bits + next_binary_exp_distr32(&mut f) as i32
    };
    let mut exponent: i32 = -1 + increment_exponent - computed_rest_bits;

    // Resample if the exponent fell below the subnormal range (the `> 0`
    // arm looks defensive: -1 + increment_exponent - computed is <= 0).
    while exponent < -exponent_bias || exponent > 0 {
        exponent = -1 + increment_exponent - next_binary_exp_distr32(&mut f) as i32;
    }

    // Re-bias the exponent and assemble the final bit pattern.
    let final_exponent = ((exponent + exponent_bias) as u32) << num_mantissa_bits;
    f32::from_bits(sign_mask | final_exponent | mantissa)
}
185
186#[inline]
191#[must_use]
192pub fn ieee754_random_f64<F: FnMut() -> u32>(mut f: F, signed: bool) -> f64 {
193 let bit_width = 64;
197 let exponent_bias = 1023;
198 let num_mantissa_bits = 52;
199 let num_rest_bits = bit_width - num_mantissa_bits - 1 - signed as i32;
200 let r: u64 = ((f() as u64) << 32) | (f() as u64);
201
202 debug_assert!(num_rest_bits >= 0);
203 debug_assert!(core::mem::size_of::<u32>() * 8 == bit_width as _);
204
205 let mantissa = r >> (bit_width - num_mantissa_bits);
206 let (sign_mask, rand_bit, rest_bits);
207 if signed {
208 sign_mask = r << (bit_width - 1);
209 rand_bit = (r & 2) != 0;
210 rest_bits = (r >> 2) & ((1 << num_rest_bits) - 1);
211 } else {
212 sign_mask = 0;
213 rand_bit = (r & 1) != 0;
214 rest_bits = (r >> 1) & ((1 << num_rest_bits) - 1);
215 }
216
217 let increment_exponent = (mantissa == 0 && rand_bit) as i32;
219
220 let computed_rest_bits: i32 = if rest_bits > 0 {
222 rest_bits.trailing_zeros() as i32
223 } else {
224 num_rest_bits + next_binary_exp_distr32(&mut f) as i32
225 };
226 let mut exponent: i32 = -1 + increment_exponent - computed_rest_bits;
227
228 while exponent < -exponent_bias || exponent > 0 {
231 exponent = -1 + increment_exponent - next_binary_exp_distr32(&mut f) as i32;
232 }
233
234 let final_exponent = ((exponent + exponent_bias) as u64) << num_mantissa_bits;
235 f64::from_bits(sign_mask | final_exponent | mantissa)
236}