randomize/
formulas.rs

//! Base formulas used elsewhere in the crate.

/// This is the suggested multiplier for a PCG with 64 bits of state.
///
/// Hex form: `0x5851_F42D_4C95_7F2D`.
pub const PCG_MUL_64: u64 = 6364136223846793005;

/// Advance a 32-bit LCG's state by one step.
///
/// Computes `state * mul + add` with wrapping arithmetic.
#[inline]
#[must_use]
pub const fn lcg32_step(mul: u32, add: u32, state: u32) -> u32 {
  mul.wrapping_mul(state).wrapping_add(add)
}

/// Advance a 32-bit LCG by `delta` steps in `log2(delta)` time.
///
/// Uses square-and-multiply on the affine map `s -> s*mul + add`: for every
/// set bit of `delta` the current power-of-two transform is folded into an
/// accumulated transform, which is applied to `state` at the end.
#[inline]
#[must_use]
pub const fn lcg32_jump(mul: u32, add: u32, state: u32, mut delta: u32) -> u32 {
  // Transform for the current power-of-two number of steps.
  let mut sq_mul: u32 = mul;
  let mut sq_add: u32 = add;
  // Accumulated transform; starts as the identity.
  let mut out_mul: u32 = 1;
  let mut out_add: u32 = 0;
  while delta != 0 {
    if delta & 1 != 0 {
      // Compose the accumulated transform with the current one.
      out_mul = out_mul.wrapping_mul(sq_mul);
      out_add = out_add.wrapping_mul(sq_mul).wrapping_add(sq_add);
    }
    // Square the current transform: (m, p) -> (m*m, (m+1)*p).
    sq_add = sq_add.wrapping_mul(sq_mul.wrapping_add(1));
    sq_mul = sq_mul.wrapping_mul(sq_mul);
    delta >>= 1;
  }
  out_mul.wrapping_mul(state).wrapping_add(out_add)
}

/// Advance a 64-bit LCG's state.
///
/// Computes `state * mul + add` with wrapping arithmetic.
///
/// (Doc fix: this previously said "32-bit", copied from `lcg32_step`.)
#[inline]
#[must_use]
pub const fn lcg64_step(mul: u64, add: u64, state: u64) -> u64 {
  state.wrapping_mul(mul).wrapping_add(add)
}

/// Advance a 64-bit LCG by `delta` steps in `log2(delta)` time.
///
/// Uses the standard square-and-multiply jump-ahead: the affine update
/// `s -> s*mul + add` is composed with itself once per bit of `delta`.
///
/// (Doc fix: this previously said "32-bit", copied from `lcg32_jump`.)
#[inline]
#[must_use]
pub const fn lcg64_jump(mul: u64, add: u64, state: u64, mut delta: u64) -> u64 {
  // Transform covering the current power-of-two step count.
  let mut cur_mult: u64 = mul;
  let mut cur_plus: u64 = add;
  // Accumulated transform for all bits processed so far; starts as identity.
  let mut acc_mult: u64 = 1;
  let mut acc_plus: u64 = 0;
  while delta > 0 {
    if (delta & 1) > 0 {
      // Fold the current transform into the accumulator.
      acc_mult = acc_mult.wrapping_mul(cur_mult);
      acc_plus = acc_plus.wrapping_mul(cur_mult).wrapping_add(cur_plus);
    }
    // Square the current transform: (m, p) -> (m*m, (m+1)*p).
    cur_plus = cur_mult.wrapping_add(1).wrapping_mul(cur_plus);
    cur_mult = cur_mult.wrapping_mul(cur_mult);
    delta /= 2;
  }
  state.wrapping_mul(acc_mult).wrapping_add(acc_plus)
}

/// Advance a 128-bit LCG's state.
///
/// Computes `state * mul + add` with wrapping arithmetic.
///
/// (Doc fix: this previously said "32-bit", copied from `lcg32_step`.)
#[inline]
#[must_use]
pub const fn lcg128_step(mul: u128, add: u128, state: u128) -> u128 {
  state.wrapping_mul(mul).wrapping_add(add)
}

/// Advance a 128-bit LCG by `delta` steps in `log2(delta)` time.
///
/// Uses the standard square-and-multiply jump-ahead: the affine update
/// `s -> s*mul + add` is composed with itself once per bit of `delta`.
///
/// (Doc fix: this previously said "32-bit", copied from `lcg32_jump`.)
#[inline]
#[must_use]
pub const fn lcg128_jump(mul: u128, add: u128, state: u128, mut delta: u128) -> u128 {
  // Transform covering the current power-of-two step count.
  let mut cur_mult: u128 = mul;
  let mut cur_plus: u128 = add;
  // Accumulated transform for all bits processed so far; starts as identity.
  let mut acc_mult: u128 = 1;
  let mut acc_plus: u128 = 0;
  while delta > 0 {
    if (delta & 1) > 0 {
      // Fold the current transform into the accumulator.
      acc_mult = acc_mult.wrapping_mul(cur_mult);
      acc_plus = acc_plus.wrapping_mul(cur_mult).wrapping_add(cur_plus);
    }
    // Square the current transform: (m, p) -> (m*m, (m+1)*p).
    cur_plus = cur_mult.wrapping_add(1).wrapping_mul(cur_plus);
    cur_mult = cur_mult.wrapping_mul(cur_mult);
    delta /= 2;
  }
  state.wrapping_mul(acc_mult).wrapping_add(acc_plus)
}

/// "Xor-shift high bits" then "randomized rotate", `u64` down to `u32`.
///
/// The top 5 bits of `state` choose the rotation (higher bits have better
/// randomness quality); the next 32 bits, after an xor-shift by
/// `(32+5)/2 == 18`, become the value that gets rotated.
#[inline]
#[must_use]
pub const fn xsh_rr_u64_to_u32(state: u64) -> u32 {
  // Rotation amount lives in bits 59..64.
  let rot = (state >> 59) as u32;
  // Xor-shift by 18, then keep bits 27..59 as the 32-bit payload.
  let mixed = ((state ^ (state >> 18)) >> 27) as u32;
  mixed.rotate_right(rot)
}

/// "Xor-shift low bits" then "randomized rotate", `u128` to `u64`.
///
/// Like [`xsh_rr_u64_to_u32`], but at 128-bit size 6 bits select the
/// rotation, and the xor-shift is by exactly 64 bits: a `u128` is typically
/// held in two 64-bit registers, so folding the halves together is just a
/// single `reg1 ^ reg2`.
#[inline]
#[must_use]
pub const fn xsl_rr_u128_to_u64(state: u128) -> u64 {
  // Rotation amount lives in bits 122..128.
  let rot = (state >> 122) as u32;
  // Fold the high half into the low half.
  let folded = ((state >> 64) ^ state) as u64;
  folded.rotate_right(rot)
}

/// Returns `k` with probability `2^(-k-1)`, a "binary exponential
/// distribution".
///
/// Counts the trailing zeros of a draw from `f`; an all-zero draw counts as
/// 32 trailing zeros and triggers another draw.
#[inline]
#[must_use]
pub fn next_binary_exp_distr32<F: FnMut() -> u32>(mut f: F) -> u32 {
  // Based on a function provided by <https://github.com/orlp>
  let mut zero_words: u32 = 0;
  loop {
    let r = f();
    if r != 0 {
      // Each fully-zero word drawn before this one adds 32 to the count.
      return zero_words * 32 + r.trailing_zeros();
    }
    zero_words += 1;
  }
}

134/// Generates an `f32` in the signed or unsigned unit range.
135///
136/// * signed: `[-1.0, 1.0]`
137/// * unsigned: `[0.0, 1.0]`
138#[inline]
139#[must_use]
140pub fn ieee754_random_f32<F: FnMut() -> u32>(mut f: F, signed: bool) -> f32 {
141  // This function provided by <https://github.com/orlp>
142
143  // Returns random number in [0, 1] or [-1, 1] depending on signed.
144  let bit_width = 32;
145  let exponent_bias = 127;
146  let num_mantissa_bits = 23;
147  let num_rest_bits = bit_width - num_mantissa_bits - 1 - signed as i32;
148  let r: u32 = f();
149
150  debug_assert!(num_rest_bits >= 0);
151  debug_assert!(core::mem::size_of::<u32>() * 8 == bit_width as _);
152
153  let mantissa = r >> (bit_width - num_mantissa_bits);
154  let (sign_mask, rand_bit, rest_bits);
155  if signed {
156    sign_mask = r << (bit_width - 1);
157    rand_bit = (r & 2) != 0;
158    rest_bits = (r >> 2) & ((1 << num_rest_bits) - 1);
159  } else {
160    sign_mask = 0;
161    rand_bit = (r & 1) != 0;
162    rest_bits = (r >> 1) & ((1 << num_rest_bits) - 1);
163  }
164
165  // If our mantissa is zero, half of the time we must increase our exponent.
166  let increment_exponent = (mantissa == 0 && rand_bit) as i32;
167
168  // We can usually reuse `rest_bits` to save more calls to the rng.
169  let computed_rest_bits: i32 = if rest_bits > 0 {
170    rest_bits.trailing_zeros() as i32
171  } else {
172    num_rest_bits + next_binary_exp_distr32(&mut f) as i32
173  };
174  let mut exponent: i32 = -1 + increment_exponent - computed_rest_bits;
175
176  // It is very unlikely our exponent is invalid at this point, but keep
177  // regenerating it until it is valid.
178  while exponent < -exponent_bias || exponent > 0 {
179    exponent = -1 + increment_exponent - next_binary_exp_distr32(&mut f) as i32;
180  }
181
182  let final_exponent = ((exponent + exponent_bias) as u32) << num_mantissa_bits;
183  f32::from_bits(sign_mask | final_exponent | mantissa)
184}
185
186/// Generates an `f64` in the signed or unsigned unit range.
187///
188/// * signed: `[-1.0, 1.0]`
189/// * unsigned: `[0.0, 1.0]`
190#[inline]
191#[must_use]
192pub fn ieee754_random_f64<F: FnMut() -> u32>(mut f: F, signed: bool) -> f64 {
193  // This function provided by <https://github.com/orlp>
194
195  // Returns random number in [0, 1] or [-1, 1] depending on signed.
196  let bit_width = 64;
197  let exponent_bias = 1023;
198  let num_mantissa_bits = 52;
199  let num_rest_bits = bit_width - num_mantissa_bits - 1 - signed as i32;
200  let r: u64 = ((f() as u64) << 32) | (f() as u64);
201
202  debug_assert!(num_rest_bits >= 0);
203  debug_assert!(core::mem::size_of::<u32>() * 8 == bit_width as _);
204
205  let mantissa = r >> (bit_width - num_mantissa_bits);
206  let (sign_mask, rand_bit, rest_bits);
207  if signed {
208    sign_mask = r << (bit_width - 1);
209    rand_bit = (r & 2) != 0;
210    rest_bits = (r >> 2) & ((1 << num_rest_bits) - 1);
211  } else {
212    sign_mask = 0;
213    rand_bit = (r & 1) != 0;
214    rest_bits = (r >> 1) & ((1 << num_rest_bits) - 1);
215  }
216
217  // If our mantissa is zero, half of the time we must increase our exponent.
218  let increment_exponent = (mantissa == 0 && rand_bit) as i32;
219
220  // We can usually reuse `rest_bits` to save more calls to the rng.
221  let computed_rest_bits: i32 = if rest_bits > 0 {
222    rest_bits.trailing_zeros() as i32
223  } else {
224    num_rest_bits + next_binary_exp_distr32(&mut f) as i32
225  };
226  let mut exponent: i32 = -1 + increment_exponent - computed_rest_bits;
227
228  // It is very unlikely our exponent is invalid at this point, but keep
229  // regenerating it until it is valid.
230  while exponent < -exponent_bias || exponent > 0 {
231    exponent = -1 + increment_exponent - next_binary_exp_distr32(&mut f) as i32;
232  }
233
234  let final_exponent = ((exponent + exponent_bias) as u64) << num_mantissa_bits;
235  f64::from_bits(sign_mask | final_exponent | mantissa)
236}