// lexe_crypto/rng.rs

1//! Random number generation utilities
2
3use std::{
4    cell::Cell,
5    num::NonZeroU32,
6    ops::{Index, Range},
7};
8
9#[cfg(any(test, feature = "test-utils"))]
10use proptest::{
11    arbitrary::{Arbitrary, any},
12    strategy::{BoxedStrategy, Strategy},
13};
14pub use rand_core::RngCore;
15use rand_core::{CryptoRng, SeedableRng, le::read_u32_into};
16use ring::rand::SecureRandom;
17
18const RAND_ERROR_CODE: NonZeroU32 =
19    NonZeroU32::new(rand_core::Error::CUSTOM_START).unwrap();
20
/// A succinct trait alias for a Cryptographically Secure PRNG.
pub trait Crng: RngCore + CryptoRng {}

// Blanket impl: anything that is both `RngCore` and `CryptoRng` is a `Crng`.
impl<R: RngCore + CryptoRng> Crng for R {}
25
/// Minimal extension trait on [`rand_core::RngCore`], containing small utility
/// methods for generating random values.
pub trait RngExt: RngCore {
    /// Generate `N` uniformly random bytes.
    fn gen_bytes<const N: usize>(&mut self) -> [u8; N];
    /// Generate a uniformly random [`u32`].
    fn gen_u32(&mut self) -> u32;
    /// Generate a uniformly random [`u64`].
    fn gen_u64(&mut self) -> u64;

    /// Generate a uniformly random [`u8`].
    #[inline]
    fn gen_u8(&mut self) -> u8 {
        u8::from_le_bytes(self.gen_bytes())
    }

    /// Generate a uniformly random [`u16`].
    #[inline]
    fn gen_u16(&mut self) -> u16 {
        u16::from_le_bytes(self.gen_bytes())
    }

    /// Generate a uniformly random [`u128`].
    #[inline]
    fn gen_u128(&mut self) -> u128 {
        u128::from_le_bytes(self.gen_bytes())
    }

    /// Generate `true` with probability `p`
    #[inline]
    fn gen_bool(&mut self, p: f32) -> bool {
        // `gen_f32` is uniform in [0, 1), so P(sample < p) = p.
        self.gen_f32() < p
    }

    /// Flip a coin. Generate `true` with probability `0.5`
    #[inline]
    fn gen_boolean(&mut self) -> bool {
        // Test the low bit of a random u32.
        self.gen_u32() & 0x1 == 0
    }

    /// Generates an [`f32`] uniformly distributed in `[0, 1)`.
    #[inline]
    fn gen_f32(&mut self) -> f32 {
        // Take the upper 24 bits of a random u32, convert to f32, then scale to
        // [0, 1). 24 bits is the maximum integer precision of f32 (23-bit
        // mantissa + implicit bit).
        const SCALE: f32 = 1.0 / (1u32 << 24) as f32;
        (self.gen_u32() >> 8) as f32 * SCALE
    }

    /// Generates an [`f64`] uniformly distributed in `[0, 1)`.
    #[inline]
    fn gen_f64(&mut self) -> f64 {
        // Take the upper 53 bits of a random u64, convert to f64, then scale to
        // [0, 1). 53 bits is the maximum integer precision of f64 (52-bit
        // mantissa + implicit bit).
        const SCALE: f64 = 1.0 / (1u64 << 53) as f64; // 2^-53
        (self.gen_u64() >> 11) as f64 * SCALE
    }

    /// Generates an [`i32`] in `[range.start, range.end)`. Has slight modulo
    /// bias for large ranges. See `fastmap32`.
    fn gen_range_i32(&mut self, range: Range<i32>) -> i32 {
        // Compute the span in i64 to avoid overflow (e.g. i32::MIN..i32::MAX);
        // the span of any i32 range always fits in a u32.
        let span = ((range.end as i64) - (range.start as i64)) as u32;
        (fastmap32(self.gen_u32(), span) as i32).wrapping_add(range.start)
    }

    /// Generates a [`u32`] in `[range.start, range.end)`. Has slight modulo
    /// bias for large ranges. See `fastmap32`.
    fn gen_range_u32(&mut self, range: Range<u32>) -> u32 {
        let span = range.end - range.start;
        fastmap32(self.gen_u32(), span) + range.start
    }

    /// Generates a [`u64`] in `[range.start, range.end)`. Has slight modulo
    /// bias for large ranges. See `fastmap64`.
    fn gen_range_u64(&mut self, range: Range<u64>) -> u64 {
        let span = range.end - range.start;
        fastmap64(self.gen_u64(), span) + range.start
    }

    /// Generates a [`usize`] in `[range.start, range.end)`. Has slight modulo
    /// bias for large ranges. See `fastmap64`.
    fn gen_range_usize(&mut self, range: Range<usize>) -> usize {
        // NOTE: casts assume usize is at most 64 bits.
        let span = (range.end as u64) - (range.start as u64);
        (fastmap64(self.gen_u64(), span) + range.start as u64) as usize
    }

    /// Generate `N` (nearly uniformly random) alphanumeric (0-9, A-Z, a-z)
    /// bytes.
    fn gen_alphanum_bytes<const N: usize>(&mut self) -> [u8; N] {
        let mut out = self.gen_bytes();
        encode_alphanum_bytes(&mut out);
        out
    }

    /// Generate `n` (nearly uniformly random) alphanumeric bytes into a
    /// heap-allocated `Vec`. Test-only convenience.
    #[cfg(any(test, feature = "test-utils"))]
    fn gen_alphanum_vec(&mut self, n: usize) -> Vec<u8> {
        let mut out = vec![0u8; n];
        self.fill_bytes(&mut out);
        encode_alphanum_slice(&mut out);
        out
    }
}
124
125impl<R: RngCore> RngExt for R {
126    fn gen_bytes<const N: usize>(&mut self) -> [u8; N] {
127        let mut out = [0u8; N];
128        self.fill_bytes(&mut out);
129        out
130    }
131
132    #[inline]
133    fn gen_u32(&mut self) -> u32 {
134        self.next_u32()
135    }
136
137    #[inline]
138    fn gen_u64(&mut self) -> u64 {
139        self.next_u64()
140    }
141}
142
// Extension trait adding rng-powered helpers (`choose`, `shuffle`) to
// indexable collections; currently implemented for slices.
#[allow(clippy::len_without_is_empty)]
pub trait RngSliceExt: Index<usize> {
    /// The number of elements in the collection.
    fn len(&self) -> usize;

    /// Sample an element from `self` uniformly at random. Returns `None` if
    /// empty.
    fn choose<R: RngCore>(&self, rng: &mut R) -> Option<&Self::Output> {
        let len = self.len();
        if len == 0 {
            None
        } else {
            // `gen_range_usize` has slight bias for huge lengths; acceptable
            // for non-cryptographic sampling.
            Some(&self[rng.gen_range_usize(0..len)])
        }
    }

    /// Shuffle the elements in `self`. Very fast, but has slight modulo bias
    /// for huge slices, so don't use for crypto. Will panic if the slice has
    /// `u32::MAX` or more elements.
    fn shuffle<R: RngCore>(&mut self, rng: &mut R);
}
163
164impl<T> RngSliceExt for [T] {
165    fn len(&self) -> usize {
166        self.len()
167    }
168
169    fn shuffle<R: RngCore>(&mut self, rng: &mut R) {
170        assert!(self.len() < (u32::MAX as usize));
171
172        for i in (1..self.len()).rev() {
173            // invariant: elements with index > i have been locked in place.
174            let n = (i as u32) + 1;
175            let j = fastmap32(rng.next_u32(), n) as usize;
176            self.swap(i, j);
177        }
178    }
179}
180
181#[inline(never)]
182fn encode_alphanum_bytes<const N: usize>(inout: &mut [u8; N]) {
183    for x in inout.iter_mut() {
184        *x = encode_alphanum_byte(*x);
185    }
186}
187
// Slice variant of `encode_alphanum_bytes` for test helpers with a runtime
// length.
#[cfg(any(test, feature = "test-utils"))]
#[inline(never)]
fn encode_alphanum_slice(inout: &mut [u8]) {
    inout.iter_mut().for_each(|b| *b = encode_alphanum_byte(*b));
}
195
/// "project" a full byte `x ∈ [0, 255]` into the alphanumeric ASCII character
/// range `(['0','9'] ∪ ['A','Z'] ∪ ['a','z'])`.
///
/// The projection is slightly biased (e.g., P('0') = 5/256 vs P('1') = 4/256),
/// to avoid a rejection sampling loop and improve codegen.
#[inline(always)]
const fn encode_alphanum_byte(x: u8) -> u8 {
    // Size of the target alphabet: 10 digits + 26 upper + 26 lower.
    const ALPHABET_LEN: u16 = 62;

    // Multiply-shift reduction (`fastmap8`, inlined): maps x ∈ [0, 255] onto
    // idx ∈ [0, 62) with slight bias.
    let idx = ((x as u16).wrapping_mul(ALPHABET_LEN) >> 8) as u8;

    // Lay the three ASCII runs end to end, hopping the punctuation gaps:
    // 0 1 2 ... 9 : ; ... ? @ A B ... Y Z [ \ ... _ ` a b ... y z
    let mut c = idx + b'0';
    if idx >= 10 {
        c += b'A' - b'9' - 1; // hop the ':'..'@' gap
    }
    if idx >= 10 + 26 {
        c += b'a' - b'Z' - 1; // hop the '['..'`' gap
    }
    c
}
218
/// A cryptographically secure random number generator.
//
// Compatibility wrapper so we can use `ring`'s PRG with `rand` traits.
// Cloning is cheap: `SystemRandom` is a handle to the OS entropy source.
#[derive(Clone, Debug)]
pub struct SysRng(ring::rand::SystemRandom);
224
225impl SysRng {
226    pub fn new() -> Self {
227        Self(ring::rand::SystemRandom::new())
228    }
229}
230
231impl Default for SysRng {
232    fn default() -> Self {
233        Self::new()
234    }
235}
236
/// [`ring::rand::SystemRandom`] is a cryptographically secure PRG, so it is
/// sound to mark this wrapper as a [`CryptoRng`].
impl CryptoRng for SysRng {}
239
240impl RngCore for SysRng {
241    #[inline]
242    fn next_u32(&mut self) -> u32 {
243        rand_core::impls::next_u32_via_fill(self)
244    }
245
246    #[inline]
247    fn next_u64(&mut self) -> u64 {
248        rand_core::impls::next_u64_via_fill(self)
249    }
250
251    fn fill_bytes(&mut self, dest: &mut [u8]) {
252        self.try_fill_bytes(dest).expect("ring SystemRandom failed")
253    }
254
255    fn try_fill_bytes(
256        &mut self,
257        dest: &mut [u8],
258    ) -> Result<(), rand_core::Error> {
259        self.0
260            .fill(dest)
261            // just some random error code. ring's error type here is
262            // empty/unspecified anyway, so not a big deal.
263            .map_err(|_| rand_core::Error::from(RAND_ERROR_CODE))
264    }
265}
266
/// A small, fast, _non-cryptographic_ rng with decent statistical properties.
/// Useful for sampling non-security sensitive data or as a deterministic RNG
/// for tests (instead of the [`SysRng`] above, which uses the global OS RNG).
///
/// The implementation is the same as [`Xoroshiro64Star`].
///
/// [`Xoroshiro64Star`]: https://github.com/rust-random/rngs/blob/master/rand_xoshiro/src/xoroshiro64star.rs
#[derive(Debug)]
#[cfg_attr(any(test, feature = "test-utils"), derive(Clone))]
pub struct FastRng {
    // The 64-bit Xoroshiro64Star state, split into two 32-bit words.
    // An all-zero state is pathological (see `from_seed`) and is avoided by
    // the constructors.
    s0: u32,
    s1: u32,
}
280
281impl FastRng {
282    pub fn new() -> Self {
283        Self {
284            s0: 0xdeadbeef,
285            s1: 0xf00baa44,
286        }
287    }
288
289    /// Seed a new [`FastRng`] from an existing [`SysRng`].
290    pub fn from_sysrng(sys_rng: &mut SysRng) -> Self {
291        let seed = sys_rng.gen_u64();
292        Self::seed_from_u64(seed)
293    }
294
295    pub fn from_u64(s: u64) -> Self {
296        Self::seed_from_u64(s)
297    }
298}
299
300impl Default for FastRng {
301    fn default() -> Self {
302        Self::new()
303    }
304}
305
/// Only enable [`CryptoRng`] for this rng when testing.
/// (`FastRng` is NOT cryptographically secure; this only lets tests use it
/// where a [`Crng`] is required.)
#[cfg(any(test, feature = "test-utils"))]
impl CryptoRng for FastRng {}
309
310impl RngCore for FastRng {
311    #[inline]
312    fn next_u32(&mut self) -> u32 {
313        let (s0, s1, r) = xoroshiro64star_next_u32(self.s0, self.s1);
314        self.s0 = s0;
315        self.s1 = s1;
316        r
317    }
318
319    #[inline]
320    fn next_u64(&mut self) -> u64 {
321        rand_core::impls::next_u64_via_u32(self)
322    }
323
324    #[inline]
325    fn fill_bytes(&mut self, dest: &mut [u8]) {
326        rand_core::impls::fill_bytes_via_next(self, dest);
327    }
328
329    #[inline]
330    fn try_fill_bytes(
331        &mut self,
332        dest: &mut [u8],
333    ) -> Result<(), rand_core::Error> {
334        self.fill_bytes(dest);
335        Ok(())
336    }
337}
338
/// The core rng step that generates the next random output for [`FastRng`] and
/// [`ThreadFastRng`].
///
/// Takes the current `(s0, s1)` state and returns `(s0', s1', output)`: the
/// successor state plus this step's `u32` sample (Xoroshiro64Star).
#[inline(always)]
fn xoroshiro64star_next_u32(s0: u32, s1: u32) -> (u32, u32, u32) {
    // The "star" output scrambler: multiply s0 by a 32-bit constant.
    let out = s0.wrapping_mul(0x9e3779bb);

    // Xoroshiro state transition with rotation constants (26, 9, 13).
    let mixed = s1 ^ s0;
    let next_s0 = s0.rotate_left(26) ^ mixed ^ (mixed << 9);
    let next_s1 = mixed.rotate_left(13);

    (next_s0, next_s1, out)
}
349
350impl SeedableRng for FastRng {
351    type Seed = [u8; 8];
352
353    fn from_seed(seed: Self::Seed) -> Self {
354        // zero is a pathological case for Xoroshiro64Star, just map it to
355        // the default seed
356        if seed == [0u8; 8] {
357            Self::new()
358        } else {
359            let mut parts = [0u32, 0u32];
360            read_u32_into(&seed, &mut parts);
361            Self {
362                s0: parts[0],
363                s1: parts[1],
364            }
365        }
366    }
367}
368
// Let proptest generate randomly-seeded `FastRng`s as test inputs.
#[cfg(any(test, feature = "test-utils"))]
impl Arbitrary for FastRng {
    type Parameters = ();
    type Strategy = BoxedStrategy<Self>;

    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        // We use `no_shrink` here since shrinking an RNG seed won't produce
        // "simpler" output samples. This setting lets `proptest` know not to
        // waste time trying to shrink the rng seed.
        any::<[u8; 8]>()
            .no_shrink()
            .prop_map(FastRng::from_seed)
            .boxed()
    }
}
384
/// A thread-local [`FastRng`] that is seeded from the global [`SysRng`] the
/// first time a thread uses it.
///
/// Like `FastRng`, it's a small, fast, and _non-cryptographic_ rng with decent
/// statistical properties. Useful for sampling non-security sensitive data.
///
/// Shines in multithreaded/async scenarios where we don't want to have to
/// synchronize on a single `Mutex<FastRng>` or deal with handing out `FastRng`s
/// to each thread. Instead we let thread-locals handle all the drudgery.
///
/// This is a zero-sized handle; all state lives in a `thread_local!` cell.
pub struct ThreadFastRng(());
395
396impl ThreadFastRng {
397    #[inline]
398    pub fn new() -> Self {
399        Self(())
400    }
401
402    /// Set the current thread local rng seed.
403    pub fn seed(seed: u64) {
404        // splitmix64
405        // <https://github.com/rust-random/rngs/blob/master/rand_xoshiro/src/splitmix64.rs#L48>
406        let mut z = seed.wrapping_add(0x9e3779b97f4a7c15);
407        z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
408        z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
409        z = z ^ (z >> 31);
410        THREAD_RNG_STATE.set(z)
411    }
412}
413
impl Default for ThreadFastRng {
    /// Equivalent to [`ThreadFastRng::new`].
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
420
/// Only enable [`CryptoRng`] for this rng when testing.
/// (`ThreadFastRng` is NOT cryptographically secure; this only lets tests use
/// it where a [`Crng`] is required.)
#[cfg(any(test, feature = "test-utils"))]
impl CryptoRng for ThreadFastRng {}
424
// Can't put a `FastRng` here directly, since it's not `Copy`
// (and shouldn't impl `Copy`).
//
// Using `const { .. }` with a noop-drop type (allegedly) lets us
// use a faster thread_local impl.
//
// A value of zero means "not yet seeded"; see `ThreadFastRng::next_u32`.
thread_local! {
    // clippy errors when built for SGX without this lint line
    // TODO(phlip9): incorrect lint, remove when clippy not broken
    #[allow(clippy::missing_const_for_thread_local)]
    static THREAD_RNG_STATE: Cell<u64> = const { Cell::new(0) };
}
436
impl RngCore for ThreadFastRng {
    /// Step this thread's Xoroshiro64Star state and return the next output,
    /// seeding the state from the OS rng on first use.
    fn next_u32(&mut self) -> u32 {
        let mut s01 = THREAD_RNG_STATE.get();

        // Need to seed this thread-local rng. Zero is the "unseeded"
        // sentinel (and also the pathological all-zero xoroshiro state).
        if s01 == 0 {
            // Mark this branch cold to encourage better codegen, since
            // seeding should only happen once per thread.
            #[cold]
            #[inline(never)]
            fn reseed() -> u64 {
                SysRng::new().gen_u64()
            }
            s01 = reseed();
        }

        // unpack state: high word is s0, low word is s1
        let s0 = (s01 >> 32) as u32;
        let s1 = s01 as u32;

        // gen next state and output
        let (s0, s1, r) = xoroshiro64star_next_u32(s0, s1);

        // repack state and update
        let s01 = ((s0 as u64) << 32) | (s1 as u64);
        THREAD_RNG_STATE.set(s01);

        r
    }

    /// Two `next_u32` calls glued together.
    #[inline]
    fn next_u64(&mut self) -> u64 {
        rand_core::impls::next_u64_via_u32(self)
    }

    #[inline]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        rand_core::impls::fill_bytes_via_next(self, dest);
    }

    /// Never fails; provided for `RngCore` completeness.
    #[inline]
    fn try_fill_bytes(
        &mut self,
        dest: &mut [u8],
    ) -> Result<(), rand_core::Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}
486
/// Map `x` uniformly into the range `[0, n)`. Has slight modulo bias for large
/// ranges.
///
/// See: <https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/>
#[inline(always)]
const fn fastmap8(x: u8, n: u8) -> u8 {
    // Widen to u16 so the product can't overflow, then keep the high byte.
    let wide = (x as u16).wrapping_mul(n as u16);
    (wide >> 8) as u8
}
495
/// Map `x` uniformly into the range `[0, n)`. Has slight modulo bias for large
/// ranges.
///
/// See: <https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/>
#[inline(always)]
const fn fastmap32(x: u32, n: u32) -> u32 {
    // Widen to u64 so the product can't overflow, then keep the high word.
    let wide = (x as u64).wrapping_mul(n as u64);
    (wide >> 32) as u32
}
504
/// Map `x` uniformly into the range `[0, n)`. Has slight modulo bias for large
/// ranges.
///
/// See: <https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/>
#[inline(always)]
const fn fastmap64(x: u64, n: u64) -> u64 {
    // Widen to u128 so the product can't overflow, then keep the high word.
    let wide = (x as u128).wrapping_mul(n as u128);
    (wide >> 64) as u64
}
513
#[cfg(test)]
mod test {
    use proptest::{prop_assert, proptest};

    use super::*;

    /// `encode_alphanum_byte` must hit exactly the 62 alphanumeric ASCII
    /// characters over all 256 input bytes, and nothing else.
    #[test]
    fn test_encode_alphanum_byte() {
        // Histogram of outputs over every possible input byte.
        let mut mset = [0u8; 256];
        for c in 0..=255 {
            let o = encode_alphanum_byte(c);
            mset[o as usize] += 1;
        }

        // Collect the set of characters that were produced at least once.
        let actual_alphabet = mset
            .as_slice()
            .iter()
            .enumerate()
            .filter(|(_idx, count)| **count != 0)
            .map(|(idx, _count)| (idx as u8) as char)
            .collect::<String>();

        let expected_alphabet =
            "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
        assert_eq!(&actual_alphabet, expected_alphabet);
        assert_eq!(actual_alphabet.len(), 10 + 26 + 26);
    }

    /// Property test: `gen_alphanum_bytes` only ever produces ASCII
    /// alphanumerics, for arbitrary rng seeds.
    #[test]
    fn test_gen_alphanum_bytes() {
        proptest!(|(mut rng: FastRng)| {
            let alphanum = rng.gen_alphanum_bytes::<16>();
            let alphanum_str = std::str::from_utf8(alphanum.as_slice()).unwrap();
            prop_assert!(alphanum_str.chars().all(|c| c.is_ascii_alphanumeric()));
        });
    }

    /// `gen_f32`/`gen_f64` outputs must lie in `[0, 1)`.
    #[test]
    fn test_gen_f32_and_f64() {
        let mut rng = FastRng::from_u64(202603111712);
        for _ in 0..1000 {
            assert!((0.0..1.0).contains(&rng.gen_f32()));
            assert!((0.0..1.0).contains(&rng.gen_f64()));
        }
    }
}