// dandelion/lib.rs
1#![doc = include_str!("../README.md")]
2#![no_std]
3
4#[cfg(feature = "std")]
5extern crate std;
6
7use core::fmt::Debug;
8use core::fmt::Formatter;
9use core::fmt;
10use core::hint::cold_path;
11use core::hint::select_unpredictable;
12use core::mem::MaybeUninit;
13use core::num::NonZeroU128;
14use core::num::NonZeroU32;
15use core::num::NonZeroU64;
16
/// A high performance non-cryptographic random number generator.
//
// Invariant: `state` is never zero. Using `NonZeroU128` also gives
// `Option<Rng>` a free niche.
#[derive(Clone)]
pub struct Rng { state: NonZeroU128 }
20
// Packs `x` into the low half and `y` into the high half of a `u128`. The
// two halves occupy disjoint bits, so OR here is equivalent to XOR.
#[inline(always)]
const fn concat(x: u64, y: u64) -> u128 {
  (x as u128) | ((y as u128) << 64)
}
25
// Extracts the low 64 bits of `x`.
#[inline(always)]
const fn lo(x: u128) -> u64 {
  (x & u64::MAX as u128) as u64
}
30
// Extracts the high 64 bits of `x`.
#[inline(always)]
const fn hi(x: u128) -> u64 {
  let shifted = x >> 64;
  shifted as u64
}
35
// Arithmetic (sign-extending) shift right of `x` by `a` bits.
#[inline(always)]
const fn asr(x: u64, a: usize) -> u64 {
  let signed = x as i64;
  (signed >> a) as u64
}
40
// Logical shift left of `x` by `a` bits; high bits are discarded.
#[inline(always)]
const fn lsl(x: u64, a: usize) -> u64 {
  x << a
}
45
// The high 64 bits of the full 128-bit product `x * y`.
#[inline(always)]
const fn mulhi(x: u64, y: u64) -> u64 {
  let wide = (x as u128) * (y as u128);
  (wide >> 64) as u64
}
50
51impl Rng {
  /// Creates a random number generator with an initial state derived by
  /// hashing the given seed.
  pub const fn new(seed: NonZeroU128) -> Self {
    // The hash uses the multiplier
    //
    //   M = round_nearest_odd(EULER_MASCHERONI * 2¹²⁸)
    //
    // The Euler-Mascheroni constant was selected because it is a well-known
    // number in the range (0.5, 1.0).
    const M: u128 = 0x93c4_67e3_7db0_c7a4_d1be_3f81_0152_cb57;
    let x = seed.get();
    let x = x.wrapping_mul(M);
    let x = x.swap_bytes();
    let x = x.wrapping_mul(M);
    let x = x.swap_bytes();
    let x = x.wrapping_mul(M);
    // SAFETY: `M` is odd, so multiplication by `M` modulo 2¹²⁸ is a
    // bijection, and `swap_bytes` is a bijection; a nonzero seed therefore
    // hashes to a nonzero `x`.
    let s = unsafe { NonZeroU128::new_unchecked(x) };
    Self { state: s }
  }
71
  /// Creates a random number generator with an initial state derived by
  /// hashing the given `u64` seed.
  pub const fn from_u64(seed: u64) -> Self {
    // The high half is fixed to 1 so the value is nonzero for every seed.
    let s = concat(seed, 1);
    // SAFETY: the high 64 bits of `s` are 1, so `s` is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self::new(s)
  }
79
  /// Retrieves the current state of the random number generator.
  ///
  /// The returned value can later be passed to [Rng::from_state] to resume
  /// the generator from exactly this point.
  #[inline(always)]
  pub const fn state(&self) -> NonZeroU128 {
    self.state
  }
85
  /// Creates a random number generator with a particular initial state.
  ///
  /// <div class="warning">
  ///
  /// If you want to deterministically initialize a generator from a small
  /// integer or other weak seed, you should *NOT* use this function and should
  /// instead use [Rng::new] or [Rng::from_u64] which hash their arguments.
  ///
  /// </div>
  #[inline(always)]
  pub const fn from_state(state: NonZeroU128) -> Self {
    // Any nonzero state is valid; no hashing is applied here.
    Self { state }
  }
99
  /// Creates a random number generator with a random seed retrieved from the
  /// operating system.
  ///
  /// # Panics
  ///
  /// Panics if the operating system fails to provide random bytes.
  #[cfg(feature = "getrandom")]
  #[inline(never)]
  #[cold]
  pub fn from_operating_system() -> Self {
    let mut buf = [0; 16];
    getrandom::fill(&mut buf).expect("getrandom::fill failed!");
    // Setting the low bit forces the state to be nonzero; the one-bit bias is
    // irrelevant for a random seed.
    let s = 1 | u128::from_le_bytes(buf);
    // SAFETY: `s` has its lowest bit set, so it is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self { state: s }
  }
112
  /// Samples a `bool` from the Bernoulli distribution where `true` appears
  /// with probability approximately equal to `p`.
  ///
  /// A probability `p` <= 0 or NaN is treated as 0, and a probability `p` >= 1
  /// is treated as 1.
  #[inline(always)]
  pub fn bernoulli(&mut self, p: f64) -> bool {
    // Compute n = p · 2⁶⁴ with a saturating cast; the constant below is 2⁶⁴
    // as a `f64`. The specification of the float-to-integer cast in Rust is
    // such that
    // - rounding is toward zero,
    // - NaN produces zero, and
    // - out of range values saturate.
    //
    // Also, note that `n == u64::MAX` if and only if `p >= 1`, so we use that
    // as a sentinel value.
    //
    // On aarch64 we get a nice sequence:
    //
    //   fcvtzu x8, d0, #64
    //   ...
    //   cmn x8, #1
    //   ccmp x9, x8, #0, ne
    //   cset w0, lo
    //
    // However on x86-64, the saturating cast is currently compiled a bit
    // inefficiently by rustc + llvm, with an unnecessary check of the upper
    // bound, as vcvttsd2usi should already produce u64::MAX for any out of
    // range value.
    let n = (p * f64::from_bits(0x43f0_0000_0000_0000)) as u64;
    let x = self.u64();
    x < n || n == u64::MAX
  }
144
145  /// Samples a `bool` from the uniform distribution.
146  #[inline(always)]
147  pub fn bool(&mut self) -> bool {
148    self.i64() < 0
149  }
150
151  /// Samples a `i32` from the uniform distribution.
152  #[inline(always)]
153  pub fn i32(&mut self) -> i32 {
154    self.u64() as i32
155  }
156
157  /// Samples a `i64` from the uniform distribution.
158  #[inline(always)]
159  pub fn i64(&mut self) -> i64 {
160    self.u64() as i64
161  }
162
163  /// Samples a `i128` from the uniform distribution.
164  #[inline(always)]
165  pub fn i128(&mut self) -> i128 {
166    self.u128() as i128
167  }
168
169  /// Samples a `u32` from the uniform distribution.
170  #[inline(always)]
171  pub fn u32(&mut self) -> u32 {
172    self.u64() as u32
173  }
174
  /// Samples a `u64` from the uniform distribution.
  #[inline(always)]
  pub fn u64(&mut self) -> u64 {
    // This is the core generator.
    //
    // The state update is a xorshift-style linear map on the two 64-bit
    // halves: (x, y) ↦ (y ^ (x ≫ 4), x ^ (y ≪ 7)), where the right shift is
    // arithmetic and the left shift is logical. The output below mixes the
    // halves non-linearly via the square of `x`.
    let s = self.state.get();
    let x = lo(s);
    let y = hi(s);
    let s = concat(y ^ asr(x, 4), x ^ lsl(y, 7));
    // SAFETY: the state update is relied upon to be an invertible linear map
    // over GF(2), so a nonzero state never transitions to zero.
    // NOTE(review): this hinges on the map's invertibility — confirm against
    // the generator's design notes.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    self.state = s;
    y.wrapping_add(x.wrapping_mul(x)) ^ mulhi(x, x)
  }
187
188  /// Samples a `u128` from the uniform distribution.
189  #[inline(always)]
190  pub fn u128(&mut self) -> u128 {
191    let x = self.u64();
192    let y = self.u64();
193    concat(x, y)
194  }
195
196  /// Samples a `NonZeroU32` from the uniform distribution.
197  #[inline(always)]
198  pub fn non_zero_u32(&mut self) -> NonZeroU32 {
199    loop {
200      if let Some(x) = NonZeroU32::new(self.u32()) {
201        break x
202      }
203    }
204  }
205
206  /// Samples a `NonZeroU64` from the uniform distribution.
207  #[inline(always)]
208  pub fn non_zero_u64(&mut self) -> NonZeroU64 {
209    loop {
210      if let Some(x) = NonZeroU64::new(self.u64()) {
211        break x
212      }
213    }
214  }
215
216  /// Samples a `NonZeroU128` from the uniform distribution.
217  #[inline(always)]
218  pub fn non_zero_u128(&mut self) -> NonZeroU128 {
219    loop {
220      if let Some(x) = NonZeroU128::new(self.u128()) {
221        break x
222      }
223    }
224  }
225
  /// Samples a `u32` from the uniform distribution over the range `0 ... n`.
  ///
  /// The upper bound is inclusive.
  #[inline(always)]
  pub fn bounded_u32(&mut self, n: u32) -> u32 {
    // Cf. `bounded_u64`. Because the arithmetic is widened to 64 bits,
    // `m = n + 1` can never wrap, so the `m == 0` special case handled in
    // `bounded_u64` cannot arise here.
    let x = self.u64();
    let n = n as u64;
    let m = n + 1;
    // Fixed-point view of `x * m`: the high half `a` is the candidate sample
    // and the low half `b` is the remaining fraction.
    let a = mulhi(x, m);
    let b = x.wrapping_mul(m);
    // If adding `n` to the fraction cannot carry, `a` is already exact;
    // otherwise extend the fraction with more random words until the carry is
    // decided. NOTE(review): this looks like a nearly-divisionless debiasing
    // scheme in the style of Lemire's method — confirm against the crate's
    // design notes.
    if b.overflowing_add(n).1 {
      debug_assert!(m != 0);
      let mut r = b;
      loop {
        let y = self.u64();
        let c = mulhi(y, m);
        let d = y.wrapping_mul(m);
        let s = r.overflowing_add(c);
        let w = s.0;
        let p = s.1;
        // Once the extended fraction is not all-ones, the carry `p` is final
        // and is folded into the candidate.
        if w != u64::MAX { break (a + p as u64) as u32 }
        r = d;
        cold_path();
      }
    } else {
      a as u32
    }
  }
255
  /// Samples a `u64` from the uniform distribution over the range `0 ... n`.
  ///
  /// The upper bound is inclusive.
  #[inline(always)]
  pub fn bounded_u64(&mut self, n: u64) -> u64 {
    let x = self.u64();
    // `m` is the number of possible outputs; it wraps to zero exactly when
    // `n == u64::MAX`, in which case every `x` is already a valid sample.
    let m = n.wrapping_add(1);
    // Fixed-point view of `x * m`: high half `a` is the candidate sample, low
    // half `b` the remaining fraction.
    let a = mulhi(x, m);
    let b = x.wrapping_mul(m);
    // Branch-free select of the full-range case: when `m == 0`, return `x`
    // itself. (In that case `b == 0`, so the overflow test below is false and
    // `u` is what gets returned.)
    let u = select_unpredictable(m == 0, x, a);
    // If adding `n` to the fraction cannot carry, `a` is already exact;
    // otherwise extend the fraction until the carry is decided.
    // NOTE(review): this looks like a nearly-divisionless debiasing scheme in
    // the style of Lemire's method — confirm against the crate's design notes.
    if b.overflowing_add(n).1 {
      debug_assert!(m != 0);
      let mut r = b;
      loop {
        let y = self.u64();
        let c = mulhi(y, m);
        let d = y.wrapping_mul(m);
        let s = r.overflowing_add(c);
        let w = s.0;
        let p = s.1;
        if w != u64::MAX { break a + p as u64 }
        // NB: We get here with negligible probability, but we include the loop
        // anyway to prevent the compiler from doing the pessimization of
        // if-converting the control flow away.
        r = d;
        cold_path();
      }
    } else {
      u
    }
  }
287
288  /// Samples a `usize` from the uniform distribution over the range `0 ... n`.
289  ///
290  /// The upper bound is inclusive.
291  #[inline(always)]
292  pub fn bounded_usize(&mut self, n: usize) -> usize {
293    match const { usize::BITS } {
294      32 => self.bounded_u32(n as u32) as usize,
295      64 => self.bounded_u64(n as u64) as usize,
296      _ => unimplemented!(),
297    }
298  }
299
300  /// Samples a `i32` from the uniform distribution over the range `a ... b`.
301  ///
302  /// The lower and upper bounds are inclusive, and the range can wrap around
303  /// from `i32::MAX` to `i32::MIN`.
304  #[inline(always)]
305  pub fn range_i32(&mut self, a: i32, b: i32) -> i32 {
306    self.range_u32(a as u32, b as u32) as i32
307  }
308
309  /// Samples a `i64` from the uniform distribution over the range `a ... b`.
310  ///
311  /// The lower and upper bounds are inclusive, and the range can wrap around
312  /// from `i64::MAX` to `i64::MIN`.
313  #[inline(always)]
314  pub fn range_i64(&mut self, a: i64, b: i64) -> i64 {
315    self.range_u64(a as u64, b as u64) as i64
316  }
317
318  /// Samples a `isize` from the uniform distribution over the range `a ... b`.
319  ///
320  /// The lower and upper bounds are inclusive, and the range can wrap around
321  /// from `isize::MAX` to `isize::MIN`.
322  #[inline(always)]
323  pub fn range_isize(&mut self, a: isize, b: isize) -> isize {
324    self.range_usize(a as usize, b as usize) as isize
325  }
326
327  /// Samples a `u32` from the uniform distribution over the range `a ... b`.
328  ///
329  /// The lower and upper bounds are inclusive, and the range can wrap around
330  /// from `u32::MAX` to `u32::MIN`.
331  #[inline(always)]
332  pub fn range_u32(&mut self, a: u32, b: u32) -> u32 {
333    a.wrapping_add(self.bounded_u32(b.wrapping_sub(a)))
334  }
335
336  /// Samples a `u64` from the uniform distribution over the range `a ... b`.
337  ///
338  /// The lower and upper bounds are inclusive, and the range can wrap around
339  /// from `u64::MAX` to `u64::MIN`.
340  #[inline(always)]
341  pub fn range_u64(&mut self, a: u64, b: u64) -> u64 {
342    a.wrapping_add(self.bounded_u64(b.wrapping_sub(a)))
343  }
344
345  /// Samples a `usize` from the uniform distribution over the range `a ... b`.
346  ///
347  /// The lower and upper bounds are inclusive, and the range can wrap around
348  /// from `usize::MAX` to `usize::MIN`.
349  #[inline(always)]
350  pub fn range_usize(&mut self, a: usize, b: usize) -> usize {
351    a.wrapping_add(self.bounded_usize(b.wrapping_sub(a)))
352  }
353
354  /// Samples a `f32` from a distribution that approximates the uniform
355  /// distribution over the real interval [0, 1].
356  ///
357  /// The distribution is the same as the one produced by the following
358  /// procedure:
359  ///
360  /// - Sample a real number from the uniform distribution on [0, 1].
361  /// - Round to the nearest multiple of 2⁻⁶³.
362  /// - Round to a `f32` using the default rounding mode.
363  ///
364  /// Every output, including zero, has a positive sign bit.
365  #[inline(always)]
366  pub fn f32(&mut self) -> f32 {
367    let x = self.i64();
368    let x = f32::from_bits(0x2000_0000) * (x as f32);
369    x.abs()
370  }
371
  /// Samples a `f64` from a distribution that approximates the uniform
  /// distribution over the real interval [0, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [0, 1].
  /// - Round to the nearest multiple of 2⁻⁶³.
  /// - Round to a `f64` using the default rounding mode.
  ///
  /// Every output, including zero, has a positive sign bit.
  #[inline(always)]
  pub fn f64(&mut self) -> f64 {
    // The conversion into a `f64` is two instructions on aarch64:
    //
    //   scvtf d0, x8, #63
    //   fabs d0, d0
    //
    // The constant is 2⁻⁶³ as a `f64`; `abs` clears the sign bit, which also
    // maps -0.0 to +0.0, giving every output a positive sign bit.
    let x = self.i64();
    let x = f64::from_bits(0x3c00_0000_0000_0000) * (x as f64);
    x.abs()
  }
394
  /// Samples a `f32` from a distribution that approximates the uniform
  /// distribution over the real interval [-1, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [-1, 1].
  /// - Round to the nearest multiple of 2⁻⁶².
  /// - Round to a `f32` using the default rounding mode.
  #[inline(always)]
  pub fn biunit_f32(&mut self) -> f32 {
    let x = self.i64();
    // `(x & 1) + (x >> 1)` is x/2 rounded to nearest (NOTE(review): ties
    // appear to round toward +∞ — confirm); the multiplier is 2⁻⁶² as a
    // `f32`, scaling the result into [-1, 1].
    let x = (x & 1) + (x >> 1);
    f32::from_bits(0x2080_0000) * (x as f32)
  }
410
  /// Samples a `f64` from a distribution that approximates the uniform
  /// distribution over the real interval [-1, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [-1, 1].
  /// - Round to the nearest multiple of 2⁻⁶².
  /// - Round to a `f64` using the default rounding mode.
  #[inline(always)]
  pub fn biunit_f64(&mut self) -> f64 {
    // The conversion into a `f64` is three instructions on aarch64:
    //
    //   and x1, x0, #0x1
    //   add x2, x1, x0, asr #1
    //   scvtf d0, x2, #62
    //
    let x = self.i64();
    // `(x & 1) + (x >> 1)` is x/2 rounded to nearest (NOTE(review): ties
    // appear to round toward +∞ — confirm); the multiplier is 2⁻⁶² as a
    // `f64`, scaling the result into [-1, 1].
    let x = (x & 1) + (x >> 1);
    f64::from_bits(0x3c10_0000_0000_0000) * (x as f64)
  }
432
  // Writes `len` independent uniformly distributed bytes to `dst`.
  //
  // The caller must guarantee that `dst` is valid for writes of `len` bytes.
  #[inline(always)]
  unsafe fn fill_unchecked_inlined(&mut self, dst: *mut u8, len: usize) {
    let mut p = dst;
    let mut n = len;
    if n == 0 { return }
    // Main loop: emit 16 bytes per iteration while more than 16 remain.
    while n >= 17 {
      let x = self.u64().to_le_bytes();
      let y = self.u64().to_le_bytes();
      // SAFETY: `n >= 17`, so at least 16 bytes may be written at `p`; the
      // sources are local 8-byte arrays distinct from the destination.
      unsafe { p.copy_from_nonoverlapping(&raw const x as _, 8) };
      p = unsafe { p.add(8) };
      n = n - 8;
      unsafe { p.copy_from_nonoverlapping(&raw const y as _, 8) };
      p = unsafe { p.add(8) };
      n = n - 8;
    }
    // Here `n` is in 1 ..= 16; peel off one full word if needed so that the
    // final tail below is at most 8 bytes.
    if n >= 9 {
      let x = self.u64().to_le_bytes();
      // SAFETY: `n >= 9`, so at least 8 bytes may be written at `p`.
      unsafe { p.copy_from_nonoverlapping(&raw const x as _, 8) };
      p = unsafe { p.add(8) };
      n = n - 8;
    }
    // Here `n` is in 1 ..= 8: write the first `n` bytes of one final word.
    // SAFETY (each arm): exactly `n` bytes remain writable at `p`.
    let x = self.u64().to_le_bytes();
    match n {
      1 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 1) },
      2 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 2) },
      3 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 3) },
      4 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 4) },
      5 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 5) },
      6 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 6) },
      7 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 7) },
      8 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 8) },
      _ => unreachable!(),
    }
  }
467
468  /// Fills the provided buffer with independent uniformly distributed bytes.
469  pub fn fill(&mut self, dst: &mut [u8]) {
470    let n = dst.len();
471    unsafe { self.fill_unchecked(&raw mut *dst as _, n) };
472  }
473
  /// Fills the provided buffer with independent uniformly distributed bytes.
  /// The caller must guarantee that it is valid to write those bytes.
  ///
  /// # Safety
  ///
  /// `dst` must be valid for writes of `len` bytes.
  pub unsafe fn fill_unchecked(&mut self, dst: *mut u8, len: usize) {
    // SAFETY: forwarded directly under the caller's contract.
    unsafe { self.fill_unchecked_inlined(dst, len) };
  }
479
  /// Fills the provided buffer with independent uniformly distributed bytes,
  /// returning a reference to the initialized buffer. The returned buffer is a
  /// reference to the same memory and has the same length as the input buffer.
  pub fn fill_uninit<'a>(&mut self, dst: &'a mut [MaybeUninit<u8>]) -> &'a mut [u8] {
    let n = dst.len();
    // SAFETY: the pointer and length come from a live slice, and writing
    // arbitrary bytes into `MaybeUninit<u8>` storage is always permitted.
    unsafe { self.fill_unchecked(&raw mut *dst as _, n) };
    // SAFETY: all `n` bytes were just initialized by `fill_unchecked`.
    unsafe { dst.assume_init_mut() }
  }
488
489  /// Samples an array of independent uniformly distributed bytes.
490  pub fn byte_array<const N: usize>(&mut self) -> [u8; N] {
491    let mut buf = [0; N];
492    unsafe { self.fill_unchecked_inlined(&raw mut buf as _, N) };
493    buf
494  }
495
496  /// Shuffles a mutable slice in place with a random permutation.
497  pub fn shuffle<T>(&mut self, slice: &mut [T]) {
498    let n = slice.len();
499    if n >= 2 {
500      let p = &raw mut *slice as *mut T;
501      for i in 1 .. n {
502        let j = self.bounded_usize(i);
503        unsafe { p.add(i).swap(p.add(j)) };
504      }
505    }
506  }
507}
508
509impl Debug for Rng {
510  fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
511    f.debug_struct("Rng").finish_non_exhaustive()
512  }
513}
514
#[cfg(feature = "rand_core")]
impl rand_core::TryRng for Rng {
  // This generator cannot fail, so the error type is uninhabited.
  type Error = rand_core::Infallible;

  #[inline(always)]
  fn try_next_u32(&mut self) -> Result<u32, Self::Error> {
    Ok(self.u32())
  }

  #[inline(always)]
  fn try_next_u64(&mut self) -> Result<u64, Self::Error> {
    Ok(self.u64())
  }

  fn try_fill_bytes(&mut self, dst: &mut [u8]) -> Result<(), Self::Error> {
    self.fill(dst);
    Ok(())
  }
}
534
#[cfg(feature = "rand_core")]
impl rand_core::SeedableRng for Rng {
  type Seed = [u8; 16];

  fn from_seed(seed: Self::Seed) -> Self {
    // Setting the low bit forces a nonzero state; the one-bit bias keeps
    // seeding infallible. Note that, unlike [Rng::new], no hashing is applied.
    let s = 1 | u128::from_le_bytes(seed);
    // SAFETY: `s` has its lowest bit set, so it is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self { state: s }
  }

  fn seed_from_u64(seed: u64) -> Self {
    Self::from_u64(seed)
  }

  fn from_rng<T: rand_core::Rng + ?Sized>(g: &mut T) -> Self {
    let x = g.next_u64();
    let y = g.next_u64();
    let s = 1 | concat(x, y);
    // SAFETY: `s` has its lowest bit set, so it is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self { state: s }
  }

  fn try_from_rng<T: rand_core::TryRng + ?Sized>(g: &mut T) -> Result<Self, T::Error> {
    let x = g.try_next_u64()?;
    let y = g.try_next_u64()?;
    let s = 1 | concat(x, y);
    // SAFETY: `s` has its lowest bit set, so it is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Ok(Self { state: s })
  }

  // We keep the default implementations of `fork` and `try_fork`.
}
567
#[cfg(feature = "thread_local")]
pub mod thread_local {
  //! Access a thread-local random number generator.

  use std::cell::Cell;
  use std::mem::MaybeUninit;
  use std::num::NonZeroU128;
  use std::num::NonZeroU32;
  use std::num::NonZeroU64;
  use std::thread_local;
  use super::Rng;

  thread_local! {
    // `None` until first use on a given thread; thereafter holds that
    // thread's generator state between calls.
    static RNG: Cell<Option<NonZeroU128>> = const { Cell::new(None) };
  }

  // This function, while safe, is NOT logically re-entrant, so we should not
  // expose it publicly: the state is read out of the cell before `f` runs and
  // written back afterward, so a re-entrant call would re-use a stale state.
  #[inline(always)]
  fn with<T>(f: impl FnOnce(&mut Rng) -> T) -> T {
    RNG.with(|cell| {
      let mut rng = match cell.get() {
        None => Rng::from_operating_system(),
        Some(s) => Rng::from_state(s),
      };
      let result = f(&mut rng);
      cell.set(Some(rng.state()));
      result
    })
  }

  /// See [Rng::bernoulli].
  pub fn bernoulli(p: f64) -> bool {
    with(|rng| rng.bernoulli(p))
  }

  /// See [Rng::bool].
  pub fn bool() -> bool {
    with(|rng| rng.bool())
  }

  /// See [Rng::i32].
  pub fn i32() -> i32 {
    with(|rng| rng.i32())
  }

  /// See [Rng::i64].
  pub fn i64() -> i64 {
    with(|rng| rng.i64())
  }

  /// See [Rng::i128].
  pub fn i128() -> i128 {
    with(|rng| rng.i128())
  }

  /// See [Rng::u32].
  pub fn u32() -> u32 {
    with(|rng| rng.u32())
  }

  /// See [Rng::u64].
  pub fn u64() -> u64 {
    with(|rng| rng.u64())
  }

  /// See [Rng::u128].
  pub fn u128() -> u128 {
    with(|rng| rng.u128())
  }

  /// See [Rng::non_zero_u32].
  pub fn non_zero_u32() -> NonZeroU32 {
    with(|rng| rng.non_zero_u32())
  }

  /// See [Rng::non_zero_u64].
  pub fn non_zero_u64() -> NonZeroU64 {
    with(|rng| rng.non_zero_u64())
  }

  /// See [Rng::non_zero_u128].
  pub fn non_zero_u128() -> NonZeroU128 {
    with(|rng| rng.non_zero_u128())
  }

  /// See [Rng::bounded_u32].
  pub fn bounded_u32(n: u32) -> u32 {
    with(|rng| rng.bounded_u32(n))
  }

  /// See [Rng::bounded_u64].
  pub fn bounded_u64(n: u64) -> u64 {
    with(|rng| rng.bounded_u64(n))
  }

  /// See [Rng::bounded_usize].
  pub fn bounded_usize(n: usize) -> usize {
    with(|rng| rng.bounded_usize(n))
  }

  /// See [Rng::range_i32].
  pub fn range_i32(lo: i32, hi: i32) -> i32 {
    with(|rng| rng.range_i32(lo, hi))
  }

  /// See [Rng::range_i64].
  pub fn range_i64(lo: i64, hi: i64) -> i64 {
    with(|rng| rng.range_i64(lo, hi))
  }

  /// See [Rng::range_isize].
  pub fn range_isize(lo: isize, hi: isize) -> isize {
    with(|rng| rng.range_isize(lo, hi))
  }

  /// See [Rng::range_u32].
  pub fn range_u32(lo: u32, hi: u32) -> u32 {
    with(|rng| rng.range_u32(lo, hi))
  }

  /// See [Rng::range_u64].
  pub fn range_u64(lo: u64, hi: u64) -> u64 {
    with(|rng| rng.range_u64(lo, hi))
  }

  /// See [Rng::range_usize].
  pub fn range_usize(lo: usize, hi: usize) -> usize {
    with(|rng| rng.range_usize(lo, hi))
  }

  /// See [Rng::f32].
  pub fn f32() -> f32 {
    with(|rng| rng.f32())
  }

  /// See [Rng::f64].
  pub fn f64() -> f64 {
    with(|rng| rng.f64())
  }

  /// See [Rng::biunit_f32].
  pub fn biunit_f32() -> f32 {
    with(|rng| rng.biunit_f32())
  }

  /// See [Rng::biunit_f64].
  pub fn biunit_f64() -> f64 {
    with(|rng| rng.biunit_f64())
  }

  /// See [Rng::fill].
  pub fn fill(dst: &mut [u8]) {
    with(|rng| rng.fill(dst))
  }

  /// See [Rng::fill_unchecked].
  pub unsafe fn fill_unchecked(dst: *mut u8, len: usize) {
    // SAFETY: forwarded directly under the caller's contract.
    with(|rng| unsafe { rng.fill_unchecked(dst, len) })
  }

  /// See [Rng::fill_uninit].
  pub fn fill_uninit(dst: &mut [MaybeUninit<u8>]) -> &mut [u8] {
    with(|rng| rng.fill_uninit(dst))
  }

  /// See [Rng::byte_array].
  pub fn byte_array<const N: usize>() -> [u8; N] {
    with(|rng| rng.byte_array())
  }
}