// dandelion/lib.rs
1#![doc = include_str!("../README.md")]
2#![no_std]
3
4#[cfg(feature = "std")]
5extern crate std;
6
7use core::fmt::Debug;
8use core::fmt::Formatter;
9use core::fmt;
10use core::hint::cold_path;
11use core::hint::select_unpredictable;
12use core::mem::MaybeUninit;
13use core::num::NonZeroU128;
14use core::num::NonZeroU32;
15use core::num::NonZeroU64;
16
/// A high performance non-cryptographic random number generator.
//
// Invariant: `state` is never zero. Every constructor establishes this, and
// the state update in `Rng::u64` is relied upon to preserve it. Using
// `NonZeroU128` also gives `Option<Rng>` a niche, so it costs no extra space.
#[derive(Clone)]
pub struct Rng { state: NonZeroU128 }
20
/// Builds a `u128` with `x` in the low 64 bits and `y` in the high 64 bits.
#[inline(always)]
const fn concat(x: u64, y: u64) -> u128 {
  // The two halves occupy disjoint bit ranges, so `|` is exact.
  ((y as u128) << 64) | (x as u128)
}
25
/// Returns the low 64 bits of `x`.
#[inline(always)]
const fn lo(x: u128) -> u64 {
  // Masking before the narrowing cast makes the truncation explicit; the
  // result is identical to a plain `as u64` cast.
  (x & 0xffff_ffff_ffff_ffff) as u64
}
30
/// Returns the high 64 bits of `x`.
#[inline(always)]
const fn hi(x: u128) -> u64 {
  let upper = x >> 64;
  upper as u64
}
35
/// Arithmetic shift right: the sign bit is replicated into the vacated
/// high bits. Requires `a < 64` (checked in debug builds, like `>>`).
#[inline(always)]
const fn asr(x: u64, a: usize) -> u64 {
  // Reinterpreting as `i64` makes `>>` an arithmetic (sign-extending) shift.
  let signed = x as i64;
  (signed >> a) as u64
}
40
/// Logical shift left; named to mirror `asr`. As with the `<<` operator in
/// general, `a` must be less than 64 (checked in debug builds).
#[inline(always)]
const fn lsl(x: u64, a: usize) -> u64 {
  x << a
}
45
/// Returns the high 64 bits of the full 128-bit product `x * y`.
#[inline(always)]
const fn mulhi(x: u64, y: u64) -> u64 {
  // The product of two 64-bit values always fits in 128 bits, so the
  // widening multiply cannot overflow.
  let wide = (x as u128) * (y as u128);
  (wide >> 64) as u64
}
50
impl Rng {
  /// Creates a random number generator with an initial state derived by
  /// hashing the given seed.
  pub const fn new(seed: NonZeroU128) -> Self {
    // The hash uses the multiplier
    //
    //   M = round_nearest_odd(EULER_MASCHERONI * 2¹²⁸)
    //
    // The Euler-Mascheroni constant was selected because it is a well-known
    // number in the range (0.5, 1.0).
    const M: u128 = 0x93c4_67e3_7db0_c7a4_d1be_3f81_0152_cb57;
    let x = seed.get();
    // Three multiply/byte-swap rounds. Multiplication by the odd constant M
    // is invertible modulo 2¹²⁸ and `swap_bytes` is a permutation, so the
    // whole hash is a bijection on `u128` that fixes zero.
    let x = x.wrapping_mul(M);
    let x = x.swap_bytes();
    let x = x.wrapping_mul(M);
    let x = x.swap_bytes();
    let x = x.wrapping_mul(M);
    // SAFETY: `x` is the image of a nonzero value under a bijection that
    // fixes zero, so `x` is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(x) };
    Self { state: s }
  }

  /// Creates a random number generator with an initial state derived by
  /// hashing the given `u64` seed.
  pub const fn from_u64(seed: u64) -> Self {
    // Put a 1 in the high half so the 128-bit value is nonzero for every
    // `seed`, including zero.
    let s = concat(seed, 1);
    // SAFETY: the high 64 bits of `s` are 1, so `s` is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self::new(s)
  }

  /// Retrieves the current state of the random number generator.
  ///
  /// The returned state can later be passed to [Rng::from_state] to resume
  /// the sequence.
  #[inline(always)]
  pub const fn state(&self) -> NonZeroU128 {
    self.state
  }

  /// Creates a random number generator with a particular initial state.
  ///
  /// <div class="warning">
  ///
  /// If you want to deterministically initialize a generator from a small
  /// integer or other weak seed, you should *NOT* use this function and should
  /// instead use [Rng::new] or [Rng::from_u64] which hash their arguments.
  ///
  /// </div>
  #[inline(always)]
  pub const fn from_state(state: NonZeroU128) -> Self {
    Self { state }
  }

  /// Creates a random number generator with a random seed retrieved from the
  /// operating system.
  #[cfg(feature = "getrandom")]
  #[inline(never)]
  #[cold]
  pub fn from_operating_system() -> Self {
    let mut buf = [0; 16];
    getrandom::fill(&mut buf).expect("getrandom::fill failed!");
    // Force the low bit on so the state is nonzero; this discards one bit of
    // OS-provided entropy.
    let s = 1 | u128::from_le_bytes(buf);
    // SAFETY: the low bit of `s` is set, so `s` is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self { state: s }
  }

  /// Samples a `bool` from the Bernoulli distribution where `true` appears
  /// with probability approximately equal to `p`.
  ///
  /// A probability `p` <= 0 or NaN is treated as 0, and a probability `p` >= 1
  /// is treated as 1.
  #[inline(always)]
  pub fn bernoulli(&mut self, p: f64) -> bool {
    // The specification of the float-to-integer cast in Rust is such that
    // - rounding is toward zero,
    // - NaN produces zero, and
    // - out of range values saturate.
    //
    // Also, note that `n == u64::MAX` if and only if `p >= 1`, so we use that
    // as a sentinel value.
    //
    // On aarch64 we get a nice sequence:
    //
    //   fcvtzu x8, d0, #64
    //   ...
    //   cmn x8, #1
    //   ccmp x9, x8, #0, ne
    //   cset w0, lo
    //
    // On x86-64, the bounds checks against 0.0 and 2.0 ** 64 are performed
    // directly.
    //
    // The constant 0x43f0_0000_0000_0000 is the bit pattern of 2.0 ** 64.
    let n = (p * f64::from_bits(0x43f0_0000_0000_0000)) as u64;
    let x = self.u64();
    x < n || n == u64::MAX
  }

  /// Samples a `bool` from the uniform distribution.
  #[inline(always)]
  pub fn bool(&mut self) -> bool {
    // The sign bit of a uniform `i64` is a fair coin.
    self.i64() < 0
  }

  /// Samples a `i32` from the uniform distribution.
  #[inline(always)]
  pub fn i32(&mut self) -> i32 {
    self.u64() as i32
  }

  /// Samples a `i64` from the uniform distribution.
  #[inline(always)]
  pub fn i64(&mut self) -> i64 {
    self.u64() as i64
  }

  /// Samples a `i128` from the uniform distribution.
  #[inline(always)]
  pub fn i128(&mut self) -> i128 {
    self.u128() as i128
  }

  /// Samples a `u32` from the uniform distribution.
  #[inline(always)]
  pub fn u32(&mut self) -> u32 {
    self.u64() as u32
  }

  /// Samples a `u64` from the uniform distribution.
  #[inline(always)]
  pub fn u64(&mut self) -> u64 {
    // This is the core generator.
    //
    // The state update is a xorshift-style map on the two 64-bit halves of
    // the state. The output nonlinearly mixes the low half with itself
    // (`x * x` split into its low and high 64-bit parts) and the high half.
    let s = self.state.get();
    let x = lo(s);
    let y = hi(s);
    let s = concat(y ^ asr(x, 4), x ^ lsl(y, 7));
    // SAFETY: the crate relies on the state update never mapping a nonzero
    // state to zero; this is an invariant of the chosen shift constants
    // (NOTE(review): confirm against the crate's design notes).
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    self.state = s;
    y.wrapping_add(x.wrapping_mul(x)) ^ mulhi(x, x)
  }

  /// Samples a `u128` from the uniform distribution.
  #[inline(always)]
  pub fn u128(&mut self) -> u128 {
    // Two independent 64-bit draws; `x` becomes the low half.
    let x = self.u64();
    let y = self.u64();
    concat(x, y)
  }

  /// Samples a `NonZeroU32` from the uniform distribution.
  #[inline(always)]
  pub fn non_zero_u32(&mut self) -> NonZeroU32 {
    // Rejection sampling: retry on the probability 2⁻³² zero outcome.
    loop {
      if let Some(x) = NonZeroU32::new(self.u32()) {
        break x
      }
    }
  }

  /// Samples a `NonZeroU64` from the uniform distribution.
  #[inline(always)]
  pub fn non_zero_u64(&mut self) -> NonZeroU64 {
    // Rejection sampling: retry on the probability 2⁻⁶⁴ zero outcome.
    loop {
      if let Some(x) = NonZeroU64::new(self.u64()) {
        break x
      }
    }
  }

  /// Samples a `NonZeroU128` from the uniform distribution.
  #[inline(always)]
  pub fn non_zero_u128(&mut self) -> NonZeroU128 {
    // Rejection sampling: retry on the probability 2⁻¹²⁸ zero outcome.
    loop {
      if let Some(x) = NonZeroU128::new(self.u128()) {
        break x
      }
    }
  }

  /// Samples a `u32` from the uniform distribution over the range `0 ... n`.
  ///
  /// The upper bound is inclusive.
  #[inline(always)]
  pub fn bounded_u32(&mut self, n: u32) -> u32 {
    // Cf. `bounded_u64` for an explanation of the sampling method.
    let x = self.u64();
    let n = n as u64;
    // Cannot overflow: `n <= u32::MAX < u64::MAX`.
    let m = n + 1;
    let a = mulhi(x, m);
    let b = x.wrapping_mul(m);
    if b.overflowing_add(n).1 {
      debug_assert!(m != 0);
      let mut r = b;
      loop {
        let y = self.u64();
        let c = mulhi(y, m);
        let d = y.wrapping_mul(m);
        let s = r.overflowing_add(c);
        let w = s.0;
        let p = s.1;
        // `a < m <= 2³² ` and the carry `p` is 0 or 1, so the result fits
        // in a `u32`.
        if w != u64::MAX { break (a + p as u64) as u32 }
        r = d;
        cold_path();
      }
    } else {
      a as u32
    }
  }

  /// Samples a `u64` from the uniform distribution over the range `0 ... n`.
  ///
  /// The upper bound is inclusive.
  #[inline(always)]
  pub fn bounded_u64(&mut self, n: u64) -> u64 {
    // Multiply-based bounded sampling made exact: `a = ⌊x * m / 2⁶⁴⌋` is the
    // candidate and `b` (the low 64 bits of the product) is the fractional
    // part. When `b + n` does not overflow, the candidate is already
    // unbiased; otherwise the fraction is extended with further random
    // 64-bit digits until the carry into `a` is decided.
    let x = self.u64();
    // `m == 0` exactly when `n == u64::MAX`, i.e. the full range.
    let m = n.wrapping_add(1);
    let a = mulhi(x, m);
    let b = x.wrapping_mul(m);
    // For the full range, `x` itself is the answer; `select_unpredictable`
    // hints that this condition should compile to a conditional move.
    let u = select_unpredictable(m == 0, x, a);
    if b.overflowing_add(n).1 {
      debug_assert!(m != 0);
      let mut r = b;
      loop {
        let y = self.u64();
        let c = mulhi(y, m);
        let d = y.wrapping_mul(m);
        let s = r.overflowing_add(c);
        let w = s.0;
        let p = s.1;
        // Unless the accumulated digit is all ones, no later digit can flip
        // the carry, so `p` is final.
        if w != u64::MAX { break a + p as u64 }
        // NB: We get here with negligible probability, but we include the loop
        // anyway to prevent the compiler from doing the pessimization of
        // if-converting the control flow away.
        r = d;
        cold_path();
      }
    } else {
      u
    }
  }

  /// Samples a `usize` from the uniform distribution over the range `0 ... n`.
  ///
  /// The upper bound is inclusive.
  #[inline(always)]
  pub fn bounded_usize(&mut self, n: usize) -> usize {
    // `usize::BITS` is a compile-time constant, so the dead arms vanish.
    match const { usize::BITS } {
      32 => self.bounded_u32(n as u32) as usize,
      64 => self.bounded_u64(n as u64) as usize,
      _ => unimplemented!(),
    }
  }

  /// Samples a `i32` from the uniform distribution over the range `a ... b`.
  ///
  /// The lower and upper bounds are inclusive, and the range can wrap around
  /// from `i32::MAX` to `i32::MIN`.
  #[inline(always)]
  pub fn range_i32(&mut self, a: i32, b: i32) -> i32 {
    // Two's-complement wrapping arithmetic is identical for signed and
    // unsigned, so delegate to the unsigned version.
    self.range_u32(a as u32, b as u32) as i32
  }

  /// Samples a `i64` from the uniform distribution over the range `a ... b`.
  ///
  /// The lower and upper bounds are inclusive, and the range can wrap around
  /// from `i64::MAX` to `i64::MIN`.
  #[inline(always)]
  pub fn range_i64(&mut self, a: i64, b: i64) -> i64 {
    self.range_u64(a as u64, b as u64) as i64
  }

  /// Samples a `isize` from the uniform distribution over the range `a ... b`.
  ///
  /// The lower and upper bounds are inclusive, and the range can wrap around
  /// from `isize::MAX` to `isize::MIN`.
  #[inline(always)]
  pub fn range_isize(&mut self, a: isize, b: isize) -> isize {
    self.range_usize(a as usize, b as usize) as isize
  }

  /// Samples a `u32` from the uniform distribution over the range `a ... b`.
  ///
  /// The lower and upper bounds are inclusive, and the range can wrap around
  /// from `u32::MAX` to `u32::MIN`.
  #[inline(always)]
  pub fn range_u32(&mut self, a: u32, b: u32) -> u32 {
    // Sample an offset in `0 ..= b - a` (wrapping) and add it to `a`.
    a.wrapping_add(self.bounded_u32(b.wrapping_sub(a)))
  }

  /// Samples a `u64` from the uniform distribution over the range `a ... b`.
  ///
  /// The lower and upper bounds are inclusive, and the range can wrap around
  /// from `u64::MAX` to `u64::MIN`.
  #[inline(always)]
  pub fn range_u64(&mut self, a: u64, b: u64) -> u64 {
    a.wrapping_add(self.bounded_u64(b.wrapping_sub(a)))
  }

  /// Samples a `usize` from the uniform distribution over the range `a ... b`.
  ///
  /// The lower and upper bounds are inclusive, and the range can wrap around
  /// from `usize::MAX` to `usize::MIN`.
  #[inline(always)]
  pub fn range_usize(&mut self, a: usize, b: usize) -> usize {
    a.wrapping_add(self.bounded_usize(b.wrapping_sub(a)))
  }

  /// Samples a `f32` from a distribution that approximates the uniform
  /// distribution over the real interval [0, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [0, 1].
  /// - Round to the nearest multiple of 2⁻⁶³.
  /// - Round to a `f32` using the default rounding mode.
  ///
  /// Every output, including zero, has a positive sign bit.
  #[inline(always)]
  pub fn f32(&mut self) -> f32 {
    // The constant 0x2000_0000 is the bit pattern of 2⁻⁶³ as an `f32`;
    // `abs` folds the sign bit away so the result lies in [0, 1].
    let x = self.i64();
    let x = f32::from_bits(0x2000_0000) * (x as f32);
    x.abs()
  }

  /// Samples a `f64` from a distribution that approximates the uniform
  /// distribution over the real interval [0, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [0, 1].
  /// - Round to the nearest multiple of 2⁻⁶³.
  /// - Round to a `f64` using the default rounding mode.
  ///
  /// Every output, including zero, has a positive sign bit.
  #[inline(always)]
  pub fn f64(&mut self) -> f64 {
    // The conversion into a `f64` is two instructions on aarch64:
    //
    //   scvtf d0, x8, #63
    //   fabs d0, d0
    //
    // The constant 0x3c00_0000_0000_0000 is the bit pattern of 2⁻⁶³.
    let x = self.i64();
    let x = f64::from_bits(0x3c00_0000_0000_0000) * (x as f64);
    x.abs()
  }

  /// Samples a `f32` from a distribution that approximates the uniform
  /// distribution over the real interval [-1, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [-1, 1].
  /// - Round to the nearest multiple of 2⁻⁶².
  /// - Round to a `f32` using the default rounding mode.
  #[inline(always)]
  pub fn biunit_f32(&mut self) -> f32 {
    // `(x & 1) + (x >> 1)` rounds the 64-bit sample to 63 bits in a way
    // that keeps the distribution symmetric about zero; see `biunit_f64`.
    let x = self.i64();
    let x = (x & 1) + (x >> 1);
    // The constant 0x2080_0000 is the bit pattern of 2⁻⁶² as an `f32`.
    f32::from_bits(0x2080_0000) * (x as f32)
  }

  /// Samples a `f64` from a distribution that approximates the uniform
  /// distribution over the real interval [-1, 1].
  ///
  /// The distribution is the same as the one produced by the following
  /// procedure:
  ///
  /// - Sample a real number from the uniform distribution on [-1, 1].
  /// - Round to the nearest multiple of 2⁻⁶².
  /// - Round to a `f64` using the default rounding mode.
  #[inline(always)]
  pub fn biunit_f64(&mut self) -> f64 {
    // The conversion into a `f64` is three instructions on aarch64:
    //
    //   and x1, x0, #0x1
    //   add x2, x1, x0, asr #1
    //   scvtf d0, x2, #62
    //
    // The constant 0x3c10_0000_0000_0000 is the bit pattern of 2⁻⁶².
    let x = self.i64();
    let x = (x & 1) + (x >> 1);
    f64::from_bits(0x3c10_0000_0000_0000) * (x as f64)
  }

  // Fills `len` bytes at `dst` with uniformly random bytes.
  //
  // Safety: it must be valid to write `len` arbitrary bytes at `dst`.
  //
  // The bulk loop writes 16 bytes per iteration and runs while at least 17
  // bytes remain, so after it `1 <= n <= 16`; the optional 8-byte chunk then
  // reduces that to `1 <= n <= 8`, which the final `match` handles with a
  // single extra `u64` draw.
  #[inline(always)]
  unsafe fn fill_unchecked_inlined(&mut self, dst: *mut u8, len: usize) {
    let mut p = dst;
    let mut n = len;
    if n == 0 { return }
    while n >= 17 {
      let x = self.u64().to_le_bytes();
      let y = self.u64().to_le_bytes();
      // SAFETY: `n >= 17`, so at least 16 bytes starting at `p` lie inside
      // the caller-guaranteed range `dst .. dst + len`.
      unsafe { p.copy_from_nonoverlapping(&raw const x as _, 8) };
      p = unsafe { p.add(8) };
      n = n - 8;
      unsafe { p.copy_from_nonoverlapping(&raw const y as _, 8) };
      p = unsafe { p.add(8) };
      n = n - 8;
    }
    if n >= 9 {
      let x = self.u64().to_le_bytes();
      // SAFETY: `n >= 9`, so 8 bytes starting at `p` are in range.
      unsafe { p.copy_from_nonoverlapping(&raw const x as _, 8) };
      p = unsafe { p.add(8) };
      n = n - 8;
    }
    let x = self.u64().to_le_bytes();
    // SAFETY (all arms): `n` bytes starting at `p` are exactly the tail of
    // the caller-guaranteed range, and `x` holds 8 >= n source bytes.
    match n {
      1 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 1) },
      2 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 2) },
      3 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 3) },
      4 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 4) },
      5 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 5) },
      6 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 6) },
      7 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 7) },
      8 => unsafe { p.copy_from_nonoverlapping(&raw const x as _, 8) },
      _ => unreachable!(),
    }
  }

  /// Fills the provided buffer with independent uniformly distributed bytes.
  pub fn fill(&mut self, dst: &mut [u8]) {
    let n = dst.len();
    // SAFETY: `dst` is a valid mutable slice of exactly `n` bytes.
    unsafe { self.fill_unchecked(&raw mut *dst as _, n) };
  }

  /// Fills the provided buffer with independent uniformly distributed bytes.
  ///
  /// # Safety
  ///
  /// It must be valid to write `len` arbitrary bytes at `dst`.
  pub unsafe fn fill_unchecked(&mut self, dst: *mut u8, len: usize) {
    // Non-inlined wrapper; the caller's safety contract is forwarded.
    unsafe { self.fill_unchecked_inlined(dst, len) };
  }

  /// Fills the provided buffer with independent uniformly distributed bytes,
  /// returning a reference to the initialized buffer. The returned buffer is a
  /// reference the same memory and has the same length as the input buffer.
  pub fn fill_uninit<'a>(&mut self, dst: &'a mut [MaybeUninit<u8>]) -> &'a mut [u8] {
    let n = dst.len();
    // SAFETY: `dst` is valid for writes of `n` bytes, and `MaybeUninit<u8>`
    // has the same layout as `u8`.
    unsafe { self.fill_unchecked(&raw mut *dst as _, n) };
    // SAFETY: every byte of `dst` was just initialized above.
    unsafe { dst.assume_init_mut() }
  }

  /// Samples an array of independent uniformly distributed bytes.
  pub fn byte_array<const N: usize>(&mut self) -> [u8; N] {
    let mut buf = [0; N];
    // SAFETY: `buf` is a local array of exactly `N` writable bytes.
    unsafe { self.fill_unchecked_inlined(&raw mut buf as _, N) };
    buf
  }

  /// Shuffles a mutable slice in place with a random permutation.
  pub fn shuffle<T>(&mut self, slice: &mut [T]) {
    // Fisher-Yates: for each `i`, swap element `i` with a uniformly chosen
    // `j` in `0 ..= i` (`bounded_usize` is inclusive), yielding a uniform
    // permutation.
    let n = slice.len();
    if n >= 2 {
      let p = &raw mut *slice as *mut T;
      for i in 1 .. n {
        let j = self.bounded_usize(i);
        // SAFETY: `j <= i < n`, so both pointers are in bounds; `ptr::swap`
        // permits `i == j`.
        unsafe { p.add(i).swap(p.add(j)) };
      }
    }
  }
}
509
510impl Debug for Rng {
511  fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
512    f.debug_struct("Rng").finish_non_exhaustive()
513  }
514}
515
#[cfg(feature = "rand_core")]
impl rand_core::TryRng for Rng {
  // The generator is deterministic and infallible, so the error type is the
  // uninhabited `Infallible` and every method unconditionally returns `Ok`.
  type Error = rand_core::Infallible;

  #[inline(always)]
  fn try_next_u32(&mut self) -> Result<u32, Self::Error> {
    Ok(self.u32())
  }

  #[inline(always)]
  fn try_next_u64(&mut self) -> Result<u64, Self::Error> {
    Ok(self.u64())
  }

  fn try_fill_bytes(&mut self, dst: &mut [u8]) -> Result<(), Self::Error> {
    self.fill(dst);
    Ok(())
  }
}
535
#[cfg(feature = "rand_core")]
impl rand_core::SeedableRng for Rng {
  type Seed = [u8; 16];

  #[inline(always)]
  fn from_seed(seed: Self::Seed) -> Self {
    // NB: unlike `Rng::new`, the seed bytes are used as the state directly
    // (with the low bit forced on) rather than hashed — rand_core seeds are
    // expected to already be high entropy.
    let s = 1 | u128::from_le_bytes(seed);
    // SAFETY: the low bit of `s` is set, so `s` is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Self::from_state(s)
  }

  fn seed_from_u64(seed: u64) -> Self {
    Self::from_u64(seed)
  }

  fn from_rng<T: rand_core::Rng + ?Sized>(g: &mut T) -> Self {
    // The error type here is uninhabited, so the `Ok` pattern is
    // irrefutable and no failure path exists.
    let Ok(x) = Self::try_from_rng(g);
    x
  }

  fn try_from_rng<T: rand_core::TryRng + ?Sized>(g: &mut T) -> Result<Self, T::Error> {
    let x = g.try_next_u64()?;
    let y = g.try_next_u64()?;
    // Force the low bit on so the state is nonzero.
    let s = 1 | concat(x, y);
    // SAFETY: the low bit of `s` is set, so `s` is nonzero.
    let s = unsafe { NonZeroU128::new_unchecked(s) };
    Ok(Self::from_state(s))
  }

  // We keep the default implementations of `fork` and `try_fork`.
}
566
#[cfg(feature = "thread_local")]
pub mod thread_local {
  //! Access a thread-local random number generator.
  //!
  //! Each free function here mirrors a method on [`Rng`], operating on a
  //! per-thread generator that is lazily seeded from the operating system
  //! on first use.

  use std::cell::Cell;
  use std::mem::MaybeUninit;
  use std::num::NonZeroU128;
  use std::num::NonZeroU32;
  use std::num::NonZeroU64;
  use std::thread_local;
  use super::Rng;

  thread_local! {
    // `None` until the first use on this thread; afterwards holds the saved
    // generator state between calls.
    static RNG: Cell<Option<NonZeroU128>> = const {
      Cell::new(None)
    };
  }

  // This function, while safe, is NOT logically re-entrant, so we should not
  // expose it publicly.
  //
  // Each call loads the thread's saved state (seeding from the OS on first
  // use), runs `f` on a local generator, and stores the updated state back.
  #[inline(always)]
  fn with<T>(f: impl FnOnce(&mut Rng) -> T) -> T {
    RNG.with(|state| {
      let mut g =
        match state.get() {
          None => Rng::from_operating_system(),
          Some(s) => Rng::from_state(s),
        };
      let x = f(&mut g);
      state.set(Some(g.state()));
      x
    })
  }

  /// See [Rng::bernoulli].
  pub fn bernoulli(p: f64) -> bool {
    with(|g| g.bernoulli(p))
  }

  /// See [Rng::bool].
  pub fn bool() -> bool {
    with(|g| g.bool())
  }

  /// See [Rng::i32].
  pub fn i32() -> i32 {
    with(|g| g.i32())
  }

  /// See [Rng::i64].
  pub fn i64() -> i64 {
    with(|g| g.i64())
  }

  /// See [Rng::i128].
  pub fn i128() -> i128 {
    with(|g| g.i128())
  }

  /// See [Rng::u32].
  pub fn u32() -> u32 {
    with(|g| g.u32())
  }

  /// See [Rng::u64].
  pub fn u64() -> u64 {
    with(|g| g.u64())
  }

  /// See [Rng::u128].
  pub fn u128() -> u128 {
    with(|g| g.u128())
  }

  /// See [Rng::non_zero_u32].
  pub fn non_zero_u32() -> NonZeroU32 {
    with(|g| g.non_zero_u32())
  }

  /// See [Rng::non_zero_u64].
  pub fn non_zero_u64() -> NonZeroU64 {
    with(|g| g.non_zero_u64())
  }

  /// See [Rng::non_zero_u128].
  pub fn non_zero_u128() -> NonZeroU128 {
    with(|g| g.non_zero_u128())
  }

  /// See [Rng::bounded_u32].
  pub fn bounded_u32(n: u32) -> u32 {
    with(|g| g.bounded_u32(n))
  }

  /// See [Rng::bounded_u64].
  pub fn bounded_u64(n: u64) -> u64 {
    with(|g| g.bounded_u64(n))
  }

  /// See [Rng::bounded_usize].
  pub fn bounded_usize(n: usize) -> usize {
    with(|g| g.bounded_usize(n))
  }

  /// See [Rng::range_i32].
  pub fn range_i32(a: i32, b: i32) -> i32 {
    with(|g| g.range_i32(a, b))
  }

  /// See [Rng::range_i64].
  pub fn range_i64(a: i64, b: i64) -> i64 {
    with(|g| g.range_i64(a, b))
  }

  /// See [Rng::range_isize].
  pub fn range_isize(a: isize, b: isize) -> isize {
    with(|g| g.range_isize(a, b))
  }

  /// See [Rng::range_u32].
  pub fn range_u32(a: u32, b: u32) -> u32 {
    with(|g| g.range_u32(a, b))
  }

  /// See [Rng::range_u64].
  pub fn range_u64(a: u64, b: u64) -> u64 {
    with(|g| g.range_u64(a, b))
  }

  /// See [Rng::range_usize].
  pub fn range_usize(a: usize, b: usize) -> usize {
    with(|g| g.range_usize(a, b))
  }

  /// See [Rng::f32].
  pub fn f32() -> f32 {
    with(|g| g.f32())
  }

  /// See [Rng::f64].
  pub fn f64() -> f64 {
    with(|g| g.f64())
  }

  /// See [Rng::biunit_f32].
  pub fn biunit_f32() -> f32 {
    with(|g| g.biunit_f32())
  }

  /// See [Rng::biunit_f64].
  pub fn biunit_f64() -> f64 {
    with(|g| g.biunit_f64())
  }

  /// See [Rng::fill].
  pub fn fill(dst: &mut [u8]) {
    with(|g| g.fill(dst))
  }

  /// See [Rng::fill_unchecked].
  ///
  /// # Safety
  ///
  /// It must be valid to write `len` arbitrary bytes at `dst`.
  pub unsafe fn fill_unchecked(dst: *mut u8, len: usize) {
    with(|g| unsafe { g.fill_unchecked(dst, len) })
  }

  /// See [Rng::fill_uninit].
  pub fn fill_uninit(dst: &mut [MaybeUninit<u8>]) -> &mut [u8] {
    with(|g| g.fill_uninit(dst))
  }

  /// See [Rng::byte_array].
  pub fn byte_array<const N: usize>() -> [u8; N] {
    with(|g| g.byte_array())
  }
}