use std::{
cell::Cell,
num::NonZeroU32,
ops::{Index, Range},
};
#[cfg(any(test, feature = "test-utils"))]
use proptest::{
arbitrary::{Arbitrary, any},
strategy::{BoxedStrategy, Strategy},
};
pub use rand_core::RngCore;
use rand_core::{CryptoRng, SeedableRng, le::read_u32_into};
use ring::rand::SecureRandom;
/// Custom error code reported through `rand_core::Error` when the OS RNG
/// fails; `CUSTOM_START` marks it as a library-defined (non-OS) code.
const RAND_ERROR_CODE: NonZeroU32 =
    NonZeroU32::new(rand_core::Error::CUSTOM_START).unwrap();
/// Marker trait for cryptographically secure RNGs.
pub trait Crng: RngCore + CryptoRng {}
/// Blanket impl: anything that is both `RngCore` and `CryptoRng` is a `Crng`.
impl<R: RngCore + CryptoRng> Crng for R {}
/// Ergonomic helpers layered on top of [`RngCore`].
///
/// NOTE(review): the `gen_range_*` methods assume a non-empty range
/// (`start < end`); an empty or reversed range underflows the span
/// computation (panic in debug builds, wrapped modular arithmetic in
/// release) — confirm no caller relies on passing empty ranges.
pub trait RngExt: RngCore {
    /// Return `N` uniformly random bytes.
    fn gen_bytes<const N: usize>(&mut self) -> [u8; N];
    /// Return a uniformly random `u32`.
    fn gen_u32(&mut self) -> u32;
    /// Return a uniformly random `u64`.
    fn gen_u64(&mut self) -> u64;
    /// Uniform `u8` assembled from one random byte.
    #[inline]
    fn gen_u8(&mut self) -> u8 {
        u8::from_le_bytes(self.gen_bytes())
    }
    /// Uniform `u16` (little-endian assembly of random bytes).
    #[inline]
    fn gen_u16(&mut self) -> u16 {
        u16::from_le_bytes(self.gen_bytes())
    }
    /// Uniform `u128` (little-endian assembly of random bytes).
    #[inline]
    fn gen_u128(&mut self) -> u128 {
        u128::from_le_bytes(self.gen_bytes())
    }
    /// Bernoulli trial: `true` with probability `p`. Since `gen_f32` is in
    /// `[0, 1)`, `p <= 0.0` (or NaN) is never true and `p >= 1.0` is always
    /// true.
    #[inline]
    fn gen_bool(&mut self, p: f32) -> bool {
        self.gen_f32() < p
    }
    /// Fair coin flip: tests the low bit of one `u32` draw (true on 0).
    #[inline]
    fn gen_boolean(&mut self) -> bool {
        self.gen_u32() & 0x1 == 0
    }
    /// Uniform `f32` in `[0, 1)` using the top 24 bits of one `u32` draw
    /// (24 = f32 mantissa bits incl. the implicit bit, so every value is
    /// exactly representable).
    #[inline]
    fn gen_f32(&mut self) -> f32 {
        const SCALE: f32 = 1.0 / (1u32 << 24) as f32;
        (self.gen_u32() >> 8) as f32 * SCALE
    }
    /// Uniform `f64` in `[0, 1)` using the top 53 bits of one `u64` draw.
    #[inline]
    fn gen_f64(&mut self) -> f64 {
        const SCALE: f64 = 1.0 / (1u64 << 53) as f64;
        (self.gen_u64() >> 11) as f64 * SCALE
    }
    /// Uniform `i32` in `range` via multiply-shift range reduction
    /// (not exactly uniform unless the span divides 2^32; span is computed
    /// in i64 so the full i32 range fits in u32).
    fn gen_range_i32(&mut self, range: Range<i32>) -> i32 {
        let span = ((range.end as i64) - (range.start as i64)) as u32;
        (fastmap32(self.gen_u32(), span) as i32).wrapping_add(range.start)
    }
    /// Uniform `u32` in `range` via multiply-shift range reduction.
    fn gen_range_u32(&mut self, range: Range<u32>) -> u32 {
        let span = range.end - range.start;
        fastmap32(self.gen_u32(), span) + range.start
    }
    /// Uniform `u64` in `range` via multiply-shift range reduction.
    fn gen_range_u64(&mut self, range: Range<u64>) -> u64 {
        let span = range.end - range.start;
        fastmap64(self.gen_u64(), span) + range.start
    }
    /// Uniform `usize` in `range` (computed in 64-bit space).
    fn gen_range_usize(&mut self, range: Range<usize>) -> usize {
        let span = (range.end as u64) - (range.start as u64);
        (fastmap64(self.gen_u64(), span) + range.start as u64) as usize
    }
    /// `N` random bytes drawn from the 62-symbol `0-9A-Za-z` alphabet.
    fn gen_alphanum_bytes<const N: usize>(&mut self) -> [u8; N] {
        let mut out = self.gen_bytes();
        encode_alphanum_bytes(&mut out);
        out
    }
    /// Heap-allocated, runtime-sized variant of
    /// [`Self::gen_alphanum_bytes`] (test builds only).
    #[cfg(any(test, feature = "test-utils"))]
    fn gen_alphanum_vec(&mut self, n: usize) -> Vec<u8> {
        let mut out = vec![0u8; n];
        self.fill_bytes(&mut out);
        encode_alphanum_slice(&mut out);
        out
    }
}
/// Blanket impl: every [`RngCore`] gets the `RngExt` helpers for free.
impl<R: RngCore> RngExt for R {
    fn gen_bytes<const N: usize>(&mut self) -> [u8; N] {
        // Fill a zeroed stack array via the core trait.
        let mut buf = [0u8; N];
        self.fill_bytes(&mut buf);
        buf
    }
    /// Direct delegation to the core trait.
    #[inline]
    fn gen_u32(&mut self) -> u32 {
        self.next_u32()
    }
    /// Direct delegation to the core trait.
    #[inline]
    fn gen_u64(&mut self) -> u64 {
        self.next_u64()
    }
}
/// Random-choice helpers for indexable collections.
#[allow(clippy::len_without_is_empty)]
pub trait RngSliceExt: Index<usize> {
    /// Number of elements in the collection.
    fn len(&self) -> usize;
    /// Uniformly pick a reference to one element, or `None` if empty.
    fn choose<R: RngCore>(&self, rng: &mut R) -> Option<&Self::Output> {
        match self.len() {
            0 => None,
            len => Some(&self[rng.gen_range_usize(0..len)]),
        }
    }
    /// Shuffle the collection in place.
    fn shuffle<R: RngCore>(&mut self, rng: &mut R);
}
impl<T> RngSliceExt for [T] {
    /// Delegates to the inherent `<[T]>::len` (inherent methods win
    /// resolution, so this does not recurse).
    fn len(&self) -> usize {
        self.len()
    }
    /// Fisher-Yates shuffle, consuming one `u32` draw per swap.
    fn shuffle<R: RngCore>(&mut self, rng: &mut R) {
        // Keep `i + 1` inside the u32 domain used by fastmap32.
        assert!(self.len() < (u32::MAX as usize));
        let mut i = self.len();
        while i > 1 {
            i -= 1;
            let bound = (i as u32) + 1;
            let j = fastmap32(rng.next_u32(), bound) as usize;
            self.swap(i, j);
        }
    }
}
/// Map every byte of the array in place onto the 62-char alphanumeric
/// alphabet.
#[inline(never)]
fn encode_alphanum_bytes<const N: usize>(inout: &mut [u8; N]) {
    inout.iter_mut().for_each(|b| *b = encode_alphanum_byte(*b));
}
/// Slice variant of `encode_alphanum_bytes` (test builds only).
#[cfg(any(test, feature = "test-utils"))]
#[inline(never)]
fn encode_alphanum_slice(inout: &mut [u8]) {
    inout.iter_mut().for_each(|b| *b = encode_alphanum_byte(*b));
}
/// Map one random byte onto the 62-symbol alphabet `0-9A-Za-z`.
///
/// Uses multiply-shift reduction to pick an index in 0..62; since 256 is
/// not a multiple of 62 the mapping is slightly biased, which is fine for
/// identifiers and test data.
#[inline(always)]
const fn encode_alphanum_byte(x: u8) -> u8 {
    let idx = fastmap8(x, 10 + 26 + 26);
    if idx < 10 {
        // Digits '0'..='9'.
        b'0' + idx
    } else if idx < 10 + 26 {
        // Uppercase 'A'..='Z'.
        b'A' + (idx - 10)
    } else {
        // Lowercase 'a'..='z'.
        b'a' + (idx - 10 - 26)
    }
}
/// Cryptographically secure RNG backed by `ring`'s `SystemRandom`
/// (OS entropy source).
#[derive(Clone, Debug)]
pub struct SysRng(ring::rand::SystemRandom);
impl SysRng {
    /// Construct a new handle to the OS RNG.
    pub fn new() -> Self {
        Self(ring::rand::SystemRandom::new())
    }
}
impl Default for SysRng {
    fn default() -> Self {
        Self::new()
    }
}
/// `SystemRandom` is an OS CSPRNG, so advertising `CryptoRng` is sound.
impl CryptoRng for SysRng {}
impl RngCore for SysRng {
#[inline]
fn next_u32(&mut self) -> u32 {
rand_core::impls::next_u32_via_fill(self)
}
#[inline]
fn next_u64(&mut self) -> u64 {
rand_core::impls::next_u64_via_fill(self)
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.try_fill_bytes(dest).expect("ring SystemRandom failed")
}
fn try_fill_bytes(
&mut self,
dest: &mut [u8],
) -> Result<(), rand_core::Error> {
self.0
.fill(dest)
.map_err(|_| rand_core::Error::from(RAND_ERROR_CODE))
}
}
/// Small, fast, non-cryptographic RNG (xoroshiro64*).
///
/// `Clone` (and `CryptoRng`, below) are only derived in test builds so the
/// deterministic generator can stand in for a real CSPRNG under test.
#[derive(Debug)]
#[cfg_attr(any(test, feature = "test-utils"), derive(Clone))]
pub struct FastRng {
    // xoroshiro64* state; must never be (0, 0) — see `from_seed`, which
    // remaps the all-zero seed.
    s0: u32,
    s1: u32,
}
impl FastRng {
    /// Fixed default seed (arbitrary nonzero constants): every
    /// `FastRng::new()` produces the same output stream.
    pub fn new() -> Self {
        Self {
            s0: 0xdeadbeef,
            s1: 0xf00baa44,
        }
    }
    /// Seed from OS entropy.
    pub fn from_sysrng(sys_rng: &mut SysRng) -> Self {
        let seed = sys_rng.gen_u64();
        Self::seed_from_u64(seed)
    }
    /// Deterministic seeding via `SeedableRng::seed_from_u64`.
    pub fn from_u64(s: u64) -> Self {
        Self::seed_from_u64(s)
    }
}
impl Default for FastRng {
    fn default() -> Self {
        Self::new()
    }
}
/// Test-only: lets `FastRng` satisfy `Crng` bounds deterministically.
/// NOT cryptographically secure.
#[cfg(any(test, feature = "test-utils"))]
impl CryptoRng for FastRng {}
impl RngCore for FastRng {
    /// Advance the xoroshiro64* state one step and emit its output word.
    #[inline]
    fn next_u32(&mut self) -> u32 {
        let (next_s0, next_s1, out) =
            xoroshiro64star_next_u32(self.s0, self.s1);
        self.s0 = next_s0;
        self.s1 = next_s1;
        out
    }
    /// Two `next_u32` draws, low word first.
    #[inline]
    fn next_u64(&mut self) -> u64 {
        rand_core::impls::next_u64_via_u32(self)
    }
    /// Fill from successive `next_u32` draws.
    #[inline]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        rand_core::impls::fill_bytes_via_next(self, dest);
    }
    /// Infallible: delegates to `fill_bytes`.
    #[inline]
    fn try_fill_bytes(
        &mut self,
        dest: &mut [u8],
    ) -> Result<(), rand_core::Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}
/// One step of the xoroshiro64* generator.
///
/// Returns `(next_s0, next_s1, output)`; the output is the *current* `s0`
/// scrambled by multiplication with the golden-ratio constant.
#[inline(always)]
fn xoroshiro64star_next_u32(s0: u32, s1: u32) -> (u32, u32, u32) {
    let output = s0.wrapping_mul(0x9e37_79bb);
    let t = s1 ^ s0;
    let next_s0 = s0.rotate_left(26) ^ t ^ (t << 9);
    let next_s1 = t.rotate_left(13);
    (next_s0, next_s1, output)
}
impl SeedableRng for FastRng {
    type Seed = [u8; 8];
    /// Build from an 8-byte little-endian seed.
    ///
    /// The all-zero seed is remapped to the default constants because the
    /// all-zero state is a fixed point of xoroshiro64*.
    fn from_seed(seed: Self::Seed) -> Self {
        if seed.iter().all(|&b| b == 0) {
            return Self::new();
        }
        let mut words = [0u32; 2];
        read_u32_into(&seed, &mut words);
        Self {
            s0: words[0],
            s1: words[1],
        }
    }
}
/// Property-testing support: a `FastRng` built from a random 8-byte seed.
#[cfg(any(test, feature = "test-utils"))]
impl Arbitrary for FastRng {
    type Parameters = ();
    type Strategy = BoxedStrategy<Self>;
    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        // no_shrink: shrinking a seed does not shrink the failing case, it
        // just produces an unrelated random stream.
        any::<[u8; 8]>()
            .no_shrink()
            .prop_map(FastRng::from_seed)
            .boxed()
    }
}
/// Zero-sized handle to a per-thread xoroshiro64* RNG whose state lives in
/// `THREAD_RNG_STATE`.
pub struct ThreadFastRng(());
impl ThreadFastRng {
    /// Cheap to construct: carries no state of its own.
    #[inline]
    pub fn new() -> Self {
        Self(())
    }
    /// Deterministically seed the current thread's RNG.
    ///
    /// The seed is diffused with the SplitMix64 finalizer so that small or
    /// sequential seeds still produce well-mixed xoroshiro states.
    pub fn seed(seed: u64) {
        let mut z = seed.wrapping_add(0x9e3779b97f4a7c15);
        z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
        z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
        z = z ^ (z >> 31);
        // Fix: 0 is the "unseeded" sentinel in THREAD_RNG_STATE, and the
        // SplitMix64 finalizer is a bijection, so exactly one input seed
        // hashes to 0. Previously that seed was silently discarded and
        // replaced by OS entropy on first use, breaking determinism.
        // Remap it to a fixed nonzero state instead.
        if z == 0 {
            z = 0x9e3779b97f4a7c15;
        }
        THREAD_RNG_STATE.set(z)
    }
}
impl Default for ThreadFastRng {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
/// Test-only: allows `ThreadFastRng` to satisfy `Crng` bounds.
/// NOT cryptographically secure.
#[cfg(any(test, feature = "test-utils"))]
impl CryptoRng for ThreadFastRng {}
thread_local! {
    // Packed xoroshiro64* state for `ThreadFastRng`: (s0 << 32) | s1.
    // 0 doubles as the "unseeded" sentinel: the next draw reseeds from the
    // OS (see `ThreadFastRng`'s `RngCore::next_u32`).
    #[allow(clippy::missing_const_for_thread_local)]
    static THREAD_RNG_STATE: Cell<u64> = const { Cell::new(0) };
}
impl RngCore for ThreadFastRng {
    /// Advance the thread-local xoroshiro64* state by one step, lazily
    /// seeding it from the OS on first use.
    fn next_u32(&mut self) -> u32 {
        let mut packed = THREAD_RNG_STATE.get();
        if packed == 0 {
            // 0 is the "unseeded" sentinel; keep the slow path out of the
            // hot code.
            #[cold]
            #[inline(never)]
            fn reseed() -> u64 {
                let mut sys = SysRng::new();
                // Fix: reject a (vanishingly unlikely) all-zero draw. An
                // all-zero xoroshiro state is a fixed point: it would emit
                // 0 and leave the state at the sentinel, forcing a reseed
                // on every subsequent call.
                loop {
                    let s = sys.gen_u64();
                    if s != 0 {
                        return s;
                    }
                }
            }
            packed = reseed();
        }
        // Unpack (s0, s1), step the generator, repack.
        let s0 = (packed >> 32) as u32;
        let s1 = packed as u32;
        let (s0, s1, r) = xoroshiro64star_next_u32(s0, s1);
        THREAD_RNG_STATE.set(((s0 as u64) << 32) | (s1 as u64));
        r
    }
    /// Two `next_u32` draws, low word first.
    #[inline]
    fn next_u64(&mut self) -> u64 {
        rand_core::impls::next_u64_via_u32(self)
    }
    /// Fill from successive `next_u32` draws.
    #[inline]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        rand_core::impls::fill_bytes_via_next(self, dest);
    }
    /// Infallible: delegates to `fill_bytes`.
    #[inline]
    fn try_fill_bytes(
        &mut self,
        dest: &mut [u8],
    ) -> Result<(), rand_core::Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}
/// Multiply-shift reduction of a random byte `x` into `0..n`.
///
/// Computes `floor(x * n / 256)` in a widened `u16`, which cannot overflow
/// (255 * 255 < 2^16), so the product needs no wrapping.
#[inline(always)]
const fn fastmap8(x: u8, n: u8) -> u8 {
    let wide = (x as u16) * (n as u16);
    (wide >> 8) as u8
}
/// Multiply-shift reduction of a random `u32` into `0..n`:
/// `floor(x * n / 2^32)` computed in a widened `u64` (cannot overflow).
#[inline(always)]
const fn fastmap32(x: u32, n: u32) -> u32 {
    let wide = (x as u64) * (n as u64);
    (wide >> 32) as u32
}
/// Multiply-shift reduction of a random `u64` into `0..n`:
/// `floor(x * n / 2^64)` computed in a widened `u128` (cannot overflow).
#[inline(always)]
const fn fastmap64(x: u64, n: u64) -> u64 {
    let wide = (x as u128) * (n as u128);
    (wide >> 64) as u64
}
#[cfg(test)]
mod test {
    use proptest::{prop_assert, proptest};
    use super::*;
    /// The byte encoder must hit exactly the 62 alphanumeric ASCII chars
    /// and nothing else.
    #[test]
    fn test_encode_alphanum_byte() {
        // Histogram of encoder outputs over every possible input byte.
        let mut mset = [0u8; 256];
        for c in 0..=255 {
            let o = encode_alphanum_byte(c);
            mset[o as usize] += 1;
        }
        // Collect the set of bytes that were produced at least once.
        let actual_alphabet = mset
            .as_slice()
            .iter()
            .enumerate()
            .filter(|(_idx, count)| **count != 0)
            .map(|(idx, _count)| (idx as u8) as char)
            .collect::<String>();
        let expected_alphabet =
            "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
        assert_eq!(&actual_alphabet, expected_alphabet);
        assert_eq!(actual_alphabet.len(), 10 + 26 + 26);
    }
    /// Every generated byte should be ASCII alphanumeric, for any seed.
    #[test]
    fn test_gen_alphanum_bytes() {
        proptest!(|(mut rng: FastRng)| {
            let alphanum = rng.gen_alphanum_bytes::<16>();
            let alphanum_str = std::str::from_utf8(alphanum.as_slice()).unwrap();
            prop_assert!(alphanum_str.chars().all(|c| c.is_ascii_alphanumeric()));
        });
    }
    /// Float helpers must stay in the half-open unit interval.
    #[test]
    fn test_gen_f32_and_f64() {
        let mut rng = FastRng::from_u64(202603111712);
        for _ in 0..1000 {
            assert!((0.0..1.0).contains(&rng.gen_f32()));
            assert!((0.0..1.0).contains(&rng.gen_f64()));
        }
    }
}