#![no_std]
#![deny(unsafe_code)]
#![warn(missing_docs)]
#![deny(missing_debug_implementations)]
#![cfg_attr(docs_rs, feature(doc_cfg))]
use core::convert::{TryFrom, TryInto};
// Default seed material used by `Default` construction. Stored as `u128`;
// `PCG32::default` uses only the low 64 bits of each value.
const DEFAULT_PCG_SEED: u128 = 201526561274146932589719779721328219291;
const DEFAULT_PCG_INC: u128 = 34172814569070222299;
// The 64-bit LCG multiplier from the PCG reference implementation.
const PCG_MULTIPLIER_64: u64 = 6364136223846793005;
/// A random number generator whose native output is `u32` values.
///
/// Implementors only need to provide [`next_u32`](Gen32::next_u32); every
/// other method has a default implementation built on top of it.
pub trait Gen32 {
  /// Generates the next 32 bits of randomness.
  fn next_u32(&mut self) -> u32;

  /// Generates a `bool` from the top bit of the next `u32`.
  #[inline(always)]
  fn next_bool(&mut self) -> bool {
    (self.next_u32() as i32) < 0
  }

  /// Generates a `u8` from the top 8 bits of the next `u32`.
  #[inline(always)]
  fn next_u8(&mut self) -> u8 {
    (self.next_u32() >> 24) as u8
  }

  /// Generates a `u16` from the top 16 bits of the next `u32`.
  #[inline(always)]
  fn next_u16(&mut self) -> u16 {
    (self.next_u32() >> 16) as u16
  }

  /// Generates a `u64` from two `u32` outputs (first call fills the low
  /// half, second call fills the high half).
  #[inline(always)]
  fn next_u64(&mut self) -> u64 {
    let l = self.next_u32() as u64;
    let h = self.next_u32() as u64;
    h << 32 | l
  }

  /// Generates an `f32` in the unit range (sign bit never set).
  #[inline]
  fn next_f32_unit(&mut self) -> f32 {
    // FIX: the `signed` flags of this method and `next_f32_signed_unit`
    // were swapped — passing `true` here sets a *random* sign bit, so the
    // "unit" method could return negative values while the "signed"
    // method never could.
    ieee754_random_f32(self, false)
  }

  /// Generates an `f32` in the signed unit range (sign bit randomized).
  #[inline]
  fn next_f32_signed_unit(&mut self) -> f32 {
    ieee754_random_f32(self, true)
  }

  /// Generates a value in `0 .. b` with no modulo bias, using Lemire's
  /// widening-multiply rejection method.
  ///
  /// ## Panics
  /// If `b` is 0.
  #[inline]
  fn next_bounded(&mut self, b: u32) -> u32 {
    assert!(b != 0, "Gen32::next_bounded> Bound must be non-zero.");
    let mut x = self.next_u32() as u64;
    let mut mul = (b as u64).wrapping_mul(x);
    let mut low = mul as u32;
    if low < b {
      // `threshold` is `2^32 mod b`; draws whose low half lands below it
      // must be rejected to keep the reduction unbiased.
      let threshold = b.wrapping_neg() % b;
      while low < threshold {
        x = self.next_u32() as u64;
        mul = (b as u64).wrapping_mul(x);
        low = mul as u32;
      }
    }
    let high = (mul >> 32) as u32;
    high
  }

  /// Rolls `count` dice with `sides` sides each and returns the (wrapping)
  /// sum.
  ///
  /// * `sides < 1` gives 0 without rolling.
  /// * `sides == 1` gives `count.max(0)` without rolling.
  /// * The common sizes (4, 6, 8, 10, 12, 20) use the pre-built die
  ///   constants; any other size builds a [`StandardDie`] on the fly.
  #[inline]
  fn dice(&mut self, mut count: i32, sides: i32) -> i32 {
    use core::cmp::Ordering;
    let range = match sides.cmp(&1) {
      Ordering::Less => return 0,
      Ordering::Equal => return count.max(0),
      Ordering::Greater => match sides {
        4 => D4,
        6 => D6,
        8 => D8,
        10 => D10,
        12 => D12,
        20 => D20,
        _ => StandardDie::new(sides as u32),
      },
    };
    let mut t = 0_i32;
    while count > 0 {
      t = t.wrapping_add(range.sample(self));
      count -= 1;
    }
    t
  }

  /// Rolls a "step" roll: the step number selects a combination of
  /// exploding dice, which is rolled and summed.
  ///
  /// Steps below 1 give 1 without rolling. Steps above 13 add one
  /// exploding d12 for every 7 steps over.
  /// NOTE(review): the table appears to follow the Earthdawn 4th edition
  /// step chart — confirm against the published table.
  #[inline]
  fn step_ed4(&mut self, mut step: i32) -> i32 {
    if step < 1 {
      1
    } else {
      let mut total: i32 = 0;
      while step > 13 {
        total = total.wrapping_add(X12.sample(self));
        step -= 7;
      }
      total.wrapping_add(match step {
        1 => X4.sample(self).wrapping_sub(2).max(1),
        2 => X4.sample(self).wrapping_sub(1).max(1),
        3 => X4.sample(self),
        4 => X6.sample(self),
        5 => X8.sample(self),
        6 => X10.sample(self),
        7 => X12.sample(self),
        8 => X6.sample(self).wrapping_add(X6.sample(self)),
        9 => X8.sample(self).wrapping_add(X6.sample(self)),
        10 => X8.sample(self).wrapping_add(X8.sample(self)),
        11 => X10.sample(self).wrapping_add(X8.sample(self)),
        12 => X10.sample(self).wrapping_add(X10.sample(self)),
        13 => X12.sample(self).wrapping_add(X10.sample(self)),
        // `step` is clamped to 1..=13 by the branches above.
        _ => unreachable!(),
      })
    }
  }

  /// Rolls a pool of `size` six-sided dice and counts each roll of 5 or 6
  /// as one hit.
  #[inline]
  fn sundown_pool(&mut self, mut size: u32) -> u32 {
    let mut hits = 0;
    while size > 0 {
      if D6.sample(self) >= 5 {
        hits += 1
      }
      size -= 1;
    }
    hits
  }

  /// Generates a value in `0 .. x`, skewed by a `luck` factor.
  ///
  /// With non-zero luck, all but a 1-in-`(37 + |adjustment|)` chance of
  /// rolls are shifted by the adjustment (positive luck shifts results
  /// toward 0) and clamped back into range. When `x <= 15` the luck is
  /// scaled down to roughly a third.
  /// NOTE(review): this mirrors a NetHack-style `rnl` — confirm the
  /// intended constants.
  ///
  /// ## Panics
  /// If `x` is not positive.
  #[inline]
  fn rn_bounded_luck(&mut self, x: i32, luck: i32) -> i32 {
    assert!(x > 0);
    let adjustment =
      if x <= 15 { (luck.abs() + 1) / 3 * luck.signum() } else { luck };
    let mut i = self.next_bounded(x as u32) as i32;
    if adjustment != 0 && self.next_bounded(37 + adjustment.abs() as u32) != 0 {
      i -= adjustment;
      i = i.max(0).min(x - 1);
    }
    i
  }

  /// Generates `1 + k`, where `k` counts consecutive 1-in-`x` successes:
  /// each additional increment is `x` times less likely than the last.
  ///
  /// ## Panics
  /// If `x < 2` (the assert requires `x > 1`).
  #[inline]
  fn rn_exponential_decay(&mut self, x: i32) -> i32 {
    assert!(x > 1);
    let mut temp = 1;
    while self.next_bounded(x as u32) == 0 {
      temp += 1;
    }
    temp
  }

  /// Scales `i` by a random factor: a multiplier of 1.000 to 1.999 (in
  /// thousandths) times a capped exponential-decay value, applied as
  /// either a multiplication or (with even odds) a division.
  #[inline]
  fn rn_z(&mut self, i: i32) -> i32 {
    // Work in i64 so the thousandths-based scaling can't overflow i32
    // mid-computation.
    let mut x = i as i64;
    let mut temp = 1000_i64;
    temp += self.next_bounded(1000) as i64;
    temp *= self.rn_exponential_decay(4).min(5) as i64;
    if self.next_bool() {
      x *= temp;
      x /= 1000;
    } else {
      x *= 1000;
      x /= temp;
    }
    x as i32
  }

  /// Picks (by copy) a random element of the slice.
  ///
  /// Slices longer than `u32::MAX` elements only ever pick from the first
  /// `u32::MAX` entries (the length is saturated to `u32`).
  ///
  /// ## Panics
  /// If the slice is empty.
  #[inline(always)]
  fn pick<T>(&mut self, buf: &[T]) -> T
  where
    Self: Sized,
    T: Copy,
  {
    let end: u32 = saturating_usize_as_u32(buf.len());
    buf[usize::try_from(self.next_bounded(end)).unwrap()]
  }

  /// Picks (by shared reference) a random element of the slice.
  ///
  /// ## Panics
  /// If the slice is empty.
  #[inline(always)]
  fn pick_ref<'b, T>(&mut self, buf: &'b [T]) -> &'b T
  where
    Self: Sized,
  {
    let end: u32 = saturating_usize_as_u32(buf.len());
    &buf[usize::try_from(self.next_bounded(end)).unwrap()]
  }

  /// Picks (by unique reference) a random element of the slice.
  ///
  /// ## Panics
  /// If the slice is empty.
  #[inline(always)]
  fn pick_mut<'b, T>(&mut self, buf: &'b mut [T]) -> &'b mut T
  where
    Self: Sized,
  {
    let end: u32 = saturating_usize_as_u32(buf.len());
    &mut buf[usize::try_from(self.next_bounded(end)).unwrap()]
  }

  /// Shuffles the slice in place (Fisher-Yates).
  #[inline]
  fn shuffle<T>(&mut self, buf: &mut [T])
  where
    Self: Sized,
  {
    // FIX: bail out for 0/1-element slices. Previously `buf.len() - 1`
    // underflowed on an empty slice (panic in debug builds; in release the
    // wrapped `end` made the loop run and `next_bounded(0)` panicked).
    if buf.len() < 2 {
      return;
    }
    let mut possibility_count: u32 =
      buf.len().try_into().unwrap_or(u32::max_value());
    let mut this_index: usize = 0;
    let end = buf.len() - 1;
    while this_index < end {
      let offset = self.next_bounded(possibility_count) as usize;
      buf.swap(this_index, this_index + offset);
      possibility_count -= 1;
      this_index += 1;
    }
  }
}
// Compile-time assertion that `Gen32` stays object-safe (`dyn Gen32` must be
// a legal type); the `Self: Sized` bounds on the generic methods keep it so.
const _: [&mut dyn Gen32; 0] = [];
/// A [Permuted Congruential Generator](https://www.pcg-random.org) with
/// 64 bits of state and 32 bits of output per step (XSH-RR output function).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PCG32 {
  // Current LCG state; advanced by `pcg_core_state64` on every output.
  state: u64,
  // Per-stream increment. `PCG32::seed` forces this to be odd.
  inc: u64,
}
impl Default for PCG32 {
  /// Seeds the generator from the crate's fixed default constants.
  ///
  /// NOTE(review): the `u128` default constants are truncated to their low
  /// 64 bits here — presumably they are shared with wider generator
  /// variants; confirm the truncation is intended.
  #[inline(always)]
  fn default() -> Self {
    PCG32::seed(DEFAULT_PCG_SEED as u64, DEFAULT_PCG_INC as u64)
  }
}
impl PCG32 {
  /// Builds a generator from a seed value and a stream-selection value.
  ///
  /// The increment is forced odd, then the state is initialized by
  /// stepping once from zero, mixing in the seed, and stepping again.
  #[inline]
  pub const fn seed(seed: u64, inc: u64) -> Self {
    let inc = (inc << 1) | 1;
    let state =
      pcg_core_state64(pcg_core_state64(0, inc).wrapping_add(seed), inc);
    Self { state, inc }
  }

  /// Builds a generator with entropy from the `getrandom` crate, falling
  /// back to `Self::default()` if the entropy call fails.
  #[inline]
  #[cfg(feature = "getrandom")]
  #[cfg_attr(docs_rs, doc(cfg(feature = "getrandom")))]
  pub fn from_getrandom() -> Self {
    const SIZE_U64: usize = core::mem::size_of::<u64>();
    let mut buf = [0_u8; SIZE_U64 * 2];
    if getrandom::getrandom(&mut buf).is_ok() {
      let (seed_bytes, inc_bytes) = buf.split_at(SIZE_U64);
      Self::seed(
        u64::from_ne_bytes(seed_bytes.try_into().unwrap()),
        u64::from_ne_bytes(inc_bytes.try_into().unwrap()),
      )
    } else {
      Self::default()
    }
  }

  /// Produces the next 32 bits of output and advances the state one step.
  #[inline]
  pub fn next_u32(&mut self) -> u32 {
    // Output is permuted from the *pre-step* state.
    let pre_step = self.state;
    self.state = pcg_core_state64(pre_step, self.inc);
    xsh_rr_64_32(pre_step)
  }

  /// Jumps the generator `delta` steps forward in its output sequence in
  /// O(log(delta)) time.
  #[inline]
  pub fn jump(&mut self, delta: u64) {
    self.state = jump_lcg64(delta, self.state, PCG_MULTIPLIER_64, self.inc);
  }
}
impl From<[u64; 2]> for PCG32 {
  /// Uses the array directly as `[state, inc]`, with no adjustment.
  ///
  /// NOTE(review): unlike [`PCG32::seed`], this does not force `inc` to be
  /// odd; presumably callers must supply an already-valid pair — confirm
  /// this is the intended contract.
  #[must_use]
  #[inline(always)]
  fn from([state, inc]: [u64; 2]) -> Self {
    Self { state, inc }
  }
}
impl Gen32 for PCG32 {
  #[inline(always)]
  fn next_u32(&mut self) -> u32 {
    PCG32::next_u32(self)
  }
  /// Overrides the trait default (which splices two `next_u32` calls) to
  /// take a full 64 bits from a single state step via XSL-RR-RR.
  #[inline(always)]
  fn next_u64(&mut self) -> u64 {
    // Permute the pre-step state, then advance.
    let out = xsl_rr_rr_64_64(self.state);
    self.state = pcg_core_state64(self.state, self.inc);
    out
  }
}
/// The PCG `XSH-RR` output permutation: xorshift the state downward, then
/// apply a random rotation chosen by the top 5 bits of the state.
#[must_use]
#[inline(always)]
const fn xsh_rr_64_32(state: u64) -> u32 {
  let rot: u32 = (state >> 59) as u32;
  let xorshifted: u32 = (((state >> 18) ^ state) >> 27) as u32;
  xorshifted.rotate_right(rot)
}
/// The PCG `XSL-RR-RR` output permutation: turns 64 bits of state into
/// 64 bits of output (used so `next_u64` needs only one state step).
#[must_use]
#[inline(always)]
const fn xsl_rr_rr_64_64(state: u64) -> u64 {
  let high: u32 = (state >> 32) as u32;
  // Fold the halves together and rotate by the top 5 bits of the state.
  let folded: u32 = (high ^ (state as u32)).rotate_right((state >> 59) as u32);
  // Rotate the high half by the low 5 bits of the folded word.
  let rotated_high: u32 = high.rotate_right(folded & 31);
  ((rotated_high as u64) << 32) | (folded as u64)
}
/// Advances a 64-bit PCG state by one step (an LCG step using the standard
/// PCG multiplier and the generator's own increment).
#[must_use]
#[inline(always)]
const fn pcg_core_state64(state: u64, inc: u64) -> u64 {
  lcg64(state, PCG_MULTIPLIER_64, inc)
}
/// One step of a 64-bit linear congruential generator:
/// `state * mult + inc`, all with wrapping arithmetic.
#[must_use]
#[inline(always)]
const fn lcg64(state: u64, mult: u64, inc: u64) -> u64 {
  inc.wrapping_add(state.wrapping_mul(mult))
}
// Builds a const fn that jumps an LCG forward by `delta` steps in
// O(log(delta)) time, using the square-and-multiply technique (Brown,
// "Random Number Generation with Arbitrary Strides"): each iteration
// squares the step transformation and folds it into the accumulator when
// the corresponding bit of `delta` is set.
macro_rules! make_jump_lcgX {
  ($(#[$attr:meta])* $f:ident, $u:ty) => {
    $(#[$attr])*
    #[must_use]
    #[inline(always)]
    const fn $f(mut delta: $u, state: $u, mult: $u, inc: $u) -> $u {
      // `cur_*` is the transformation for the current power-of-two stride;
      // `acc_*` is the accumulated jump: state -> acc_mult*state + acc_plus.
      let mut cur_mult: $u = mult;
      let mut cur_plus: $u = inc;
      let mut acc_mult: $u = 1;
      let mut acc_plus: $u = 0;
      while delta > 0 {
        if (delta & 1) > 0 {
          // This stride is part of the jump: compose it into the accumulator.
          acc_mult = acc_mult.wrapping_mul(cur_mult);
          acc_plus = acc_plus.wrapping_mul(cur_mult).wrapping_add(cur_plus);
        }
        // Square the current transformation (double its stride).
        cur_plus = cur_mult.wrapping_add(1).wrapping_mul(cur_plus);
        cur_mult = cur_mult.wrapping_mul(cur_mult);
        delta /= 2;
      }
      acc_mult.wrapping_mul(state).wrapping_add(acc_plus)
    }
  };
}
make_jump_lcgX!(jump_lcg64, u64);
/// Converts a `usize` to `u32`, saturating at `u32::MAX` when the value
/// doesn't fit.
///
/// A single comparison is correct on every pointer width, so the previous
/// per-target `#[cfg]` duplication was unnecessary: on 16- and 32-bit
/// targets the truncating cast `core::u32::MAX as usize` equals
/// `usize::MAX`, so the test always passes and `val` always fits in `u32`.
#[inline(always)]
const fn saturating_usize_as_u32(val: usize) -> u32 {
  if val <= core::u32::MAX as usize {
    val as u32
  } else {
    core::u32::MAX
  }
}
/// Stores the setup for producing values in `0 .. count` without modulo
/// bias, so the rejection threshold is computed only once.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BoundedRandU32 {
  // Exclusive upper bound of the output range.
  count: u32,
  // `2^32 % count`: widening-multiply low halves below this are rejected
  // to keep the reduction unbiased.
  threshold: u32,
}
impl BoundedRandU32 {
  /// Sets up sampling for the range `0 .. count`.
  ///
  /// ## Panics
  /// If `count` is 0 (the threshold computation divides by `count`).
  #[inline]
  pub const fn new(count: u32) -> Self {
    Self { count, threshold: count.wrapping_neg() % count }
  }

  /// Non-panicking version of [`new`](Self::new): gives `None` when
  /// `count` is 0.
  #[inline]
  pub const fn try_new(count: u32) -> Option<Self> {
    match count {
      0 => None,
      _ => Some(Self::new(count)),
    }
  }

  /// The exclusive upper bound of the output range.
  #[inline]
  pub const fn count(self) -> u32 {
    self.count
  }

  /// Maps a full-range `u32` into `0 .. count`, or `None` when the draw
  /// falls in the rejection zone (Lemire's widening-multiply method).
  #[inline]
  pub const fn place_in_range(self, val: u32) -> Option<u32> {
    let mul: u64 = (val as u64).wrapping_mul(self.count as u64);
    if (mul as u32) < self.threshold {
      None
    } else {
      Some((mul >> 32) as u32)
    }
  }

  /// Draws from the generator until a draw maps into range, then returns
  /// the mapped value.
  #[inline]
  pub fn sample<G: Gen32 + ?Sized>(self, gen: &mut G) -> u32 {
    loop {
      match self.place_in_range(gen.next_u32()) {
        Some(output) => return output,
        None => continue,
      }
    }
  }
}
/// A die with some number of sides, rolling values in `1 ..= sides`
/// (a [`BoundedRandU32`] with the output shifted up by 1).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct StandardDie(BoundedRandU32);
impl StandardDie {
  /// Makes a die with the given number of sides.
  ///
  /// ## Panics
  /// If `sides` is 0.
  #[inline]
  pub const fn new(sides: u32) -> Self {
    Self(BoundedRandU32::new(sides))
  }

  /// The number of sides on this die.
  #[inline]
  pub const fn sides(self) -> i32 {
    self.0.count() as i32
  }

  /// Rolls the die, producing a value in `1 ..= sides`.
  #[inline]
  pub fn sample<G: Gen32 + ?Sized>(self, gen: &mut G) -> i32 {
    let zero_based = self.0.sample(gen) as i32;
    zero_based + 1
  }
}
// Pre-built dice for the common sizes, used by `Gen32::dice`.
#[doc(hidden)]
pub const D4: StandardDie = StandardDie::new(4);
#[doc(hidden)]
pub const D6: StandardDie = StandardDie::new(6);
#[doc(hidden)]
pub const D8: StandardDie = StandardDie::new(8);
#[doc(hidden)]
pub const D10: StandardDie = StandardDie::new(10);
#[doc(hidden)]
pub const D12: StandardDie = StandardDie::new(12);
#[doc(hidden)]
pub const D20: StandardDie = StandardDie::new(20);
/// A die where rolling the maximum value adds the maximum to the total and
/// rolls again ("exploding").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct ExplodingDie(StandardDie);
impl ExplodingDie {
  /// Makes an exploding die with the given number of sides.
  ///
  /// ## Panics
  /// If `sides` is 0.
  #[inline]
  pub const fn new(sides: u32) -> Self {
    Self(StandardDie::new(sides))
  }

  /// The number of sides on this die.
  #[inline]
  pub const fn sides(self) -> i32 {
    self.0.sides()
  }

  /// Rolls the die: each maximum roll adds `sides` to the total and rolls
  /// again; a final fresh roll is then added on.
  #[inline]
  pub fn sample<G: Gen32 + ?Sized>(self, gen: &mut G) -> i32 {
    let mut t: i32 = 0;
    while self.0.sample(gen) == self.0.sides() {
      // FIX: each explosion previously added `sides + 1` (there was an
      // extra `.wrapping_add(1)`), which both inflates totals and leaves
      // an unreachable gap in the distribution — e.g. an exploding d6
      // could never produce exactly 7 (no-explosion max 6, one-explosion
      // min 8). An explosion contributes exactly the die's maximum.
      t = t.wrapping_add(self.sides());
    }
    t.wrapping_add(self.0.sample(gen) as i32)
  }
}
// Pre-built exploding dice for the common sizes, used by `Gen32::step_ed4`.
#[doc(hidden)]
pub const X4: ExplodingDie = ExplodingDie::new(4);
#[doc(hidden)]
pub const X6: ExplodingDie = ExplodingDie::new(6);
#[doc(hidden)]
pub const X8: ExplodingDie = ExplodingDie::new(8);
#[doc(hidden)]
pub const X10: ExplodingDie = ExplodingDie::new(10);
#[doc(hidden)]
pub const X12: ExplodingDie = ExplodingDie::new(12);
#[doc(hidden)]
pub const X20: ExplodingDie = ExplodingDie::new(20);
/// Samples a binary exponential distribution: the count of trailing zero
/// bits across the generator's output, so `k` occurs with probability
/// `2^-(k+1)`, extending with fresh draws whenever a whole `u32` is zero.
#[inline]
fn next_binary_exp_distr<G: Gen32 + ?Sized>(g: &mut G) -> u32 {
  let mut bits_consumed = 0;
  loop {
    let draw = g.next_u32();
    if draw != 0 {
      return bits_consumed + draw.trailing_zeros();
    }
    // An all-zero draw means at least 32 zero bits; keep going.
    bits_consumed += 32;
  }
}
/// Generates an `f32` by direct IEEE-754 bit assembly, so that every
/// representable value in the output range can occur.
///
/// When `signed` is true one input bit becomes a random sign bit;
/// otherwise the sign is always positive.
/// NOTE(review): the exponent is drawn from a binary exponential
/// distribution and constrained to `[-bias, 0]`, so the magnitude is
/// presumably in the unit range — confirm the exact open/closed endpoints
/// against the algorithm's reference.
fn ieee754_random_f32<G: Gen32 + ?Sized>(g: &mut G, signed: bool) -> f32 {
  let bit_width = 32;
  let exponent_bias = 127;
  let num_mantissa_bits = 23;
  // Bits of `r` left over after mantissa, `rand_bit`, and (optional) sign.
  let num_rest_bits = bit_width - num_mantissa_bits - 1 - signed as i32;
  let r: u32 = g.next_u32();
  debug_assert!(num_rest_bits >= 0);
  debug_assert!(core::mem::size_of::<u32>() * 8 == bit_width as _);
  // The mantissa takes the top bits of the draw.
  let mantissa = r >> (bit_width - num_mantissa_bits);
  let (sign_mask, rand_bit, rest_bits);
  if signed {
    // Bit 0 becomes the sign, bit 1 is `rand_bit`, bits 2.. are the rest.
    sign_mask = r << (bit_width - 1);
    rand_bit = (r & 2) != 0;
    rest_bits = (r >> 2) & ((1 << num_rest_bits) - 1);
  } else {
    // No sign bit consumed: bit 0 is `rand_bit`, bits 1.. are the rest.
    sign_mask = 0;
    rand_bit = (r & 1) != 0;
    rest_bits = (r >> 1) & ((1 << num_rest_bits) - 1);
  }
  // A zero mantissa lands exactly on a power-of-two boundary; `rand_bit`
  // appears to push half of those draws up one exponent — TODO confirm
  // against the reference algorithm.
  let increment_exponent = (mantissa == 0 && rand_bit) as i32;
  // Exponent: geometric — count trailing zeros of the spare bits, and when
  // they are exhausted extend with further draws.
  let mut exponent: i32 = -1 + increment_exponent
    - if rest_bits > 0 {
      rest_bits.trailing_zeros() as i32
    } else {
      num_rest_bits + next_binary_exp_distr(g) as i32
    };
  // Redraw any exponent outside the representable/intended range.
  while exponent < -exponent_bias || exponent > 0 {
    exponent = -1 + increment_exponent - next_binary_exp_distr(g) as i32;
  }
  f32::from_bits(
    sign_mask
      | (((exponent + exponent_bias) as u32) << num_mantissa_bits)
      | mantissa,
  )
}
/// `f64` version of [`ieee754_random_f32`]: generates an `f64` by direct
/// IEEE-754 bit assembly (52 mantissa bits, exponent bias 1023).
///
/// When `signed` is true one input bit becomes a random sign bit;
/// otherwise the sign is always positive. See the `f32` version for the
/// bit-budget layout; currently unused (`#[allow(dead_code)]`).
#[allow(dead_code)]
fn ieee754_random_f64<G: Gen32 + ?Sized>(g: &mut G, signed: bool) -> f64 {
  let bit_width = 64;
  let exponent_bias = 1023;
  let num_mantissa_bits = 52;
  // Bits of `r` left over after mantissa, `rand_bit`, and (optional) sign.
  let num_rest_bits = bit_width - num_mantissa_bits - 1 - signed as i32;
  let r: u64 = g.next_u64();
  debug_assert!(num_rest_bits >= 0);
  debug_assert!(core::mem::size_of::<u64>() * 8 == bit_width as _);
  // The mantissa takes the top bits of the draw.
  let mantissa = r >> (bit_width - num_mantissa_bits);
  let (sign_mask, rand_bit, rest_bits);
  if signed {
    // Bit 0 becomes the sign, bit 1 is `rand_bit`, bits 2.. are the rest.
    sign_mask = r << (bit_width - 1);
    rand_bit = (r & 2) != 0;
    rest_bits = (r >> 2) & ((1 << num_rest_bits) - 1);
  } else {
    // No sign bit consumed: bit 0 is `rand_bit`, bits 1.. are the rest.
    sign_mask = 0;
    rand_bit = (r & 1) != 0;
    rest_bits = (r >> 1) & ((1 << num_rest_bits) - 1);
  }
  // Zero mantissa sits on a power-of-two boundary; `rand_bit` appears to
  // push half of those draws up one exponent — TODO confirm.
  let increment_exponent = (mantissa == 0 && rand_bit) as i32;
  // Exponent: geometric — trailing zeros of the spare bits, extended with
  // further draws when exhausted.
  let mut exponent: i32 = -1 + increment_exponent
    - if rest_bits > 0 {
      rest_bits.trailing_zeros() as i32
    } else {
      num_rest_bits + next_binary_exp_distr(g) as i32
    };
  // Redraw any exponent outside the representable/intended range.
  while exponent < -exponent_bias || exponent > 0 {
    exponent = -1 + increment_exponent - next_binary_exp_distr(g) as i32;
  }
  f64::from_bits(
    sign_mask
      | (((exponent + exponent_bias) as u64) << num_mantissa_bits)
      | mantissa,
  )
}