#![warn(missing_docs)]
#![deny(clippy::unwrap_used)]
#![cfg_attr(feature = "unstable_tls", feature(thread_local))]
#![cfg_attr(feature = "unstable_simd", feature(portable_simd))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(docsrs, feature(doc_cfg))]

#[cfg(feature = "rand")]
mod rand_compatibility;
#[cfg(not(feature = "unstable_simd"))]
mod stable_simd;
#[cfg(all(feature = "tls", not(feature = "unstable_tls")))]
mod stable_tls;
#[cfg(feature = "unstable_simd")]
mod unstable_simd;
#[cfg(feature = "unstable_tls")]
mod unstable_tls;

use core::{
    cell::Cell,
    ops::{Bound, RangeBounds},
};

#[cfg(not(feature = "unstable_simd"))]
pub use stable_simd::*;
#[cfg(all(feature = "tls", not(feature = "unstable_tls")))]
pub use stable_tls::*;
#[cfg(feature = "unstable_simd")]
pub use unstable_simd::*;
#[cfg(feature = "unstable_tls")]
pub use unstable_tls::*;

// Generates the `range_*` methods. Uses Lemire's widening-multiply method with
// rejection sampling to map the generator output onto the requested range
// without modulo bias.
macro_rules! range_integer {
    ($fn:tt, $target:tt, $base:tt, $tmp:tt, $doc:tt) => {
        #[doc = $doc]
        #[inline(always)]
        pub fn $fn<T: RangeBounds<$target>>(&self, range: T) -> $target {
            let low = match range.start_bound() {
                Bound::Included(&x) => x,
                Bound::Excluded(&x) => x.checked_add(1).unwrap_or_else(|| {
                    panic!(
                        "start is invalid: {:?}..{:?}",
                        range.start_bound(),
                        range.end_bound()
                    )
                }),
                Bound::Unbounded => $target::MIN,
            };

            let high = match range.end_bound() {
                Bound::Included(&x) => x,
                Bound::Excluded(&x) => x.checked_sub(1).unwrap_or_else(|| {
                    panic!(
                        "end is invalid: {:?}..{:?}",
                        range.start_bound(),
                        range.end_bound()
                    )
                }),
                Bound::Unbounded => $target::MAX,
            };

            if low > high {
                panic!(
                    "start is bigger than end: {:?}..{:?}",
                    range.start_bound(),
                    range.end_bound()
                );
            }

            if low == $target::MIN && high == $target::MAX {
                self.next() as $target
            } else {
                let range = high.wrapping_sub(low).wrapping_add(1) as $base;

                // Widening multiply: the high half is the candidate value, the
                // low half decides whether to reject and resample.
                let mut x = self.next() as $base;
                let mut result = (x as $tmp).wrapping_mul(range as $tmp);
                let mut leftover = result as $base;
                if leftover < range {
                    let threshold = range.wrapping_neg() % range;
                    while leftover < threshold {
                        x = self.next() as $base;
                        result = (x as $tmp).wrapping_mul(range as $tmp);
                        leftover = result as $base;
                    }
                }

                low.wrapping_add((result >> (core::mem::size_of::<$base>() * 8)) as $target)
            }
        }
    };
}

/// The source of the seed used to initialise an [`Rng`].
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[allow(unused)]
pub enum SeedSource {
    /// Seeded from the `getrandom` crate.
    GetRandom,
    /// Seeded from `std` sources (current time, thread id and a memory address).
    Std,
    /// Seeded from a memory address only.
    MemoryAddress,
    /// Seeded with a user-provided value.
    User,
    /// Seeded with a fixed, hard-coded value.
    Fixed,
}

/// Generates a seed from the best source available for the enabled features.
fn generate_seed(memory_address: u64) -> ([u64; 3], SeedSource) {
    #[cfg(feature = "getrandom")]
    return collect_getrandom_randomness(memory_address);
    #[cfg(all(feature = "std", not(feature = "getrandom")))]
    return collect_std_randomness(memory_address);
    #[cfg(all(not(feature = "std"), not(feature = "getrandom")))]
    return collect_memory_address_randomness(memory_address);
}

/// Collects 192 bits of seed material from the `getrandom` crate, falling back
/// to the `std` or memory-address sources if it fails.
#[cfg(feature = "getrandom")]
#[allow(unused_variables)]
fn collect_getrandom_randomness(memory_address: u64) -> ([u64; 3], SeedSource) {
    let mut b = [0u8; 24];
    match getrandom::fill(&mut b) {
        Ok(_) => (
            [
                u64::from_be_bytes([b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]]),
                u64::from_be_bytes([b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]]),
                u64::from_be_bytes([b[16], b[17], b[18], b[19], b[20], b[21], b[22], b[23]]),
            ],
            SeedSource::GetRandom,
        ),
        Err(_) => {
            #[cfg(feature = "std")]
            return collect_std_randomness(memory_address);
            #[cfg(not(feature = "std"))]
            return collect_memory_address_randomness(memory_address);
        }
    }
}

/// Collects seed material from `std` sources: the current time, the current
/// thread id and the caller-provided memory address, each run through a
/// SplitMix64 round.
#[cfg(feature = "std")]
fn collect_std_randomness(memory_address: u64) -> ([u64; 3], SeedSource) {
    use std::{
        collections::hash_map::DefaultHasher,
        hash::{Hash, Hasher},
        thread,
        time::Instant,
    };

    let mut hasher = DefaultHasher::new();
    Instant::now().hash(&mut hasher);
    let (first, _) = split_mix_64_round(hasher.finish());

    let mut hasher = DefaultHasher::new();
    thread::current().id().hash(&mut hasher);
    let (second, _) = split_mix_64_round(hasher.finish());
    let (third, _) = split_mix_64_round(memory_address);

    ([first, second, third], SeedSource::Std)
}

/// Collects seed material from the caller-provided memory address alone,
/// expanded with SplitMix64.
#[cfg(not(feature = "std"))]
fn collect_memory_address_randomness(memory_address: u64) -> ([u64; 3], SeedSource) {
    let seed = split_mix_64(memory_address);
    (seed, SeedSource::MemoryAddress)
}

/// Expands a 64-bit seed into three non-zero 64-bit state words using
/// SplitMix64.
const fn split_mix_64(state: u64) -> [u64; 3] {
    let (x, state) = split_mix_64_round(state);
    let (y, state) = split_mix_64_round(state);
    let (z, _) = split_mix_64_round(state);

    let x = if x == 0 { 1 } else { x };
    let y = if y == 0 { 1 } else { y };
    let z = if z == 0 { 1 } else { z };

    [x, y, z]
}

/// A single SplitMix64 step: returns the generated value and the advanced
/// state.
const fn split_mix_64_round(mut state: u64) -> (u64, u64) {
    state = state.wrapping_add(0x9E3779B97F4A7C15);

    let mut z = state;
    z = (z ^ (z.wrapping_shr(30))).wrapping_mul(0xBF58476D1CE4E5B9);
    z = (z ^ (z.wrapping_shr(27))).wrapping_mul(0x94D049BB133111EB);
    z = z ^ (z.wrapping_shr(31));

    (z, state)
}

/// A pseudorandom number generator built on three 64-bit words of state
/// (a RomuTrio-style generator). It is fast and small, but not suitable for
/// cryptographic use. The state lives in [`Cell`]s, so all methods take
/// `&self`.
pub struct Rng {
    x: Cell<u64>,
    y: Cell<u64>,
    z: Cell<u64>,
    seed_source: Cell<SeedSource>,
}

impl Default for Rng {
    fn default() -> Self {
        let rng = Self {
            x: Cell::new(0),
            y: Cell::new(0),
            z: Cell::new(0),
            seed_source: Cell::new(SeedSource::Fixed),
        };
        rng.seed();

        rng
    }
}

impl Rng {
    /// Creates a new generator, seeded from the best entropy source available
    /// for the enabled features.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a generator with a fixed, hard-coded state; used by the
    /// thread-local support.
    #[cfg(any(feature = "tls", feature = "unstable_tls"))]
    pub(crate) const fn fixed_tls() -> Self {
        Self {
            x: Cell::new(0xAD9DA80FF4906D64),
            y: Cell::new(0xD90576EBC62161CA),
            z: Cell::new(0xBF0F8CA2E79B4817),
            seed_source: Cell::new(SeedSource::Fixed),
        }
    }

    /// Creates a generator from a 64-bit seed, expanded to 192 bits of state
    /// with SplitMix64.
    pub const fn from_seed_with_64bit(seed: u64) -> Self {
        let seed = split_mix_64(seed);

        Self {
            x: Cell::new(seed[0]),
            y: Cell::new(seed[1]),
            z: Cell::new(seed[2]),
            seed_source: Cell::new(SeedSource::User),
        }
    }

    /// Creates a generator from a full 192-bit seed, used as the state
    /// directly. The seed words should not all be zero.
    pub const fn from_seed_with_192bit(seed: [u64; 3]) -> Self {
        Self {
            x: Cell::new(seed[0]),
            y: Cell::new(seed[1]),
            z: Cell::new(seed[2]),
            seed_source: Cell::new(SeedSource::User),
        }
    }

    /// Returns the source that was used to seed this generator.
    pub fn seed_source(&self) -> SeedSource {
        self.seed_source.get()
    }

    /// Reseeds the generator from the best entropy source available for the
    /// enabled features.
    pub fn seed(&self) {
        let memory_address = self as *const _ as u64;

        let (seed, seed_source) = generate_seed(memory_address);
        self.x.set(seed[0]);
        self.y.set(seed[1]);
        self.z.set(seed[2]);
        self.seed_source.set(seed_source)
    }

    /// Reseeds the generator from a 64-bit seed, expanded to 192 bits of state
    /// with SplitMix64.
    pub fn seed_with_64bit(&self, seed: u64) {
        let seed = split_mix_64(seed);

        self.x.set(seed[0]);
        self.y.set(seed[1]);
        self.z.set(seed[2]);
        self.seed_source.set(SeedSource::User)
    }

    /// Reseeds the generator with a full 192-bit seed, used as the state
    /// directly. The seed words should not all be zero.
    pub fn seed_with_192bit(&self, seed: [u64; 3]) {
        self.x.set(seed[0]);
        self.y.set(seed[1]);
        self.z.set(seed[2]);
        self.seed_source.set(SeedSource::User)
    }

    /// Runs the generator for ten rounds to mix the state; useful after
    /// seeding from a low-entropy source.
    pub fn mix(&self) {
        (0..10).for_each(|_| {
            self.next();
        });
    }

    /// Advances the generator one step and returns the previous `x` word
    /// (a RomuTrio-style update: multiply `z`, then rotate the differences).
    #[inline(always)]
    fn next(&self) -> u64 {
        let xp = self.x.get();
        let yp = self.y.get();
        let zp = self.z.get();

        self.x.set(zp.wrapping_mul(0xD3833E804F4C574B));
        let y = yp.wrapping_sub(xp);
        self.y.set(y.wrapping_shl(12) | y.wrapping_shr(52));
        let z = zp.wrapping_sub(yp);
        self.z.set(z.wrapping_shl(44) | z.wrapping_shr(20));

        xp
    }

    /// Generates a random `u8` value.
    #[inline(always)]
    pub fn u8(&self) -> u8 {
        self.next() as u8
    }

    /// Generates a random `u16` value.
    #[inline(always)]
    pub fn u16(&self) -> u16 {
        self.next() as u16
    }

    /// Generates a random `u32` value.
    #[inline(always)]
    pub fn u32(&self) -> u32 {
        self.next() as u32
    }

    /// Generates a random `u64` value.
    #[inline(always)]
    pub fn u64(&self) -> u64 {
        self.next()
    }

    /// Generates a random `usize` value.
    #[inline(always)]
    pub fn usize(&self) -> usize {
        self.next() as usize
    }

    /// Generates a random `i8` value.
    #[inline(always)]
    pub fn i8(&self) -> i8 {
        self.next() as i8
    }

    /// Generates a random `i16` value.
    #[inline(always)]
    pub fn i16(&self) -> i16 {
        self.next() as i16
    }

    /// Generates a random `i32` value.
    #[inline(always)]
    pub fn i32(&self) -> i32 {
        self.next() as i32
    }

    /// Generates a random `i64` value.
    #[inline(always)]
    pub fn i64(&self) -> i64 {
        self.next() as i64
    }

    /// Generates a random `isize` value.
    #[inline(always)]
    pub fn isize(&self) -> isize {
        self.next() as isize
    }

    /// Generates a random `bool` value.
    #[inline(always)]
    pub fn bool(&self) -> bool {
        self.next() % 2 == 0
    }

    /// Generates a random `f32` value in the half-open interval `[0, 1)`.
    #[inline(always)]
    pub fn f32(&self) -> f32 {
        #[cfg(feature = "std")]
        return ((self.u32() >> 8) as f32) * (f32::exp2(-24.0));
        #[cfg(not(feature = "std"))]
        return ((self.u32() >> 8) as f32) * 0.000000059604645; // 2^-24
    }

    /// Generates a random `f64` value in the half-open interval `[0, 1)`.
    #[inline(always)]
    pub fn f64(&self) -> f64 {
        #[cfg(feature = "std")]
        return ((self.u64() >> 11) as f64) * (f64::exp2(-53.0));
        #[cfg(not(feature = "std"))]
        return ((self.u64() >> 11) as f64) * 0.00000000000000011102230246251565; // 2^-53
    }

    /// Shuffles the given slice in place (Fisher–Yates).
    pub fn shuffle<T>(&self, slice: &mut [T]) {
        for i in 1..slice.len() {
            slice.swap(i, self.range_usize(..=i));
        }
    }

    /// Fills the given slice with random bytes, eight bytes at a time where
    /// possible.
    pub fn fill_bytes(&self, slice: &mut [u8]) {
        let mut chunks = slice.chunks_exact_mut(8);
        for chunk in &mut chunks {
            chunk.copy_from_slice(&self.next().to_ne_bytes())
        }
        chunks
            .into_remainder()
            .iter_mut()
            .for_each(|x| *x = self.next() as u8);
    }

    /// Generates a random `u8` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_u8`, but slightly biased when `n`
    /// is not a power of two.
    #[inline(always)]
    pub fn mod_u8(&self, n: u8) -> u8 {
        (self.next() as u8 as u16)
            .wrapping_mul(n as u16)
            .wrapping_shr(8) as u8
    }

    /// Generates a random `u16` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_u16`, but slightly biased when
    /// `n` is not a power of two.
    #[inline(always)]
    pub fn mod_u16(&self, n: u16) -> u16 {
        (self.next() as u16 as u32)
            .wrapping_mul(n as u32)
            .wrapping_shr(16) as u16
    }

    /// Generates a random `u32` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_u32`, but slightly biased when
    /// `n` is not a power of two.
    #[inline(always)]
    pub fn mod_u32(&self, n: u32) -> u32 {
        (self.next() as u32 as u64)
            .wrapping_mul(n as u64)
            .wrapping_shr(32) as u32
    }

    /// Generates a random `u64` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_u64`, but slightly biased when
    /// `n` is not a power of two.
    #[inline(always)]
    pub fn mod_u64(&self, n: u64) -> u64 {
        (self.next() as u128)
            .wrapping_mul(n as u128)
            .wrapping_shr(64) as u64
    }

    /// Generates a random `usize` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_usize`, but slightly biased when
    /// `n` is not a power of two.
    #[cfg(target_pointer_width = "16")]
    #[inline(always)]
    pub fn mod_usize(&self, n: usize) -> usize {
        (self.next() as u16 as u32)
            .wrapping_mul(n as u32)
            .wrapping_shr(16) as usize
    }

    /// Generates a random `usize` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_usize`, but slightly biased when
    /// `n` is not a power of two.
    #[cfg(target_pointer_width = "32")]
    #[inline(always)]
    pub fn mod_usize(&self, n: usize) -> usize {
        (self.next() as u32 as u64)
            .wrapping_mul(n as u64)
            .wrapping_shr(32) as usize
    }

    /// Generates a random `usize` value in the range `0..n` using a single
    /// widening multiply. Faster than `range_usize`, but slightly biased when
    /// `n` is not a power of two.
    #[cfg(target_pointer_width = "64")]
    #[inline(always)]
    pub fn mod_usize(&self, n: usize) -> usize {
        (self.next() as u128)
            .wrapping_mul(n as u128)
            .wrapping_shr(64) as usize
    }

    range_integer!(
        range_u8,
        u8,
        u8,
        u16,
        "Generates a random u8 value in the given range."
    );

    range_integer!(
        range_u16,
        u16,
        u16,
        u32,
        "Generates a random u16 value in the given range."
    );

    range_integer!(
        range_u32,
        u32,
        u32,
        u64,
        "Generates a random u32 value in the given range."
    );

    range_integer!(
        range_u64,
        u64,
        u64,
        u128,
        "Generates a random u64 value in the given range."
    );

    #[cfg(target_pointer_width = "16")]
    range_integer!(
        range_usize,
        usize,
        u16,
        u32,
        "Generates a random usize value in the given range."
    );

    #[cfg(target_pointer_width = "32")]
    range_integer!(
        range_usize,
        usize,
        u32,
        u64,
        "Generates a random usize value in the given range."
    );

    #[cfg(target_pointer_width = "64")]
    range_integer!(
        range_usize,
        usize,
        u64,
        u128,
        "Generates a random usize value in the given range."
    );

    range_integer!(
        range_i8,
        i8,
        u8,
        u16,
        "Generates a random i8 value in the given range."
    );

    range_integer!(
        range_i16,
        i16,
        u16,
        u32,
        "Generates a random i16 value in the given range."
    );

    range_integer!(
        range_i32,
        i32,
        u32,
        u64,
        "Generates a random i32 value in the given range."
    );

    range_integer!(
        range_i64,
        i64,
        u64,
        u128,
        "Generates a random i64 value in the given range."
    );

    #[cfg(target_pointer_width = "16")]
    range_integer!(
        range_isize,
        isize,
        u16,
        u32,
        "Generates a random isize value in the given range."
    );

    #[cfg(target_pointer_width = "32")]
    range_integer!(
        range_isize,
        isize,
        u32,
        u64,
        "Generates a random isize value in the given range."
    );

    #[cfg(target_pointer_width = "64")]
    range_integer!(
        range_isize,
        isize,
        u64,
        u128,
        "Generates a random isize value in the given range."
    );
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[test]
    fn test_scalar() {
        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.u8(), 226);
        assert_eq!(rng.u8(), 92);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.u16(), 64738);
        assert_eq!(rng.u16(), 50524);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.u32(), 2204433634);
        assert_eq!(rng.u32(), 3535914332);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.u64(), 3296835985448697058);
        assert_eq!(rng.u64(), 4696255203626829148);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.i8(), -30);
        assert_eq!(rng.i8(), 92);

        let rng = Rng::from_seed_with_192bit([u64::MAX / 2; 3]);
        rng.mix();
        assert_eq!(rng.i16(), 3650);
        assert_eq!(rng.i16(), 22372);
        assert_eq!(rng.i16(), -6746);

        let rng = Rng::from_seed_with_192bit([u64::MAX / 3; 3]);
        rng.mix();
        assert_eq!(rng.i32(), 682042504);
        assert_eq!(rng.i32(), -679581114);

        let rng = Rng::from_seed_with_192bit([u64::MAX / 2; 3]);
        rng.mix();
        assert_eq!(rng.i64(), 1027424955863928386);
        assert_eq!(rng.i64(), -947746021273086108);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert!((rng.f32() - 0.51325965).abs() < f32::EPSILON);
        assert!((rng.f32() - 0.8232692).abs() < f32::EPSILON);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert!((rng.f64() - 0.17872183688759324).abs() < f64::EPSILON);
        assert!((rng.f64() - 0.2545845047159281).abs() < f64::EPSILON);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert!(rng.bool());
        assert!(rng.bool());
        assert!(!rng.bool());
    }
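
    // A small supplementary property check (illustrative, names are new): the
    // generator is fully deterministic, so two instances built from the same
    // seed must produce the same sequence, and reseeding an existing generator
    // via `seed_with_64bit` must match `from_seed_with_64bit`.
    #[test]
    fn test_determinism() {
        let a = Rng::from_seed_with_192bit([42; 3]);
        let b = Rng::from_seed_with_192bit([42; 3]);
        for _ in 0..100 {
            assert_eq!(a.u64(), b.u64());
        }

        let c = Rng::from_seed_with_64bit(7);
        let d = Rng::from_seed_with_192bit([1; 3]);
        d.seed_with_64bit(7);
        for _ in 0..100 {
            assert_eq!(c.u64(), d.u64());
        }
    }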

    #[test]
    fn test_range() {
        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.mod_u8(128), 113);
        assert_eq!(rng.mod_u8(128), 46);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.mod_u16(128), 126);
        assert_eq!(rng.mod_u16(128), 98);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.mod_u32(128), 65);
        assert_eq!(rng.mod_u32(128), 105);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.mod_u64(128), 22);
        assert_eq!(rng.mod_u64(128), 32);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_u8(0..128), 113);
        assert_eq!(rng.range_u8(0..128), 46);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_u16(0..128), 126);
        assert_eq!(rng.range_u16(0..128), 98);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_u32(0..128), 65);
        assert_eq!(rng.range_u32(0..128), 105);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_u64(0..128), 22);
        assert_eq!(rng.range_u64(0..128), 32);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_i8(-64..64), 49);
        assert_eq!(rng.range_i8(-64..64), -18);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_i16(-64..64), 62);
        assert_eq!(rng.range_i16(-64..64), 34);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_i32(-64..64), 1);
        assert_eq!(rng.range_i32(-64..64), 41);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        assert_eq!(rng.range_i64(-64..64), -42);
        assert_eq!(rng.range_i64(-64..64), -32);
    }
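
    // A supplementary property sketch (illustrative, names are new): `mod_*`
    // stays below its bound, `range_*` honours the requested bounds (including
    // inclusive and signed ranges), and the float generators stay in [0, 1).
    #[test]
    fn test_bounds() {
        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.mix();
        for _ in 0..1_000 {
            assert!(rng.mod_u8(100) < 100);
            assert!(rng.mod_u64(1_000) < 1_000);
            assert!((10..20).contains(&rng.range_u8(10..20)));
            assert!((0..=5).contains(&rng.range_usize(..=5)));
            assert!((-64..64).contains(&rng.range_i32(-64..64)));
            let f = rng.f64();
            assert!((0.0..1.0).contains(&f));
        }
    }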

    #[test]
    fn test_shuffle() {
        let mut slice = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
        let org_slice = slice;

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.shuffle(&mut slice);

        assert_ne!(org_slice, slice);
    }
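
    // A supplementary property sketch (illustrative, names are new): `shuffle`
    // only swaps elements in place, so the result must be a permutation of the
    // original slice.
    #[test]
    fn test_shuffle_is_permutation() {
        let mut slice = [3u8, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3];
        let mut sorted = slice;

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.shuffle(&mut slice);

        slice.sort_unstable();
        sorted.sort_unstable();
        assert_eq!(slice, sorted);
    }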

    #[test]
    fn test_fill_bytes() {
        let mut bytes = [1u8; 301];
        let org_bytes = bytes;

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.fill_bytes(&mut bytes);

        assert_ne!(org_bytes, bytes);
    }
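
    // A supplementary property sketch (illustrative, names are new):
    // `fill_bytes` is driven entirely by the generator state, so two buffers
    // filled from equally seeded generators must be identical, including the
    // non-multiple-of-eight tail.
    #[test]
    fn test_fill_bytes_deterministic() {
        let mut a = [0u8; 301];
        let mut b = [0u8; 301];

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.fill_bytes(&mut a);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        rng.fill_bytes(&mut b);

        assert_eq!(a, b);
    }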

    #[test]
    fn test_rng_wide() {
        let mut rng = RngWide::from_seed_with_64bit([42, 43, 44, 45, 46, 47, 48, 49]);
        rng.mix();

        let rng0 = Rng::from_seed_with_64bit(42);
        rng0.mix();

        let rng1 = Rng::from_seed_with_64bit(43);
        rng1.mix();

        let rng2 = Rng::from_seed_with_64bit(44);
        rng2.mix();

        let rng3 = Rng::from_seed_with_64bit(45);
        rng3.mix();

        let rng4 = Rng::from_seed_with_64bit(46);
        rng4.mix();

        let rng5 = Rng::from_seed_with_64bit(47);
        rng5.mix();

        let rng6 = Rng::from_seed_with_64bit(48);
        rng6.mix();

        let rng7 = Rng::from_seed_with_64bit(49);
        rng7.mix();

        let res = rng.u64x8();

        assert_eq!(res[0], rng0.u64());
        assert_eq!(res[1], rng1.u64());
        assert_eq!(res[2], rng2.u64());
        assert_eq!(res[3], rng3.u64());
        assert_eq!(res[4], rng4.u64());
        assert_eq!(res[5], rng5.u64());
        assert_eq!(res[6], rng6.u64());
        assert_eq!(res[7], rng7.u64());
    }
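
    // A supplementary sanity sketch (illustrative, names are new): seeded
    // constructors report `SeedSource::User`, while automatic seeding never
    // leaves the `Fixed` placeholder in place.
    #[test]
    fn test_seed_source() {
        let rng = Rng::from_seed_with_64bit(42);
        assert_eq!(rng.seed_source(), SeedSource::User);

        let rng = Rng::from_seed_with_192bit([42; 3]);
        assert_eq!(rng.seed_source(), SeedSource::User);

        let rng = Rng::new();
        assert_ne!(rng.seed_source(), SeedSource::Fixed);
    }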
}