1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
use num_modular::{Montgomery, Reducer};
use num_traits::{PrimInt, WrappingAdd, WrappingSub};

use crate::builder::RandomSequenceBuilder;

/// Generate a deterministic pseudo-random sequence of unique numbers.
///
/// Not cryptographically secure.
///
/// Properties:
/// - The sequence is deterministic and repeatable.
/// - The sequence will only include each number once (every index is unique).
/// - Computing the value for any random index in the sequence is an O(1) operation.
///
/// Based on the article by @preshing:
/// Article: http://preshing.com/20121224/how-to-generate-a-sequence-of-unique-random-integers/
/// Source: https://github.com/preshing/RandomSequence/blob/master/randomsequence.h
#[derive(Debug, Clone)]
pub struct RandomSequence<T>
where
    T: PrimInt + WrappingAdd + WrappingSub,
    Montgomery<T>: Reducer<T>,
{
    /// The config/builder holds the parameters that define the sequence
    /// (seed-derived offsets and the xor mask used by `n_internal`).
    pub config: RandomSequenceBuilder<T>,

    // Internal iterator-only state. `start_index` anchors relative indexing
    // (see `n()` / `index()`); `current_index` is the cursor advanced by
    // `next()` and rewound by `prev()`; `intermediate_offset` is mixed into
    // every permutation in `n_internal`.
    pub(crate) start_index: T,
    pub(crate) current_index: T,
    pub(crate) intermediate_offset: T,
}

impl<T> RandomSequence<T>
where
    T: PrimInt + WrappingAdd + WrappingSub,
    Montgomery<T>: Reducer<T>,
{
    /// Return the element at the current cursor position, then advance
    /// the cursor by one (wrapping on overflow).
    #[inline]
    pub fn next(&mut self) -> T {
        let value = self.n_internal(self.current_index);
        self.current_index = self.current_index.wrapping_add(&T::one());
        value
    }

    /// Step the cursor back by one (wrapping on underflow) and return the
    /// element at the new position — the mirror image of [`Self::next`].
    #[inline]
    pub fn prev(&mut self) -> T {
        self.current_index = self.current_index.wrapping_sub(&T::one());
        self.n_internal(self.current_index)
    }

    /// Return the nth element of the sequence, where `index` is relative
    /// to the sequence's starting position.
    #[inline]
    pub fn n(&self, index: T) -> T {
        self.n_internal(self.start_index.wrapping_add(&index))
    }

    /// Compute the element for an absolute (not start-relative) index.
    ///
    /// Evaluates `qpr(qpr(index) + intermediate_offset ^ intermediate_xor)`,
    /// i.e. two rounds of the quadratic-prime-residue permutation with an
    /// offset-add and xor mix in between.
    #[inline(always)]
    fn n_internal(&self, index: T) -> T {
        let permuted = self.config.permute_qpr(index);
        let offset = permuted.wrapping_add(&self.intermediate_offset);
        self.config.permute_qpr(offset ^ self.config.intermediate_xor)
    }

    /// Current cursor position, expressed relative to the start of the
    /// sequence (i.e. how many elements `next()` has effectively consumed).
    #[inline]
    pub fn index(&self) -> T {
        self.current_index.wrapping_sub(&self.start_index)
    }
}

impl<T> Iterator for RandomSequence<T>
where
    T: PrimInt + WrappingAdd + WrappingSub,
    Montgomery<T>: Reducer<T>,
{
    type Item = T;

    /// Yields the next element in the sequence. This iterator never returns
    /// `None`: the underlying index arithmetic wraps, so iteration continues
    /// indefinitely (revisiting elements after a full cycle of `T`).
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        Some(self.next())
    }

    /// Since `next()` never returns `None`, the iterator is infinite: the
    /// lower bound is `usize::MAX` and there is no finite upper bound.
    /// (The default implementation would under-report as `(0, None)`.)
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (usize::MAX, None)
    }
}

impl<T> DoubleEndedIterator for RandomSequence<T>
where
    T: PrimInt + WrappingAdd + WrappingSub,
    Montgomery<T>: Reducer<T>,
{
    /// Steps the cursor backwards via [`RandomSequence::prev`]. Like the
    /// forward direction, this never yields `None` because the index
    /// arithmetic wraps.
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        let value = self.prev();
        Some(value)
    }
}

impl<T> From<RandomSequenceBuilder<T>> for RandomSequence<T>
where
    T: PrimInt + WrappingAdd + WrappingSub,
    Montgomery<T>: Reducer<T>,
{
    /// Builds the sequence iterator described by the builder's parameters.
    fn from(builder: RandomSequenceBuilder<T>) -> Self {
        builder.into_iter()
    }
}

#[cfg(test)]
mod tests {
    use std::collections::{HashMap, HashSet};
    use std::vec::Vec;

    use rand::rngs::OsRng;
    use statrs::distribution::{ChiSquared, ContinuousCDF};

    use super::*;

    // Compile-time probes: these only type-check if RandomSequence<T> is
    // Send/Sync; they are never meaningfully "run".
    fn is_send<T: Send>() {}
    fn is_sync<T: Sync>() {}

    // Per-type uniqueness/consistency test generator:
    // - `next()` agrees with random access via `n(i)`,
    // - `rev()` (i.e. `prev()`) agrees with `n()` indexed backwards from MAX
    //   (wrapping), since the reverse direction starts just before index 0,
    // - the first `$check` values are all distinct.
    macro_rules! test_sequence {
        ($name:ident, $type:ident, $check:literal) => {
            #[test]
            fn $name() {
                let config = RandomSequenceBuilder::<$type>::new(0, 0);
                let sequence = config.into_iter();

                // Forward iteration must match random access at the same index.
                for (i, num) in std::iter::zip(0..10, sequence.clone()) {
                    assert_eq!(sequence.n(i as $type), num);
                }

                // Reverse iteration wraps below 0, so element k of the reversed
                // sequence corresponds to absolute index MAX - k.
                for (i, num) in std::iter::zip(0..10, sequence.clone().rev()) {
                    assert_eq!(sequence.n($type::MAX.wrapping_sub(i as $type)), num);
                }

                // Uniqueness: collecting $check values into a set must not
                // lose any to duplication.
                let nums: HashSet<$type> = config.into_iter().take($check).collect();
                assert_eq!(nums.len(), $check);

                // check sequence is send and sync (although index won't be synced between threads)
                is_send::<RandomSequence<$type>>();
                is_sync::<RandomSequence<$type>>();
            }
        };
    }

    // For u8/u16 the $check value covers the full type range, so uniqueness
    // is exhaustive; for u32/u64 only a 100k prefix is sampled.
    test_sequence!(test_u8_sequence, u8, 256);
    test_sequence!(test_u16_sequence, u16, 65536);
    test_sequence!(test_u32_sequence, u32, 100_000);
    test_sequence!(test_u64_sequence, u64, 100_000);

    // Per-type distribution test generator: chi-squared goodness-of-fit of a
    // bucketed histogram of the sequence against a uniform distribution.
    macro_rules! test_distribution {
        ($name:ident, $type:ident, $check:literal) => {
            #[test]
            fn $name() {
                const BUCKETS: usize = 100;
                let config = RandomSequenceBuilder::<$type>::rand(&mut OsRng);

                // compute a normalised histogram over the sequence with BUCKETS buckets, where each bucket value
                // is the percentage of values that fall into this bucket
                // (bucket index BUCKETS is hit only by value == $type::MAX).
                let mut data_buckets: HashMap<usize, usize> = HashMap::with_capacity(BUCKETS + 1);
                config
                    .into_iter()
                    .take($check)
                    .map(|i| ((i as f64 / $type::MAX as f64) * BUCKETS as f64) as usize)
                    .for_each(|i| *data_buckets.entry(i).or_insert(0) += 1);
                let data_buckets: Vec<f64> = (0..=BUCKETS)
                    .map(|i| *data_buckets.get(&i).unwrap_or(&0) as f64)
                    .collect();

                // compute the probability of each bucket being hit, assuming a uniform distribution.
                // careful for u8 where we have 256 for only 100 buckets; and so some buckets have 2 vs 3 expected values,
                // as this represents the percentage of values that should fall into each bucket assuming perfectly uniform.
                let mut uniform_buckets: Vec<f64> = (0..BUCKETS)
                    .map(|_| ($check as f64 / BUCKETS as f64))
                    .collect();
                uniform_buckets.push($check as f64 / $type::MAX as f64); // last bucket for value=$type::MAX

                // compute chi-squared statistic
                assert_eq!(data_buckets.len(), uniform_buckets.len(), "Data and uniform buckets logic issue.");
                let chi_squared = std::iter::zip(data_buckets.iter(), uniform_buckets.iter())
                    .map(|(x, e)| (x - e).powi(2) / e)
                    .sum::<f64>();

                // compute p_value from chi-squared statistic
                // NOTE(review): degrees of freedom are BUCKETS - 1 even though
                // BUCKETS + 1 buckets are compared — confirm intended.
                let chi_dist = ChiSquared::new((BUCKETS - 1) as f64).unwrap();
                let p_value = 1.0 - chi_dist.cdf(chi_squared);

                // FIXME: choose a better test, because this doesn't strictly confirm the uniform distribution
                //   and there is a suspiciously large amount of variance in the p_values between test runs.
                // p_value <= 0.05 would say with 95% certainty that this distribution is _not_ uniform
                assert!(p_value > 0.05, "Unexpectedly rejected the null hypothesis with high probability. stat: {}, p: {}", chi_squared, p_value);
            }
        };
    }

    test_distribution!(test_u8_distribution, u8, 256);
    test_distribution!(test_u16_distribution, u16, 65536);
    test_distribution!(test_u32_distribution, u32, 100_000);
    test_distribution!(test_u64_distribution, u64, 100_000);
}