tor_proto/channel/padding.rs
//! Channel padding
//!
//! Tor spec `padding-spec.txt` section 2.
//!
//! # Overview of channel padding control arrangements
//!
//! 1. `tor_chanmgr::mgr::map` collates information about dormancy, netdir,
//!    and overall client configuration, to maintain a
//!    [`ChannelPaddingInstructions`](crate::channel::ChannelPaddingInstructions)
//!    which is to be used for all relevant[^relevant] channels.
//!    This is distributed to channel frontends (`Channel`s)
//!    by calling `Channel::reparameterize`.
//!
//! 2. Circuit and channel `get_or_launch` methods all take a `ChannelUsage`.
//!    This is plumbed through the layers to `AbstractChanMgr::get_or_launch`,
//!    which passes it to the channel frontend via `Channel::note_usage`.
//!
//! 3. The `Channel` collates this information, and maintains an idea
//!    of whether padding is relevant for this channel (`PaddingControlState`).
//!    For channels where it *is* relevant, it sends `CtrlMsg::ConfigUpdate`
//!    to the reactor.
//!
//! 4. The reactor handles `CtrlMsg::ConfigUpdate` by reconfiguring its padding timer,
//!    and by sending PADDING_NEGOTIATE cell(s); see the sketch after this overview.
//!
//! [^relevant]: A "relevant" channel is one which is not excluded by the rules about
//!     padding in padding-spec 2.2. Arti does not currently support acting as a relay,
//!     so all our channels are client-to-guard or client-to-directory.
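//!
//! As a rough illustration of step 4 in terms of the API in this module,
//! the reactor's handling of a configuration update amounts to something
//! like the sketch below. This is a simplified sketch, not the reactor's
//! actual code: `new_parameters` and `send_cell` are hypothetical stand-ins
//! for the reactor's own plumbing, and the real decision about what (if
//! anything) to negotiate is more subtle (see `Parameters::padding_negotiate_cell`).
//!
//! ```ignore
//! // `padding_timer` is a pinned `Timer<R>`; `new_parameters: Parameters`
//! // arrived in a `CtrlMsg::ConfigUpdate`.
//! if new_parameters == Parameters::disabled() {
//!     padding_timer.disable();
//! } else {
//!     padding_timer.reconfigure(&new_parameters);
//!     padding_timer.enable();
//!     send_cell(new_parameters.padding_negotiate_cell()?);
//! }
//! ```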

use std::pin::Pin;
// TODO, coarsetime maybe? But see arti#496 and also we want to use the mockable SleepProvider
use std::time::{Duration, Instant};

use derive_builder::Builder;
use educe::Educe;
use futures::future::{self, FusedFuture};
use futures::FutureExt;
use pin_project::pin_project;
use rand::distributions::Distribution;
use tracing::error;

use tor_cell::chancell::msg::{Padding, PaddingNegotiate};
use tor_config::impl_standard_builder;
use tor_error::into_internal;
use tor_rtcompat::SleepProvider;
use tor_units::IntegerMilliseconds;

/// Timer that organises wakeups when channel padding should be sent
///
/// Use [`next()`](Timer::next) to find when to send padding, and
/// [`note_cell_sent()`](Timer::note_cell_sent) to reset the timeout when data flows.
///
/// A `Timer` can be in "disabled" state, in which case `next()` never completes.
///
/// `Timer` must be pinned before use
/// (this allows us to avoid involving the allocator when we reschedule).
#[pin_project(project = PaddingTimerProj)]
pub(crate) struct Timer<R: SleepProvider> {
    /// [`SleepProvider`]
    sleep_prov: R,

    /// Parameters controlling distribution of padding time intervals
    ///
    /// Can be `None` to mean the timing parameters are set to infinity.
    parameters: Option<PreparedParameters>,

    /// Gap that we intend to leave between last sent cell, and the padding
    ///
    /// We only resample this (calculating a new random delay) after the previous
    /// timeout actually expired.
    ///
    /// `None` if the timer is disabled.
    /// (This can be done explicitly, but also occurs on time calculation overflow.)
    ///
    /// Invariants: this field may be `Some` or `None` regardless of the values
    /// of other fields. If this field is `None` then the values in `trigger_at`
    /// and `waker` are unspecified.
    selected_timeout: Option<Duration>,

    /// Absolute time at which we should send padding
    ///
    /// `None` if a cell has been sent more recently than we were polled.
    /// That would mean that we are currently moving data out through this channel.
    /// The absolute timeout will need to be recalculated when the data flow pauses.
    ///
    /// `Some` means our `next` has been demanded recently.
    /// Then `trigger_at` records the absolute timeout at which we should send padding,
    /// which was calculated the first time we were polled (after data).
    ///
    /// Invariants: the value in this field is meaningful only if `selected_timeout`
    /// is `Some`.
    ///
    /// If `selected_timeout` is `Some`, and `trigger_at` is therefore valid,
    /// it is (obviously) no later than `selected_timeout` from now.
    ///
    /// See also `waker`.
    trigger_at: Option<Instant>,

    /// Actual waker from the `SleepProvider`
    ///
    /// This is created and updated lazily, because we suspect that with some runtimes
    /// setting timeouts may be slow.
    /// Lazy updating means that with intermittent data traffic, we do not keep scheduling,
    /// descheduling, and adjusting, a wakeup time.
    ///
    /// Invariants:
    ///
    /// If `selected_timeout` is `Some`,
    /// the time at which this waker will trigger here is never *later* than `trigger_at`,
    /// and never *later* than `selected_timeout` from now.
    ///
    /// The wakeup time here may well be earlier than `trigger_at`,
    /// and sooner than `selected_timeout` from now. It may even be in the past.
    /// When we wake up and discover this situation, we reschedule a new waker.
    ///
    /// If `selected_timeout` is `None`, the value is unspecified.
    /// We may retain a `Some` in this case so that if `SleepProvider` is enhanced to
    /// support rescheduling, we can do that without making a new `SleepFuture`
    /// (and without completely reorganising the `Timer` state structure).
    #[pin]
    waker: Option<R::SleepFuture>,
}

/// Timing parameters, as described in `padding-spec.txt`
#[derive(Debug, Copy, Clone, Eq, PartialEq, Builder)]
#[builder(build_fn(error = "tor_error::Bug"))]
pub struct Parameters {
    /// Low end of the distribution of `X`
    #[builder(default = "1500.into()")]
    pub(crate) low: IntegerMilliseconds<u32>,
    /// High end of the distribution of `X` (inclusive)
    #[builder(default = "9500.into()")]
    pub(crate) high: IntegerMilliseconds<u32>,
}

impl_standard_builder! { Parameters: !Deserialize + !Builder + !Default }

impl Parameters {
    /// Return a `PADDING_NEGOTIATE START` cell specifying precisely these parameters
    ///
    /// This function does not take account of the need to avoid sending particular
    /// parameters, and instead sending zeroes, if the requested padding is the consensus
    /// default. The caller must take care of that.
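    ///
    /// For illustration, a caller might use it roughly as in this sketch
    /// (`want_consensus_default` and `send_cell` are hypothetical stand-ins):
    ///
    /// ```ignore
    /// if want_consensus_default {
    ///     // send a cell with zero parameters instead, as described above
    /// } else {
    ///     send_cell(params.padding_negotiate_cell()?);
    /// }
    /// ```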
    pub fn padding_negotiate_cell(&self) -> Result<PaddingNegotiate, tor_error::Bug> {
        let get = |input: IntegerMilliseconds<u32>| {
            input
                .try_map(TryFrom::try_from)
                .map_err(into_internal!("padding negotiate out of range"))
        };
        Ok(PaddingNegotiate::start(get(self.low)?, get(self.high)?))
    }

    /// Make a Parameters containing the specification-defined default parameters
    pub fn default_padding() -> Self {
        Parameters::builder().build().expect("build succeeded")
    }

    /// Make a Parameters sentinel value, with both fields set to zero, which means "no padding"
    pub fn disabled() -> Self {
        Parameters {
            low: 0.into(),
            high: 0.into(),
        }
    }
}

/// Timing parameters, "compiled" into a form which can be sampled more efficiently
///
/// According to the docs for [`rand::Rng::gen_range`],
/// it is better to construct a distribution,
/// than to call `gen_range` repeatedly on the same range.
#[derive(Debug, Clone)]
struct PreparedParameters {
    /// The distribution of `X` (not of the ultimate delay, which is `max(X1,X2)`)
    x_distribution_ms: rand::distributions::Uniform<u32>,
}

/// Return value from `prepare_to_sleep`: instructions for what caller ought to do
#[derive(Educe)]
#[educe(Debug)]
enum SleepInstructions<'f, R: SleepProvider> {
    /// Caller should send padding immediately
    Immediate {
        /// The current `Instant`, returned so that the caller need not call `now` again
        now: Instant,
    },
    /// Caller should wait forever
    Forever,
    /// Caller should `await` this
    Waker(#[educe(Debug(ignore))] Pin<&'f mut R::SleepFuture>),
}

impl<R: SleepProvider> Timer<R> {
    /// Create a new `Timer`
    #[allow(dead_code)]
    pub(crate) fn new(sleep_prov: R, parameters: Parameters) -> Self {
        let parameters = parameters.prepare();
        let selected_timeout = parameters.select_timeout();
        // Too different to new_disabled to share its code, sadly.
        Timer {
            sleep_prov,
            parameters: Some(parameters),
            selected_timeout: Some(selected_timeout),
            trigger_at: None,
            waker: None,
        }
    }

    /// Create a new `Timer` which starts out disabled
    pub(crate) fn new_disabled(sleep_prov: R, parameters: Option<Parameters>) -> Self {
        Timer {
            sleep_prov,
            parameters: parameters.map(|p| p.prepare()),
            selected_timeout: None,
            trigger_at: None,
            waker: None,
        }
    }

    /// Disable this `Timer`
    ///
    /// Idempotent.
    pub(crate) fn disable(self: &mut Pin<&mut Self>) {
        *self.as_mut().project().selected_timeout = None;
    }

    /// Enable this `Timer`
    ///
    /// (If the timer was disabled, the timeout will only start to run when `next()`
    /// is next polled.)
    ///
    /// Idempotent.
    pub(crate) fn enable(self: &mut Pin<&mut Self>) {
        if !self.is_enabled() {
            self.as_mut().select_fresh_timeout();
        }
    }

    /// Set this `Timer`'s parameters
    ///
    /// Will not enable or disable the timer; that must be done separately if desired.
    ///
    /// The effect may not be immediate: if we are already in a gap between cells,
    /// that existing gap may not be adjusted.
    /// (We don't *restart* the timer since that would very likely result in a gap
    /// longer than either of the configured values.)
    ///
    /// Idempotent.
    pub(crate) fn reconfigure(self: &mut Pin<&mut Self>, parameters: &Parameters) {
        *self.as_mut().project().parameters = Some(parameters.prepare());
    }

    /// Enquire whether this `Timer` is currently enabled
    pub(crate) fn is_enabled(&self) -> bool {
        self.selected_timeout.is_some()
    }

    /// Select a fresh timeout (and enable, if possible)
    fn select_fresh_timeout(self: Pin<&mut Self>) {
        let mut self_ = self.project();
        let timeout = self_.parameters.as_ref().map(|p| p.select_timeout());
        *self_.selected_timeout = timeout;
        // This is no longer valid; recalculate it on next poll
        *self_.trigger_at = None;
        // Timeout might be earlier, so we will need a new waker too.
        self_.waker.set(None);
    }

    /// Note that data has been sent (ie, reset the timeout, delaying the next padding)
    pub(crate) fn note_cell_sent(self: &mut Pin<&mut Self>) {
        // Fast path, does not need to do anything but clear the absolute expiry time
        let self_ = self.as_mut().project();
        *self_.trigger_at = None;
    }

    /// Calculate when to send padding, and return a suitable waker
    ///
    /// In the usual case returns [`SleepInstructions::Waker`].
    fn prepare_to_sleep(mut self: Pin<&mut Self>, now: Option<Instant>) -> SleepInstructions<R> {
        let mut self_ = self.as_mut().project();

        let timeout = match self_.selected_timeout {
            None => return SleepInstructions::Forever,
            Some(t) => *t,
        };

        if self_.waker.is_some() {
            // We need to do this with is_some and expect because we need to consume self
            // to get a return value with the right lifetimes.
            let waker = self
                .project()
                .waker
                .as_pin_mut()
                .expect("None but we just checked");
            return SleepInstructions::Waker(waker);
        }

        let now = now.unwrap_or_else(|| self_.sleep_prov.now());

        let trigger_at = match self_.trigger_at {
            Some(t) => t,
            None => self_.trigger_at.insert(match now.checked_add(timeout) {
                None => {
                    error!("timeout overflowed computing next channel padding");
                    self.disable();
                    return SleepInstructions::Forever;
                }
                Some(r) => r,
            }),
        };

        let remaining = trigger_at.checked_duration_since(now).unwrap_or_default();
        if remaining.is_zero() {
            return SleepInstructions::Immediate { now };
        }

        //dbg!(timeout, remaining, now, trigger_at);

        // There is no Option::get_pin_mut_or_set_with
        if self_.waker.is_none() {
            self_.waker.set(Some(self_.sleep_prov.sleep(remaining)));
        }
        let waker = self
            .project()
            .waker
            .as_pin_mut()
            .expect("None but we just inserted!");
        SleepInstructions::Waker(waker)
    }

    /// Wait until we should next send padding, and then return the padding message
    ///
    /// Should be used as a low-priority branch within `select_biased!`.
    ///
    /// (`next()` has to be selected on, along with other possible events, in the
    /// main loop, so that the padding timer runs concurrently with other processing;
    /// and it should be in a low-priority branch of `select_biased!` as an optimisation:
    /// that avoids calculating timeouts etc. until necessary,
    /// i.e. it calculates them only when the main loop would otherwise block.)
    ///
    /// The returned future is async-cancel-safe,
    /// but once it yields, the padding must actually be sent.
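    ///
    /// For example (a sketch only: the other arms and `send_cell` are hypothetical
    /// stand-ins for the reactor's real work):
    ///
    /// ```ignore
    /// select_biased! {
    ///     // ...higher-priority arms (control messages, outbound data cells)...
    ///     padding = padding_timer.as_mut().next() => {
    ///         // Once this arm runs, the padding really must be sent (see above).
    ///         send_cell(padding).await?;
    ///     }
    /// }
    /// ```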
    pub(crate) fn next(self: Pin<&mut Self>) -> impl FusedFuture<Output = Padding> + '_ {
        self.next_inner().fuse()
    }

    /// Wait until we should next send padding (not `FusedFuture`)
    ///
    /// Callers want a [`FusedFuture`] because `select!` needs one.
    async fn next_inner(mut self: Pin<&mut Self>) -> Padding {
        let now = loop {
            match self.as_mut().prepare_to_sleep(None) {
                SleepInstructions::Forever => future::pending().await,
                SleepInstructions::Immediate { now } => break now,
                SleepInstructions::Waker(waker) => waker.await,
            }

            // This timer has fired and has therefore been used up.
            // When we go round again we will make a new one.
            //
            // TODO: have SleepProviders provide a reschedule function, and use it.
            // That is likely to be faster where supported.
            self.as_mut().project().waker.set(None);
        };

        // It's time to send padding.

        // Firstly, calculate the new timeout for the *next* padding,
        // so that we leave the `Timer` properly programmed.
        self.as_mut().select_fresh_timeout();

        // Bet that we will be going to sleep again, and set up the new trigger time
        // and waker now. This will save us a future call to Instant::now.
        self.as_mut().prepare_to_sleep(Some(now));

        Padding::new()
    }
}

impl Parameters {
    /// "Compile" the parameters into a form which can be quickly sampled
    fn prepare(self) -> PreparedParameters {
        PreparedParameters {
            x_distribution_ms: rand::distributions::Uniform::new_inclusive(
                self.low.as_millis(),
                self.high.as_millis(),
            ),
        }
    }
}

impl PreparedParameters {
    /// Randomly select a timeout (as per `padding-spec.txt`)
    fn select_timeout(&self) -> Duration {
        let mut rng = rand::thread_rng();
        let ms = std::cmp::max(
            self.x_distribution_ms.sample(&mut rng),
            self.x_distribution_ms.sample(&mut rng),
        );
        Duration::from_millis(ms.into())
    }
}

#[cfg(test)]
mod test {
    // @@ begin test lint list maintained by maint/add_warning @@
    #![allow(clippy::bool_assert_comparison)]
    #![allow(clippy::clone_on_copy)]
    #![allow(clippy::dbg_macro)]
    #![allow(clippy::mixed_attributes_style)]
    #![allow(clippy::print_stderr)]
    #![allow(clippy::print_stdout)]
    #![allow(clippy::single_char_pattern)]
    #![allow(clippy::unwrap_used)]
    #![allow(clippy::unchecked_duration_subtraction)]
    #![allow(clippy::useless_vec)]
    #![allow(clippy::needless_pass_by_value)]
    //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->

    use super::*;
    use futures::future::ready;
    use futures::select_biased;
    use itertools::{izip, Itertools};
    use statrs::distribution::ContinuousCDF;
    use tokio::pin;
    use tokio_crate as tokio;
    use tor_rtcompat::*;

    async fn assert_not_ready<R: Runtime>(timer: &mut Pin<&mut Timer<R>>) {
        select_biased! {
            _ = timer.as_mut().next() => panic!("unexpectedly ready"),
            _ = ready(()) => { },
        };
    }

    async fn assert_is_ready<R: Runtime>(timer: &mut Pin<&mut Timer<R>>) {
        let _: Padding = select_biased! {
            p = timer.as_mut().next() => p,
            _ = ready(()) => panic!("pad timer failed to yield"),
        };
    }

    #[test]
    fn timer_impl() {
        let runtime = tor_rtcompat::tokio::TokioNativeTlsRuntime::create().unwrap();
        let runtime = tor_rtmock::MockSleepRuntime::new(runtime);

        let parameters = Parameters {
            low: 1000.into(),
            high: 1000.into(),
        };

        let () = runtime.block_on(async {
            let timer = Timer::new(runtime.clone(), parameters);
            pin!(timer);
            assert_eq! { true, timer.is_enabled() }

            // expiry time not yet calculated
            assert_eq! { timer.as_mut().trigger_at, None };

            // ---------- timeout value ----------

            // Just created, not ready yet
            assert_not_ready(&mut timer).await;

            runtime.advance(Duration::from_millis(999)).await;
            // Not quite ready
            assert_not_ready(&mut timer).await;

            runtime.advance(Duration::from_millis(1)).await;
            // Should go off precisely now
            assert_is_ready(&mut timer).await;

            assert_not_ready(&mut timer).await;
            runtime.advance(Duration::from_millis(1001)).await;
            // Should go off 1ms ago, fine
            assert_is_ready(&mut timer).await;

            // ---------- various resets ----------

            runtime.advance(Duration::from_millis(500)).await;
            timer.note_cell_sent();
            assert_eq! { timer.as_mut().trigger_at, None };

            // This ought not to cause us to actually calculate the expiry time
            let () = select_biased! {
                _ = ready(()) => { },
                _ = timer.as_mut().next() => panic!(),
            };
            assert_eq! { timer.as_mut().trigger_at, None };

            // ---------- disable/enable ----------

            timer.disable();
            runtime.advance(Duration::from_millis(2000)).await;
            assert_eq! { timer.as_mut().selected_timeout, None };
            assert_eq! { false, timer.is_enabled() }
            assert_not_ready(&mut timer).await;

            timer.enable();
            runtime.advance(Duration::from_millis(3000)).await;
            assert_eq! { true, timer.is_enabled() }
            // Shouldn't be already ready, since we haven't polled yet
            assert_not_ready(&mut timer).await;

            runtime.advance(Duration::from_millis(1000)).await;
            // *Now*
            assert_is_ready(&mut timer).await;
        });

        let () = runtime.block_on(async {
            let timer = Timer::new(runtime.clone(), parameters);
            pin!(timer);

            assert! { timer.as_mut().selected_timeout.is_some() };
            assert! { timer.as_mut().trigger_at.is_none() };
            // Force an overflow by guddling
            *timer.as_mut().project().selected_timeout = Some(Duration::MAX);

            assert_not_ready(&mut timer).await;
            dbg!(timer.as_mut().project().trigger_at);
            assert_eq! { false, timer.is_enabled() }
        });

        let () = runtime.block_on(async {
            let timer = Timer::new_disabled(runtime.clone(), None);
            assert! { timer.parameters.is_none() };
            pin!(timer);
            assert_not_ready(&mut timer).await;
            assert! { timer.as_mut().selected_timeout.is_none() };
            assert! { timer.as_mut().trigger_at.is_none() };
        });

        let () = runtime.block_on(async {
            let timer = Timer::new_disabled(runtime.clone(), Some(parameters));
            assert! { timer.parameters.is_some() };
            pin!(timer);
            assert_not_ready(&mut timer).await;
            runtime.advance(Duration::from_millis(3000)).await;
            assert_not_ready(&mut timer).await;
            timer.as_mut().enable();
            assert_not_ready(&mut timer).await;
            runtime.advance(Duration::from_millis(3000)).await;
            assert_is_ready(&mut timer).await;
        });
    }

    #[test]
    #[allow(clippy::print_stderr)]
    fn timeout_distribution() {
        // Test that the distribution of padding intervals is as we expect. This is not so
        // straightforward. We need to deal with true randomness (since we can't plumb a
        // testing RNG into the padding timer, and perhaps don't even *want* to make that a
        // mockable interface). Measuring a distribution of random variables involves some
        // statistics.

        // The overall approach is:
        //    Use a fixed (but nontrivial) low to high range
        //    Sample N times into n equal sized buckets
        //    Calculate the expected number of samples in each bucket
        //    Do a chi^2 test. If it doesn't spot a potential difference, declare OK.
        //    If the chi^2 test does definitely declare a difference, declare failure.
        //    Otherwise increase N and go round again.
        //
        // This allows most runs to be fast without having an appreciable possibility of a
        // false test failure and while being able to detect even quite small deviations.

        // Notation from
        // https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Calculating_the_test-statistic
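        // (That is, the statistic computed as `chi2` below is
        //  chi^2 = sum_i (O_i - E_i)^2 / E_i over the n buckets,
        //  where O_i is the observed count and E_i the expected count.)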
        // I haven't done a formal power calculation but empirically
        // this detects the following most of the time:
        //    deviation of the CDF power from B^2 to B^1.98
        //    wrong minimum value by 25ms out of 12s, low_ms = min + 25
        //    wrong maximum value by 10ms out of 12s, high_ms = max -1 - 10

        #[allow(non_snake_case)]
        let mut N = 100_0000;

        #[allow(non_upper_case_globals)]
        const n: usize = 100;

        const P_GOOD: f64 = 0.05; // Investigate further 5% of times (if all is actually well)
        const P_BAD: f64 = 1e-12;

        loop {
            eprintln!("padding distribution test, n={} N={}", n, N);

            let min = 5000;
            let max = 17000; // Exclusive
            assert_eq!(0, (max - min) % (n as u32)); // buckets must match up to integer boundaries

            let cdf = (0..=n)
                .map(|bi| {
                    let b = (bi as f64) / (n as f64);
                    // expected distribution:
                    //  with B = bi / n
                    //  P(X < B) == B
                    //  P(max(X1,X2) < B) = B^2
                    b.powi(2)
                })
                .collect_vec();

            let pdf = cdf
                .iter()
                .cloned()
                .tuple_windows()
                .map(|(p, q)| q - p)
                .collect_vec();
            let exp = pdf.iter().cloned().map(|p| p * f64::from(N)).collect_vec();

            // chi-squared test only valid if every cell expects at least 5
            assert!(exp.iter().cloned().all(|ei| ei >= 5.));

            let mut obs = [0_u32; n];

            let params = Parameters {
                low: min.into(),
                high: (max - 1).into(), // convert exclusive to inclusive
            }
            .prepare();

            for _ in 0..N {
                let xx = params.select_timeout();
                let ms = xx.as_millis();
                let ms = u32::try_from(ms).unwrap();
                assert!(ms >= min);
                assert!(ms < max);
                // Integer arithmetic ensures that we classify exactly
                let bi = ((ms - min) * (n as u32)) / (max - min);
                obs[bi as usize] += 1;
            }

            let chi2 = izip!(&obs, &exp)
                .map(|(&oi, &ei)| (f64::from(oi) - ei).powi(2) / ei)
                .sum::<f64>();

            // n degrees of freedom, one-tailed test
            // (since distro parameters are all fixed, not estimated from the sample)
            let chi2_distr = statrs::distribution::ChiSquared::new(n as _).unwrap();

            // probability of good code generating a result at least this bad
            let p = 1. - chi2_distr.cdf(chi2);

            eprintln!(
                "padding distribution test, n={} N={} chi2={} p={}",
                n, N, chi2, p
            );

            if p >= P_GOOD {
                break;
            }

            for (i, (&oi, &ei)) in izip!(&obs, &exp).enumerate() {
                eprintln!("bi={:4} OI={:4} EI={}", i, oi, ei);
            }

            if p < P_BAD {
                panic!("distribution is wrong (p < {:e})", P_BAD);
            }

            // This is statistically rather cheaty: we keep trying until we get a definite
            // answer! But we radically increase the power of the test each time.
            // If the distribution is really wrong, this test ought to find it soon enough,
            // especially since we run this repeatedly in CI.
            N *= 10;
        }
    }
}