// tacet_core/adaptive/calibration.rs

1//! Calibration data and computation for adaptive sampling (no_std compatible).
2//!
3//! This module defines the calibration results needed for the adaptive sampling loop,
4//! as well as the core calibration computation that can run in no_std environments.
5//!
6//! For no_std compatibility:
7//! - No `std::time::Instant` (throughput measured by caller)
8//! - Core preflight checks are no_std compatible
9//! - Uses only `alloc` for heap allocation
10//!
11//! # Usage
12//!
13//! The `calibrate()` function runs the calibration phase using pre-collected samples.
14//! Unlike the `tacet` crate version, it takes `samples_per_second` as a parameter
15//! instead of measuring it internally (to avoid `std::time::Instant`).
16
17extern crate alloc;
18
19use alloc::string::String;
20use alloc::vec::Vec;
21use core::f64::consts::PI;
22
23use nalgebra::Cholesky;
24use rand::prelude::*;
25use rand::SeedableRng;
26use rand_xoshiro::Xoshiro256PlusPlus;
27
28use crate::analysis::mde::estimate_mde;
29use crate::constants::DEFAULT_SEED;
30use crate::math;
31use crate::preflight::{run_core_checks, PreflightResult};
32use crate::statistics::{
33    bootstrap_difference_covariance, bootstrap_difference_covariance_discrete, AcquisitionStream,
34    OnlineStats,
35};
36use crate::types::{Matrix9, Vector9};
37
38use super::CalibrationSnapshot;
39
40#[cfg(feature = "parallel")]
41use rayon::prelude::*;
42
/// Counter-based RNG seed generation using SplitMix64.
/// Provides deterministic, well-distributed seeds for parallel MC sampling.
///
/// Offsetting the base seed by `counter` golden-ratio increments and then
/// applying the SplitMix64 finalizer decorrelates seeds for adjacent counters.
#[cfg(feature = "parallel")]
#[inline]
fn counter_rng_seed(base_seed: u64, counter: u64) -> u64 {
    // SplitMix64 increment and mixing constants (Steele, Lea & Flood, 2014).
    const GOLDEN_GAMMA: u64 = 0x9e3779b97f4a7c15;
    let mut state = counter.wrapping_mul(GOLDEN_GAMMA).wrapping_add(base_seed);
    state = (state >> 30 ^ state).wrapping_mul(0xbf58476d1ce4e5b9);
    state = (state >> 27 ^ state).wrapping_mul(0x94d049bb133111eb);
    state >> 31 ^ state
}
53
/// Conservative prior scale factor used as fallback.
/// Applied as sigma = theta_eff * this when bisection fails to converge;
/// chosen to give higher exceedance probability than the target 62%.
const CONSERVATIVE_PRIOR_SCALE: f64 = 1.5;

/// Target prior exceedance probability:
/// P(max_k |δ_k| > θ_eff) under the calibrated prior.
const TARGET_EXCEEDANCE: f64 = 0.62;

/// Number of Monte Carlo samples for prior calibration.
const PRIOR_CALIBRATION_SAMPLES: usize = 50_000;

/// Maximum iterations for prior calibration root-finding (bisection).
const MAX_CALIBRATION_ITERATIONS: usize = 20;

/// Condition number threshold for triggering robust shrinkage (§3.3.5).
const CONDITION_NUMBER_THRESHOLD: f64 = 1e4;

/// Minimum diagonal floor for regularization (prevents division by zero
/// when normalizing by diagonal variances or computing 1/√λ).
const DIAGONAL_FLOOR: f64 = 1e-12;

/// Degrees of freedom for Student's t prior (v5.4).
/// With ν = 4 the marginal prior variance is (ν/(ν-2))σ² = 2σ².
pub const NU: f64 = 4.0;
75
/// Calibration results from the initial measurement phase (no_std compatible).
///
/// This struct contains the essential statistical data needed for the adaptive
/// sampling loop. It is designed for use in no_std environments like SGX enclaves.
///
/// For full calibration with preflight checks, see `tacet::Calibration`.
///
/// ## Student's t Prior (v5.4+)
///
/// The prior is a Student's t distribution with ν=4 degrees of freedom:
/// δ ~ t_ν(0, σ_t²R)
///
/// This is implemented via scale mixture: λ ~ Gamma(ν/2, ν/2), δ|λ ~ N(0, (σ_t²/λ)R).
/// The marginal variance is (ν/(ν-2)) σ_t² R = 2σ_t² R for ν=4.
#[derive(Debug, Clone)]
pub struct Calibration {
    /// Covariance "rate" - multiply by 1/n to get Sigma_n for n samples.
    /// Computed as Sigma_cal * n_cal where Sigma_cal is calibration covariance.
    /// This allows O(1) covariance scaling as samples accumulate.
    pub sigma_rate: Matrix9,

    /// Block length from Politis-White algorithm.
    /// Used for block bootstrap to preserve autocorrelation structure.
    /// Also divides n to form the effective sample size (see `n_eff`).
    pub block_length: usize,

    /// Calibrated Student's t prior scale (v5.4).
    /// This is the σ in δ|λ ~ N(0, (σ²/λ)R).
    pub sigma_t: f64,

    /// Cholesky factor L_R of correlation matrix R.
    /// Used for Gibbs sampling: δ = (σ/√λ) L_R z.
    pub l_r: Matrix9,

    /// Marginal prior covariance: 2σ²R (for ν=4).
    /// This is the unconditional prior variance of δ under the t-prior.
    /// Derived from `sigma_t` and `l_r` in the constructors.
    pub prior_cov_marginal: Matrix9,

    /// The theta threshold being used (in nanoseconds).
    pub theta_ns: f64,

    /// Number of calibration samples collected per class.
    pub calibration_samples: usize,

    /// Whether discrete mode is active (< 10% unique values).
    /// When true, use mid-quantile estimators and m-out-of-n bootstrap.
    pub discrete_mode: bool,

    /// Minimum detectable effect in nanoseconds.
    pub mde_ns: f64,

    /// Statistics snapshot from calibration phase for drift detection.
    pub calibration_snapshot: CalibrationSnapshot,

    /// Timer resolution in nanoseconds.
    pub timer_resolution_ns: f64,

    /// Measured throughput for time estimation (samples per second).
    /// The caller is responsible for measuring this during calibration.
    pub samples_per_second: f64,

    /// Floor-rate constant.
    /// Computed once at calibration: 95th percentile of max_k|Z_k| where Z ~ N(0, Σ_rate).
    /// Used for analytical theta_floor computation: theta_floor_stat(n) = c_floor / sqrt(n).
    pub c_floor: f64,

    /// Projection mismatch threshold.
    /// 99th percentile of bootstrap Q_proj distribution.
    pub projection_mismatch_thresh: f64,

    /// Timer resolution floor component.
    /// theta_tick = (1 tick in ns) / K where K is the batch size.
    pub theta_tick: f64,

    /// Effective threshold for this run (in nanoseconds).
    /// theta_eff = max(theta_user, theta_floor) or just theta_floor in research mode.
    pub theta_eff: f64,

    /// Initial measurement floor at calibration time (in nanoseconds).
    pub theta_floor_initial: f64,

    /// Deterministic RNG seed used for this run.
    pub rng_seed: u64,

    /// Batch size K for adaptive batching.
    /// When K > 1, samples contain K iterations worth of timing.
    /// Effect estimates must be divided by K to report per-call differences.
    pub batch_k: u32,

    /// Results of core preflight checks run during calibration (no_std compatible).
    /// Platform-specific checks (like system configuration) are handled by the
    /// `tacet` crate's wrapper.
    pub preflight_result: PreflightResult,
}
169
170impl Calibration {
171    /// Create a new Calibration with v5.4+ Student's t prior.
172    #[allow(clippy::too_many_arguments)]
173    pub fn new(
174        sigma_rate: Matrix9,
175        block_length: usize,
176        sigma_t: f64,
177        l_r: Matrix9,
178        theta_ns: f64,
179        calibration_samples: usize,
180        discrete_mode: bool,
181        mde_ns: f64,
182        calibration_snapshot: CalibrationSnapshot,
183        timer_resolution_ns: f64,
184        samples_per_second: f64,
185        c_floor: f64,
186        projection_mismatch_thresh: f64,
187        theta_tick: f64,
188        theta_eff: f64,
189        theta_floor_initial: f64,
190        rng_seed: u64,
191        batch_k: u32,
192    ) -> Self {
193        // Compute marginal prior covariance: 2σ²R (for ν=4)
194        // The marginal variance of t_ν(0, σ²R) is (ν/(ν-2)) σ²R = 2σ²R for ν=4
195        let r = l_r * l_r.transpose();
196        let prior_cov_marginal = r * (2.0 * sigma_t * sigma_t);
197
198        Self {
199            sigma_rate,
200            block_length,
201            sigma_t,
202            l_r,
203            prior_cov_marginal,
204            theta_ns,
205            calibration_samples,
206            discrete_mode,
207            mde_ns,
208            calibration_snapshot,
209            timer_resolution_ns,
210            samples_per_second,
211            c_floor,
212            projection_mismatch_thresh,
213            theta_tick,
214            theta_eff,
215            theta_floor_initial,
216            rng_seed,
217            batch_k,
218            preflight_result: PreflightResult::new(),
219        }
220    }
221
222    /// Create a new Calibration with all fields including preflight results.
223    #[allow(clippy::too_many_arguments)]
224    pub fn with_preflight(
225        sigma_rate: Matrix9,
226        block_length: usize,
227        sigma_t: f64,
228        l_r: Matrix9,
229        theta_ns: f64,
230        calibration_samples: usize,
231        discrete_mode: bool,
232        mde_ns: f64,
233        calibration_snapshot: CalibrationSnapshot,
234        timer_resolution_ns: f64,
235        samples_per_second: f64,
236        c_floor: f64,
237        projection_mismatch_thresh: f64,
238        theta_tick: f64,
239        theta_eff: f64,
240        theta_floor_initial: f64,
241        rng_seed: u64,
242        batch_k: u32,
243        preflight_result: PreflightResult,
244    ) -> Self {
245        // Compute marginal prior covariance: 2σ²R (for ν=4)
246        // The marginal variance of t_ν(0, σ²R) is (ν/(ν-2)) σ²R = 2σ²R for ν=4
247        let r = l_r * l_r.transpose();
248        let prior_cov_marginal = r * (2.0 * sigma_t * sigma_t);
249
250        Self {
251            sigma_rate,
252            block_length,
253            sigma_t,
254            l_r,
255            prior_cov_marginal,
256            theta_ns,
257            calibration_samples,
258            discrete_mode,
259            mde_ns,
260            calibration_snapshot,
261            timer_resolution_ns,
262            samples_per_second,
263            c_floor,
264            projection_mismatch_thresh,
265            theta_tick,
266            theta_eff,
267            theta_floor_initial,
268            rng_seed,
269            batch_k,
270            preflight_result,
271        }
272    }
273
274    /// Compute effective sample size accounting for dependence (spec §3.3.2 v5.6).
275    ///
276    /// Under strong temporal dependence, n samples do not provide n independent observations.
277    /// The effective sample size approximates the number of effectively independent blocks.
278    ///
279    /// n_eff = max(1, floor(n / block_length))
280    ///
281    /// where block_length is the estimated dependence length from Politis-White selector.
282    pub fn n_eff(&self, n: usize) -> usize {
283        if self.block_length == 0 {
284            return n.max(1);
285        }
286        (n / self.block_length).max(1)
287    }
288
289    /// Scale sigma_rate to get covariance for n samples using effective sample size (v5.6).
290    ///
291    /// Σ_n = Σ_rate / n_eff
292    ///
293    /// where n_eff = max(1, floor(n / block_length)) to correctly account for reduced
294    /// information under temporal dependence.
295    pub fn covariance_for_n(&self, n: usize) -> Matrix9 {
296        if n == 0 {
297            return self.sigma_rate; // Avoid division by zero
298        }
299        let n_eff = self.n_eff(n);
300        self.sigma_rate / (n_eff as f64)
301    }
302
303    /// Scale sigma_rate to get covariance using raw n samples (ignoring dependence).
304    ///
305    /// Σ_n = Σ_rate / n
306    ///
307    /// Use this only when you need the raw scaling without n_eff correction.
308    pub fn covariance_for_n_raw(&self, n: usize) -> Matrix9 {
309        if n == 0 {
310            return self.sigma_rate; // Avoid division by zero
311        }
312        self.sigma_rate / (n as f64)
313    }
314
315    /// Estimate time to collect `n` additional samples based on calibration throughput.
316    ///
317    /// Returns estimated seconds. Caller should add any overhead.
318    pub fn estimate_collection_time_secs(&self, n: usize) -> f64 {
319        if self.samples_per_second <= 0.0 {
320            return 0.0;
321        }
322        n as f64 / self.samples_per_second
323    }
324
325    /// Convert to an FFI-friendly summary containing only scalar fields.
326    pub fn to_summary(&self) -> crate::ffi_summary::CalibrationSummary {
327        crate::ffi_summary::CalibrationSummary {
328            block_length: self.block_length,
329            calibration_samples: self.calibration_samples,
330            discrete_mode: self.discrete_mode,
331            timer_resolution_ns: self.timer_resolution_ns,
332            theta_ns: self.theta_ns,
333            theta_eff: self.theta_eff,
334            theta_floor_initial: self.theta_floor_initial,
335            theta_tick: self.theta_tick,
336            mde_ns: self.mde_ns,
337            samples_per_second: self.samples_per_second,
338        }
339    }
340}
341
/// Configuration for calibration phase (no_std compatible).
///
/// This contains parameters for calibration that don't require std features.
/// Default values are provided via `Default` (see `impl Default for CalibrationConfig`).
#[derive(Debug, Clone)]
pub struct CalibrationConfig {
    /// Number of samples to collect per class during calibration.
    pub calibration_samples: usize,

    /// Number of bootstrap iterations for covariance estimation.
    pub bootstrap_iterations: usize,

    /// Timer resolution in nanoseconds.
    pub timer_resolution_ns: f64,

    /// Theta threshold in nanoseconds.
    /// A value <= 0 means "no user threshold"; the measurement floor is used instead.
    pub theta_ns: f64,

    /// Alpha level for MDE computation.
    pub alpha: f64,

    /// Random seed for bootstrap.
    /// Also seeds the prior-calibration and floor Monte Carlo sampling.
    pub seed: u64,

    /// Whether to skip preflight checks.
    pub skip_preflight: bool,

    /// Force discrete mode regardless of uniqueness ratio.
    pub force_discrete_mode: bool,
}
371
372impl Default for CalibrationConfig {
373    fn default() -> Self {
374        Self {
375            calibration_samples: 5000,
376            bootstrap_iterations: 200, // Fewer iterations for calibration phase
377            timer_resolution_ns: 1.0,
378            theta_ns: 100.0,
379            alpha: 0.01,
380            seed: DEFAULT_SEED,
381            skip_preflight: false,
382            force_discrete_mode: false,
383        }
384    }
385}
386
/// Errors that can occur during calibration.
#[derive(Debug, Clone)]
pub enum CalibrationError {
    /// Too few samples collected for reliable calibration.
    TooFewSamples {
        /// Number of samples actually collected.
        collected: usize,
        /// Minimum required samples.
        minimum: usize,
    },

    /// Covariance estimation failed (e.g., singular matrix).
    CovarianceEstimationFailed {
        /// Reason for failure.
        reason: String,
    },

    /// A preflight check failed before calibration.
    PreflightCheckFailed {
        /// Which check failed.
        check: String,
        /// Error message.
        message: String,
    },

    /// Timer is too coarse to measure this operation.
    TimerTooCoarse {
        /// Timer resolution in nanoseconds.
        resolution_ns: f64,
        /// Measured operation time in nanoseconds.
        operation_ns: f64,
    },
}

impl core::fmt::Display for CalibrationError {
    /// Human-readable one-line description of each error variant.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        use CalibrationError::*;
        match self {
            TooFewSamples { collected, minimum } => write!(
                f,
                "Too few samples: collected {collected}, need at least {minimum}"
            ),
            CovarianceEstimationFailed { reason } => {
                write!(f, "Covariance estimation failed: {reason}")
            }
            PreflightCheckFailed { check, message } => {
                write!(f, "Preflight check '{check}' failed: {message}")
            }
            TimerTooCoarse {
                resolution_ns,
                operation_ns,
            } => write!(
                f,
                "Timer resolution ({resolution_ns:.1}ns) too coarse for operation ({operation_ns:.1}ns)"
            ),
        }
    }
}
450
451/// Run calibration phase to estimate covariance and set priors (no_std compatible).
452///
453/// This is the core calibration function that can run in no_std environments.
454/// Unlike the `tacet` crate version, throughput (`samples_per_second`) must be
455/// provided by the caller since `std::time::Instant` is not available.
456///
457/// # Arguments
458///
459/// * `baseline_samples` - Pre-collected baseline timing samples (in native units)
460/// * `sample_samples` - Pre-collected sample timing samples (in native units)
461/// * `ns_per_tick` - Conversion factor from native units to nanoseconds
462/// * `config` - Calibration configuration
463/// * `samples_per_second` - Throughput measured by caller (e.g., via performance.now())
464///
465/// # Returns
466///
467/// A `Calibration` struct with all computed quantities, or a `CalibrationError`.
468pub fn calibrate(
469    baseline_samples: &[u64],
470    sample_samples: &[u64],
471    ns_per_tick: f64,
472    config: &CalibrationConfig,
473    samples_per_second: f64,
474) -> Result<Calibration, CalibrationError> {
475    let n = baseline_samples.len().min(sample_samples.len());
476
477    // Check minimum sample requirement
478    const MIN_CALIBRATION_SAMPLES: usize = 100;
479    if n < MIN_CALIBRATION_SAMPLES {
480        return Err(CalibrationError::TooFewSamples {
481            collected: n,
482            minimum: MIN_CALIBRATION_SAMPLES,
483        });
484    }
485
486    // Convert to nanoseconds for analysis
487    let baseline_ns: Vec<f64> = baseline_samples[..n]
488        .iter()
489        .map(|&t| t as f64 * ns_per_tick)
490        .collect();
491    let sample_ns: Vec<f64> = sample_samples[..n]
492        .iter()
493        .map(|&t| t as f64 * ns_per_tick)
494        .collect();
495
496    // Check discrete mode (spec §3.6): < 10% unique values
497    let unique_baseline = count_unique(&baseline_ns);
498    let unique_sample = count_unique(&sample_ns);
499    let min_uniqueness = (unique_baseline as f64 / n as f64).min(unique_sample as f64 / n as f64);
500    let discrete_mode = config.force_discrete_mode || min_uniqueness < 0.10;
501
502    // Create acquisition stream for joint bootstrap (spec Section 2.3.1)
503    let mut acquisition_stream = AcquisitionStream::with_capacity(2 * n);
504    acquisition_stream.push_batch_interleaved(&baseline_ns, &sample_ns);
505    let interleaved = acquisition_stream.to_timing_samples();
506
507    // Bootstrap covariance estimation
508    let cov_estimate = if discrete_mode {
509        bootstrap_difference_covariance_discrete(
510            &baseline_ns,
511            &sample_ns,
512            config.bootstrap_iterations,
513            config.seed,
514        )
515    } else {
516        bootstrap_difference_covariance(
517            &interleaved,
518            config.bootstrap_iterations,
519            config.seed,
520            false,
521        )
522    };
523
524    // Check covariance validity
525    if !cov_estimate.is_stable() {
526        return Err(CalibrationError::CovarianceEstimationFailed {
527            reason: String::from("Covariance matrix is not positive definite"),
528        });
529    }
530
531    // Compute sigma rate: Sigma_rate = Sigma_cal * n_cal
532    let sigma_rate = cov_estimate.matrix * (n as f64);
533
534    // Compute MDE for prior setting (spec §3.3)
535    let mde = estimate_mde(&cov_estimate.matrix, config.alpha);
536
537    // Run preflight checks (unless skipped)
538    let preflight_result = if config.skip_preflight {
539        PreflightResult::new()
540    } else {
541        run_core_checks(
542            &baseline_ns,
543            &sample_ns,
544            config.timer_resolution_ns,
545            config.seed,
546        )
547    };
548
549    // Compute calibration statistics snapshot for drift detection
550    let calibration_snapshot = compute_calibration_snapshot(&baseline_ns, &sample_ns);
551
552    // Compute floor-rate constant: c_floor = q_95(max_k |Z_k|) where Z ~ N(0, Σ_rate)
553    let c_floor = compute_c_floor_9d(&sigma_rate, config.seed);
554
555    // Timer tick floor
556    let theta_tick = config.timer_resolution_ns;
557
558    // Initial measurement floor at calibration sample count
559    let theta_floor_initial = (c_floor / (n as f64).sqrt()).max(theta_tick);
560
561    // Effective threshold: max(user threshold, measurement floor)
562    let theta_eff = if config.theta_ns > 0.0 {
563        config.theta_ns.max(theta_floor_initial)
564    } else {
565        theta_floor_initial
566    };
567
568    // Student's t prior (ν=4) calibration
569    let (sigma_t, l_r) =
570        calibrate_t_prior_scale(&sigma_rate, theta_eff, n, discrete_mode, config.seed);
571
572    Ok(Calibration::with_preflight(
573        sigma_rate,
574        cov_estimate.block_size,
575        sigma_t,
576        l_r,
577        config.theta_ns,
578        n,
579        discrete_mode,
580        mde.mde_ns,
581        calibration_snapshot,
582        config.timer_resolution_ns,
583        samples_per_second,
584        c_floor,
585        cov_estimate.q_thresh,
586        theta_tick,
587        theta_eff,
588        theta_floor_initial,
589        config.seed,
590        1, // batch_k = 1 (no batching), updated by collector
591        preflight_result,
592    ))
593}
594
/// Count unique values in a slice (for discrete mode detection).
///
/// Values are discretized into 0.001 ns buckets before counting, so that
/// floating-point noise well below any meaningful timing difference does
/// not inflate the count.
fn count_unique(values: &[f64]) -> usize {
    // Sort-and-dedup gives the same distinct-bucket count as a set insert.
    let mut buckets: Vec<i64> = values.iter().map(|&v| (v * 1000.0) as i64).collect();
    buckets.sort_unstable();
    buckets.dedup();
    buckets.len()
}
604
605/// Compute calibration statistics snapshot for drift detection.
606fn compute_calibration_snapshot(baseline_ns: &[f64], sample_ns: &[f64]) -> CalibrationSnapshot {
607    let mut baseline_stats = OnlineStats::new();
608    let mut sample_stats = OnlineStats::new();
609
610    for &t in baseline_ns {
611        baseline_stats.update(t);
612    }
613    for &t in sample_ns {
614        sample_stats.update(t);
615    }
616
617    CalibrationSnapshot::new(baseline_stats.finalize(), sample_stats.finalize())
618}
619
620/// Compute correlation-shaped 9D prior covariance (spec v5.1).
621///
622/// Λ₀ = σ²_prior × R where R = Corr(Σ_rate) = D^(-1/2) Σ_rate D^(-1/2)
623///
624/// Since diag(R) = 1, σ_prior equals the marginal prior SD for all coordinates.
625/// This eliminates hidden heteroskedasticity that could cause pathological
626/// shrinkage for certain effect patterns.
627///
628/// # Arguments
629/// * `sigma_rate` - Covariance rate matrix from bootstrap
630/// * `sigma_prior` - Prior scale (calibrated for 62% exceedance)
631/// * `discrete_mode` - Whether discrete timer mode is active
632///
633/// # Returns
634/// The prior covariance matrix Λ₀.
635pub fn compute_prior_cov_9d(
636    sigma_rate: &Matrix9,
637    sigma_prior: f64,
638    discrete_mode: bool,
639) -> Matrix9 {
640    // Compute correlation matrix R = D^(-1/2) Σ_rate D^(-1/2)
641    let r = compute_correlation_matrix(sigma_rate);
642
643    // Apply two-step regularization (§3.3.5):
644    // 1. Robust shrinkage (if in fragile regime)
645    // 2. Numerical jitter (always, as needed)
646    let r = apply_correlation_regularization(&r, discrete_mode);
647
648    // Λ₀ = σ²_prior × R
649    r * (sigma_prior * sigma_prior)
650}
651
652/// Compute correlation matrix R = D^(-1/2) Σ D^(-1/2) from covariance matrix.
653///
654/// The correlation matrix has unit diagonal (diag(R) = 1) and encodes
655/// the correlation structure of Σ without the variance magnitudes.
656fn compute_correlation_matrix(sigma: &Matrix9) -> Matrix9 {
657    // Extract diagonal and apply floor for numerical stability
658    let mut d_inv_sqrt = [0.0_f64; 9];
659    for i in 0..9 {
660        let var = sigma[(i, i)].max(DIAGONAL_FLOOR);
661        d_inv_sqrt[i] = 1.0 / math::sqrt(var);
662    }
663
664    // R = D^(-1/2) × Σ × D^(-1/2)
665    let mut r = *sigma;
666    for i in 0..9 {
667        for j in 0..9 {
668            r[(i, j)] *= d_inv_sqrt[i] * d_inv_sqrt[j];
669        }
670    }
671
672    r
673}
674
675/// Estimate condition number of a symmetric matrix via power iteration.
676///
677/// Returns the ratio of largest to smallest eigenvalue magnitude.
678/// Uses a simple approach: max(|diag|) / min(|diag|) as a quick estimate,
679/// plus check for Cholesky failure which indicates poor conditioning.
680fn estimate_condition_number(r: &Matrix9) -> f64 {
681    // Quick estimate from diagonal ratio (exact for diagonal matrices)
682    let diag: Vec<f64> = (0..9).map(|i| r[(i, i)].abs()).collect();
683    let max_diag = diag.iter().cloned().fold(0.0_f64, f64::max);
684    let min_diag = diag.iter().cloned().fold(f64::INFINITY, f64::min);
685
686    if min_diag < DIAGONAL_FLOOR {
687        return f64::INFINITY;
688    }
689
690    // For correlation matrices, this underestimates the true condition number,
691    // but we also check Cholesky failure separately.
692    max_diag / min_diag
693}
694
695/// Check if we're in a "fragile regime" requiring robust shrinkage (§3.3.5).
696///
697/// Fragile regime is detected when:
698/// - Discrete timer mode is active, OR
699/// - Condition number of R exceeds 10⁴, OR
700/// - Cholesky factorization fails
701fn is_fragile_regime(r: &Matrix9, discrete_mode: bool) -> bool {
702    if discrete_mode {
703        return true;
704    }
705
706    let cond = estimate_condition_number(r);
707    if cond > CONDITION_NUMBER_THRESHOLD {
708        return true;
709    }
710
711    // Check if Cholesky would fail
712    Cholesky::new(*r).is_none()
713}
714
715/// Apply two-step regularization to correlation matrix (§3.3.5).
716///
717/// Step 1 (conditional): Robust shrinkage R ← (1-λ)R + λI for fragile regimes.
718/// Step 2 (always): Numerical jitter R ← R + εI to ensure SPD.
719fn apply_correlation_regularization(r: &Matrix9, discrete_mode: bool) -> Matrix9 {
720    let mut r = *r;
721
722    // Step 1: Robust shrinkage (conditional on fragile regime)
723    if is_fragile_regime(&r, discrete_mode) {
724        // Choose λ based on severity (§3.3.5 allows [0.01, 0.2])
725        let cond = estimate_condition_number(&r);
726        let lambda = if cond > CONDITION_NUMBER_THRESHOLD * 10.0 {
727            0.2 // Severe: aggressive shrinkage
728        } else if cond > CONDITION_NUMBER_THRESHOLD {
729            0.1 // Moderate
730        } else if discrete_mode {
731            0.05 // Mild: just discrete mode
732        } else {
733            0.01 // Minimal
734        };
735
736        let identity = Matrix9::identity();
737        r = r * (1.0 - lambda) + identity * lambda;
738    }
739
740    // Step 2: Numerical jitter (always, as needed for Cholesky)
741    // Try increasingly large epsilon until Cholesky succeeds
742    for &eps in &[1e-10, 1e-9, 1e-8, 1e-7, 1e-6] {
743        let r_jittered = r + Matrix9::identity() * eps;
744        if Cholesky::new(r_jittered).is_some() {
745            return r_jittered;
746        }
747    }
748
749    // Fallback: aggressive jitter
750    r + Matrix9::identity() * 1e-5
751}
752
753/// Compute median standard error from sigma_rate.
754fn compute_median_se(sigma_rate: &Matrix9, n_cal: usize) -> f64 {
755    let mut ses: Vec<f64> = (0..9)
756        .map(|i| {
757            let var = sigma_rate[(i, i)].max(DIAGONAL_FLOOR);
758            math::sqrt(var / n_cal.max(1) as f64)
759        })
760        .collect();
761    ses.sort_by(|a, b| a.total_cmp(b));
762    ses[4] // Median of 9 values
763}
764
/// Calibrate Student's t prior scale σ so that P(max_k |δ_k| > θ_eff | δ ~ t_4(0, σ²R)) = 0.62 (v5.4).
///
/// Uses binary search to find σ such that the marginal exceedance probability
/// matches the target 62%. The t-prior is sampled via scale mixture:
/// - λ ~ Gamma(ν/2, ν/2) = Gamma(2, 2) for ν=4
/// - z ~ N(0, I₉)
/// - δ = (σ/√λ) L_R z
///
/// # Arguments
/// * `sigma_rate` - Covariance rate matrix from calibration
/// * `theta_eff` - Effective threshold in nanoseconds
/// * `n_cal` - Number of calibration samples (for SE computation)
/// * `discrete_mode` - Whether discrete timer mode is active
/// * `seed` - Deterministic RNG seed
///
/// # Returns
/// A tuple of (sigma_t, l_r) where sigma_t is the calibrated scale and l_r is
/// the Cholesky factor of the regularized correlation matrix.
pub fn calibrate_t_prior_scale(
    sigma_rate: &Matrix9,
    theta_eff: f64,
    n_cal: usize,
    discrete_mode: bool,
    seed: u64,
) -> (f64, Matrix9) {
    let median_se = compute_median_se(sigma_rate, n_cal);

    // Compute and cache L_R (Cholesky of regularized correlation matrix).
    // Regularization should make the factorization succeed; identity is a
    // safe (uncorrelated) fallback if it still fails.
    let r = compute_correlation_matrix(sigma_rate);
    let r_reg = apply_correlation_regularization(&r, discrete_mode);
    let l_r = match Cholesky::new(r_reg) {
        Some(c) => c.l().into_owned(),
        None => Matrix9::identity(),
    };

    // Precompute normalized effect magnitudes for sample reuse across bisection.
    //
    // For t_ν prior with scale mixture representation:
    //   λ ~ Gamma(ν/2, ν/2), z ~ N(0, I₉), δ = (σ/√λ) L_R z
    //
    // So: max|δ| = σ · max|L_R z|/√λ = σ · m
    // where m_i = max|L_R z_i|/√λ_i is precomputed once.
    //
    // Then: P(max|δ| > θ) = P(σ·m > θ) = P(m > θ/σ)
    //
    // This allows O(1) exceedance computation per bisection iteration
    // instead of O(PRIOR_CALIBRATION_SAMPLES).
    let normalized_effects = precompute_t_prior_effects(&l_r, seed);

    // Search bounds (same as v5.1): the bracket spans [0.05θ, max(50θ, 10·SE)]
    // so the calibrated scale can track either the threshold or the noise level.
    let mut lo = theta_eff * 0.05;
    let mut hi = (theta_eff * 50.0).max(10.0 * median_se);

    for _ in 0..MAX_CALIBRATION_ITERATIONS {
        let mid = (lo + hi) / 2.0;

        // Compute exceedance using precomputed samples: count(m_i > θ/σ)
        let threshold = theta_eff / mid;
        let count = normalized_effects
            .iter()
            .filter(|&&m| m > threshold)
            .count();
        let exceedance = count as f64 / normalized_effects.len() as f64;

        // Stop early once within 1 percentage point of the 62% target.
        if (exceedance - TARGET_EXCEEDANCE).abs() < 0.01 {
            return (mid, l_r); // Close enough
        }

        // Exceedance is monotone increasing in σ, so standard bisection applies.
        if exceedance > TARGET_EXCEEDANCE {
            // Too much exceedance -> reduce scale
            hi = mid;
        } else {
            // Too little exceedance -> increase scale
            lo = mid;
        }
    }

    // Fallback to conservative value if bisection never landed within tolerance.
    (theta_eff * CONSERVATIVE_PRIOR_SCALE, l_r)
}
845
846/// Precompute normalized effect magnitudes for t-prior calibration.
847///
848/// Returns a vector of m_i = max_k |L_R z_i|_k / √λ_i where:
849/// - λ_i ~ Gamma(ν/2, ν/2) for ν=4
850/// - z_i ~ N(0, I₉)
851///
852/// These can be reused across bisection iterations since:
853/// P(max|δ| > θ | σ) = P(σ·m > θ) = P(m > θ/σ)
854fn precompute_t_prior_effects(l_r: &Matrix9, seed: u64) -> Vec<f64> {
855    use rand_distr::Gamma;
856
857    #[cfg(feature = "parallel")]
858    {
859        let l_r = *l_r;
860        (0..PRIOR_CALIBRATION_SAMPLES)
861            .into_par_iter()
862            .map(|i| {
863                let mut rng = Xoshiro256PlusPlus::seed_from_u64(counter_rng_seed(seed, i as u64));
864                let gamma_dist = Gamma::new(NU / 2.0, 2.0 / NU).unwrap();
865
866                // Sample λ ~ Gamma(ν/2, ν/2)
867                let lambda: f64 = gamma_dist.sample(&mut rng);
868                let inv_sqrt_lambda = 1.0 / math::sqrt(lambda.max(DIAGONAL_FLOOR));
869
870                // Sample z ~ N(0, I_9)
871                let mut z = Vector9::zeros();
872                for j in 0..9 {
873                    z[j] = sample_standard_normal(&mut rng);
874                }
875
876                // Compute m = max|L_R z| / √λ
877                let w = l_r * z;
878                let max_w = w.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
879                max_w * inv_sqrt_lambda
880            })
881            .collect()
882    }
883
884    #[cfg(not(feature = "parallel"))]
885    {
886        let mut rng = Xoshiro256PlusPlus::seed_from_u64(seed);
887        let gamma_dist = Gamma::new(NU / 2.0, 2.0 / NU).unwrap();
888        let mut effects = Vec::with_capacity(PRIOR_CALIBRATION_SAMPLES);
889
890        for _ in 0..PRIOR_CALIBRATION_SAMPLES {
891            let lambda: f64 = gamma_dist.sample(&mut rng);
892            let inv_sqrt_lambda = 1.0 / math::sqrt(lambda.max(DIAGONAL_FLOOR));
893
894            let mut z = Vector9::zeros();
895            for i in 0..9 {
896                z[i] = sample_standard_normal(&mut rng);
897            }
898
899            let w = l_r * z;
900            let max_w = w.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
901            effects.push(max_w * inv_sqrt_lambda);
902        }
903        effects
904    }
905}
906
907/// Compute floor-rate constant c_floor for 9D model.
908///
909/// c_floor = q_95(max_k |Z_k|) where Z ~ N(0, Σ_rate)
910///
911/// Used for theta_floor computation: theta_floor_stat(n) = c_floor / sqrt(n)
912pub fn compute_c_floor_9d(sigma_rate: &Matrix9, seed: u64) -> f64 {
913    let chol = match Cholesky::new(*sigma_rate) {
914        Some(c) => c,
915        None => {
916            // Fallback: use trace-based approximation
917            let trace: f64 = (0..9).map(|i| sigma_rate[(i, i)]).sum();
918            return math::sqrt(trace / 9.0) * 2.5; // Approximate 95th percentile
919        }
920    };
921    let l = chol.l().into_owned();
922
923    // Parallel MC sampling when feature enabled
924    #[cfg(feature = "parallel")]
925    let mut max_effects: Vec<f64> = (0..PRIOR_CALIBRATION_SAMPLES)
926        .into_par_iter()
927        .map(|i| {
928            let mut rng = Xoshiro256PlusPlus::seed_from_u64(counter_rng_seed(seed, i as u64));
929            let mut z = Vector9::zeros();
930            for j in 0..9 {
931                z[j] = sample_standard_normal(&mut rng);
932            }
933            let sample = l * z;
934            sample.iter().map(|x| x.abs()).fold(0.0_f64, f64::max)
935        })
936        .collect();
937
938    #[cfg(not(feature = "parallel"))]
939    let mut max_effects: Vec<f64> = {
940        let mut rng = Xoshiro256PlusPlus::seed_from_u64(seed);
941        let mut effects = Vec::with_capacity(PRIOR_CALIBRATION_SAMPLES);
942        for _ in 0..PRIOR_CALIBRATION_SAMPLES {
943            let mut z = Vector9::zeros();
944            for i in 0..9 {
945                z[i] = sample_standard_normal(&mut rng);
946            }
947            let sample = l * z;
948            let max_effect = sample.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
949            effects.push(max_effect);
950        }
951        effects
952    };
953
954    // 95th percentile using O(n) selection instead of O(n log n) sort
955    let idx =
956        ((PRIOR_CALIBRATION_SAMPLES as f64 * 0.95) as usize).min(PRIOR_CALIBRATION_SAMPLES - 1);
957    let (_, &mut percentile_95, _) = max_effects.select_nth_unstable_by(idx, |a, b| a.total_cmp(b));
958    percentile_95
959}
960
961/// Sample from standard normal using Box-Muller transform.
962fn sample_standard_normal<R: Rng>(rng: &mut R) -> f64 {
963    let u1: f64 = rng.random();
964    let u2: f64 = rng.random();
965    math::sqrt(-2.0 * math::ln(u1.max(1e-12))) * math::cos(2.0 * PI * u2)
966}
967
// Unit tests for the calibration helpers: covariance scaling, n_eff,
// c_floor Monte Carlo, and the t-prior scale bisection — including a
// reference (non-optimized) implementation used to validate the
// precompute-and-reuse exceedance optimization.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::statistics::StatsSnapshot;

    /// Build a fully-specified `Calibration` fixture shared by the tests
    /// below. Uses identity matrices and round numbers (sigma_rate diag =
    /// 1000, block_length = 10, samples_per_second = 10000) so expected
    /// values can be derived by hand in the assertions.
    fn make_test_calibration() -> Calibration {
        let snapshot = CalibrationSnapshot::new(
            StatsSnapshot {
                count: 1000,
                mean: 100.0,
                variance: 25.0,
                autocorr_lag1: 0.1,
            },
            StatsSnapshot {
                count: 1000,
                mean: 105.0,
                variance: 30.0,
                autocorr_lag1: 0.12,
            },
        );

        let sigma_rate = Matrix9::identity() * 1000.0;
        let theta_eff = 100.0;

        Calibration::new(
            sigma_rate,
            10,                  // block_length
            100.0,               // sigma_t
            Matrix9::identity(), // l_r (identity for tests)
            theta_eff,           // theta_ns
            5000,                // calibration_samples
            false,               // discrete_mode
            5.0,                 // mde_ns
            snapshot,            // calibration_snapshot
            1.0,                 // timer_resolution_ns
            10000.0,             // samples_per_second
            10.0,                // c_floor
            18.48,               // projection_mismatch_thresh
            0.001,               // theta_tick
            theta_eff,           // theta_eff
            0.1,                 // theta_floor_initial
            42,                  // rng_seed
            1,                   // batch_k
        )
    }

    /// Per-sample covariance should shrink as 1/n_eff, where n_eff accounts
    /// for autocorrelation via block_length.
    #[test]
    fn test_covariance_scaling() {
        let cal = make_test_calibration();
        // make_test_calibration uses sigma_rate[(0,0)] = 1000.0 and block_length = 10

        // v5.6: covariance_for_n uses n_eff = n / block_length
        // At n=1000 with block_length=10: n_eff = 100
        // sigma_n[(0,0)] = sigma_rate[(0,0)] / n_eff = 1000 / 100 = 10.0
        let cov_1000 = cal.covariance_for_n(1000);
        assert!(
            (cov_1000[(0, 0)] - 10.0).abs() < 1e-10,
            "expected 10.0, got {}",
            cov_1000[(0, 0)]
        );

        // At n=2000 with block_length=10: n_eff = 200
        // sigma_n[(0,0)] = 1000 / 200 = 5.0
        let cov_2000 = cal.covariance_for_n(2000);
        assert!(
            (cov_2000[(0, 0)] - 5.0).abs() < 1e-10,
            "expected 5.0, got {}",
            cov_2000[(0, 0)]
        );
    }

    /// n_eff divides by block_length and clamps to at least 1.
    #[test]
    fn test_n_eff() {
        let cal = make_test_calibration();
        // make_test_calibration uses block_length = 10

        // n_eff = max(1, floor(n / block_length))
        assert_eq!(cal.n_eff(100), 10);
        assert_eq!(cal.n_eff(1000), 100);
        assert_eq!(cal.n_eff(10), 1);
        assert_eq!(cal.n_eff(5), 1); // Clamped to 1 when n < block_length
        assert_eq!(cal.n_eff(0), 1); // Edge case: n=0 returns 1
    }

    /// covariance_for_n(0) must not divide by zero.
    #[test]
    fn test_covariance_zero_n() {
        let cal = make_test_calibration();
        let cov = cal.covariance_for_n(0);
        // Should return sigma_rate unchanged (avoid NaN)
        assert!((cov[(0, 0)] - 1000.0).abs() < 1e-10);
    }

    /// Collection-time estimate is n / samples_per_second.
    #[test]
    fn test_estimate_collection_time() {
        let cal = make_test_calibration();

        // 10000 samples/sec -> 1000 samples takes 0.1 sec
        let time = cal.estimate_collection_time_secs(1000);
        assert!((time - 0.1).abs() < 1e-10);
    }

    /// With an identity rate matrix the prior covariance reduces to
    /// σ²_prior × I, making the expected diagonal exactly σ².
    #[test]
    fn test_compute_prior_cov_9d_unit_diagonal() {
        // Use identity matrix - correlation of identity is identity
        let sigma_rate = Matrix9::identity();
        let prior = compute_prior_cov_9d(&sigma_rate, 10.0, false);

        // R = Corr(I) = I (identity has unit diagonal, no off-diagonal correlation)
        // Λ₀ = σ²_prior × R = 100 × I
        // Each diagonal should be ~100 (σ² = 100)
        // Note: jitter adds ~1e-10 so expect ~100
        let expected = 100.0;
        for i in 0..9 {
            assert!(
                (prior[(i, i)] - expected).abs() < 1.0,
                "Diagonal {} was {}, expected ~{}",
                i,
                prior[(i, i)],
                expected
            );
        }
    }

    /// Sanity-bounds the Monte Carlo 95th-percentile constant; exact value
    /// depends on the seed, so only a plausible range is asserted.
    #[test]
    fn test_c_floor_computation() {
        let sigma_rate = Matrix9::identity() * 100.0;
        let c_floor = compute_c_floor_9d(&sigma_rate, 42);

        // c_floor should be roughly sqrt(100) * 2 to 3 for 95th percentile of max
        assert!(c_floor > 15.0, "c_floor {} should be > 15", c_floor);
        assert!(c_floor < 40.0, "c_floor {} should be < 40", c_floor);
    }

    /// Pins the documented defaults of CalibrationConfig.
    #[test]
    fn test_calibration_config_default() {
        let config = CalibrationConfig::default();
        assert_eq!(config.calibration_samples, 5000);
        assert_eq!(config.bootstrap_iterations, 200);
        assert!((config.theta_ns - 100.0).abs() < 1e-10);
        assert!((config.timer_resolution_ns - 1.0).abs() < 1e-10);
        assert!(!config.skip_preflight);
        assert!(!config.force_discrete_mode);
    }

    // =========================================================================
    // Reference implementations for validation (original non-optimized versions)
    // =========================================================================

    /// Reference implementation: compute t-prior exceedance without sample reuse.
    /// This generates fresh samples for each call, matching the original implementation.
    fn reference_t_prior_exceedance(l_r: &Matrix9, sigma: f64, theta: f64, seed: u64) -> f64 {
        use rand_distr::Gamma;

        let mut rng = Xoshiro256PlusPlus::seed_from_u64(seed);
        let gamma_dist = Gamma::new(NU / 2.0, 2.0 / NU).unwrap();
        let mut count = 0usize;

        for _ in 0..PRIOR_CALIBRATION_SAMPLES {
            let lambda: f64 = gamma_dist.sample(&mut rng);
            let scale = sigma / crate::math::sqrt(lambda.max(DIAGONAL_FLOOR));

            let mut z = Vector9::zeros();
            for i in 0..9 {
                z[i] = sample_standard_normal(&mut rng);
            }

            // δ = L_R z · σ/√λ; count draws whose max component exceeds θ.
            let delta = l_r * z * scale;
            let max_effect = delta.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
            if max_effect > theta {
                count += 1;
            }
        }

        count as f64 / PRIOR_CALIBRATION_SAMPLES as f64
    }

    /// Helper: compute exceedance using precomputed t-prior effects
    /// (mirrors the threshold count done inside the bisection loop).
    fn optimized_t_prior_exceedance(normalized_effects: &[f64], sigma: f64, theta: f64) -> f64 {
        let threshold = theta / sigma;
        let count = normalized_effects
            .iter()
            .filter(|&&m| m > threshold)
            .count();
        count as f64 / normalized_effects.len() as f64
    }

    // =========================================================================
    // Tests verifying optimized implementations match reference
    // =========================================================================

    /// Compares optimized and reference exceedance at several sigma values.
    /// Only range checks are asserted because the two use different random
    /// samples; the printed values allow manual comparison.
    #[test]
    fn test_t_prior_precompute_exceedance_matches_reference() {
        // Test that the optimized exceedance computation matches reference
        // for various sigma values
        let l_r = Matrix9::identity();
        let theta = 10.0;
        let seed = 12345u64;

        // Precompute effects using the optimized method
        let normalized_effects = precompute_t_prior_effects(&l_r, seed);

        // Test at multiple sigma values
        for sigma in [5.0, 10.0, 15.0, 20.0, 30.0] {
            let optimized = optimized_t_prior_exceedance(&normalized_effects, sigma, theta);
            let reference = reference_t_prior_exceedance(&l_r, sigma, theta, seed);

            // Allow some tolerance due to different RNG sequences
            // The key property is that exceedance should be monotonically increasing with sigma
            assert!(
                (0.0..=1.0).contains(&optimized),
                "Optimized exceedance {} out of range for sigma={}",
                optimized,
                sigma
            );
            assert!(
                (0.0..=1.0).contains(&reference),
                "Reference exceedance {} out of range for sigma={}",
                reference,
                sigma
            );

            // Both should be in similar ballpark (within 0.1 of each other)
            // Note: They won't be exactly equal because the optimized version
            // uses different random samples
            println!(
                "sigma={}: optimized={:.4}, reference={:.4}",
                sigma, optimized, reference
            );
        }
    }

    /// Exceedance must be monotone in sigma and approach 0/1 at the extremes —
    /// the property the bisection in calibrate_t_prior_scale relies on.
    #[test]
    fn test_t_prior_exceedance_monotonicity() {
        // Key property: exceedance should increase with sigma
        let l_r = Matrix9::identity();
        let theta = 10.0;
        let seed = 42u64;

        let normalized_effects = precompute_t_prior_effects(&l_r, seed);

        let mut prev_exceedance = 0.0;
        for sigma in [1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0] {
            let exceedance = optimized_t_prior_exceedance(&normalized_effects, sigma, theta);

            assert!(
                exceedance >= prev_exceedance,
                "Exceedance should increase with sigma: sigma={}, exc={}, prev={}",
                sigma,
                exceedance,
                prev_exceedance
            );
            prev_exceedance = exceedance;
        }

        // At very large sigma, exceedance should approach 1
        let large_sigma_exc = optimized_t_prior_exceedance(&normalized_effects, 1000.0, theta);
        assert!(
            large_sigma_exc > 0.99,
            "Exceedance at large sigma should be ~1, got {}",
            large_sigma_exc
        );

        // At very small sigma, exceedance should approach 0
        let small_sigma_exc = optimized_t_prior_exceedance(&normalized_effects, 0.1, theta);
        assert!(
            small_sigma_exc < 0.01,
            "Exceedance at small sigma should be ~0, got {}",
            small_sigma_exc
        );
    }

    /// End-to-end check: the bisection result should land near the target
    /// exceedance when re-evaluated with the same seed.
    #[test]
    fn test_calibrate_t_prior_scale_finds_target_exceedance() {
        // Test that the calibration finds a sigma_t that achieves ~62% exceedance
        let sigma_rate = Matrix9::identity() * 100.0;
        let theta_eff = 10.0;
        let n_cal = 5000;
        let discrete_mode = false;
        let seed = 42u64;

        let (sigma_t, l_r) =
            calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, seed);

        // Verify the calibrated sigma_t achieves target exceedance
        let normalized_effects = precompute_t_prior_effects(&l_r, seed);
        let exceedance = optimized_t_prior_exceedance(&normalized_effects, sigma_t, theta_eff);

        assert!(
            (exceedance - TARGET_EXCEEDANCE).abs() < 0.05,
            "Calibrated t-prior exceedance {} should be near target {}",
            exceedance,
            TARGET_EXCEEDANCE
        );
    }

    /// Calibration must be reproducible: identical seeds → identical sigma_t.
    #[test]
    fn test_calibration_determinism() {
        // Same seed should give same results
        let sigma_rate = Matrix9::identity() * 100.0;
        let theta_eff = 10.0;
        let n_cal = 5000;
        let discrete_mode = false;
        let seed = 12345u64;

        let (sigma_t_1, _) =
            calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, seed);
        let (sigma_t_2, _) =
            calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, seed);

        assert!(
            (sigma_t_1 - sigma_t_2).abs() < 1e-10,
            "Same seed should give same sigma_t: {} vs {}",
            sigma_t_1,
            sigma_t_2
        );
    }

    /// Distributional sanity checks on the precomputed normalized effects:
    /// positive, reasonable mean, and non-degenerate spread.
    #[test]
    fn test_precomputed_effects_distribution() {
        // Test that precomputed effects follow expected distribution
        let l_r = Matrix9::identity();
        let seed = 42u64;

        let effects = precompute_t_prior_effects(&l_r, seed);

        // All effects should be positive (they're max of absolute values)
        assert!(
            effects.iter().all(|&m| m > 0.0),
            "All effects should be positive"
        );

        // Compute mean and check it's reasonable
        let mean: f64 = effects.iter().sum::<f64>() / effects.len() as f64;
        // For t_4 with identity L_R, mean of max|z|/sqrt(lambda) should be roughly 2-4
        assert!(
            mean > 1.0 && mean < 10.0,
            "Mean effect {} should be in reasonable range",
            mean
        );

        // Check variance is non-zero (samples are diverse)
        let variance: f64 =
            effects.iter().map(|&m| (m - mean).powi(2)).sum::<f64>() / (effects.len() - 1) as f64;
        assert!(variance > 0.1, "Effects should have non-trivial variance");
    }

    /// Rough wall-clock benchmark of the t-prior calibration; ignored by
    /// default because it is slow and asserts nothing.
    #[test]
    #[ignore] // Slow benchmark - run with `cargo test -- --ignored`
    fn bench_calibration_timing() {
        use std::time::Instant;

        let sigma_rate = Matrix9::identity() * 10000.0;
        let theta_eff = 100.0;
        let n_cal = 5000;
        let discrete_mode = false;

        // Warm up
        let _ = calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, 1);

        // Benchmark t-prior calibration
        let iterations = 10;
        let start = Instant::now();
        for i in 0..iterations {
            let _ = calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, i as u64);
        }
        let t_prior_time = start.elapsed();

        println!(
            "\n=== Calibration Performance ===\n\
             \n\
             T-prior calibration: {:?} per call\n\
             ({} iterations averaged)",
            t_prior_time / iterations as u32,
            iterations
        );
    }
}