// tacet_core/adaptive/calibration.rs

//! Calibration data and computation for adaptive sampling (no_std compatible).
//!
//! This module defines the calibration results needed for the adaptive sampling loop,
//! as well as the core calibration computation that can run in no_std environments.
//!
//! For no_std compatibility:
//! - No `std::time::Instant` (throughput measured by caller)
//! - Core preflight checks are no_std compatible
//! - Uses only `alloc` for heap allocation
//!
//! # Usage
//!
//! The `calibrate()` function runs the calibration phase using pre-collected samples.
//! Unlike the `tacet` crate version, it takes `samples_per_second` as a parameter
//! instead of measuring it internally (to avoid `std::time::Instant`).
extern crate alloc;

use alloc::string::String;
use alloc::vec::Vec;
use core::f64::consts::PI;

use nalgebra::Cholesky;
use rand::prelude::*;
use rand::SeedableRng;
use rand_xoshiro::Xoshiro256PlusPlus;

use crate::analysis::mde::estimate_mde;
use crate::constants::DEFAULT_SEED;
use crate::math;
use crate::preflight::{run_core_checks, PreflightResult};
use crate::statistics::{
    bootstrap_difference_covariance, bootstrap_difference_covariance_discrete, AcquisitionStream,
    OnlineStats,
};
use crate::types::{Matrix9, Vector9};

use super::CalibrationSnapshot;

#[cfg(feature = "parallel")]
use rayon::prelude::*;
42
/// Counter-based RNG seed generation using SplitMix64.
/// Provides deterministic, well-distributed seeds for parallel MC sampling.
#[cfg(feature = "parallel")]
#[inline]
fn counter_rng_seed(base_seed: u64, counter: u64) -> u64 {
    // Advance by the golden-ratio gamma per counter step, then apply the
    // SplitMix64 finalizer (xor-shift-multiply mixing rounds).
    let mut x = counter
        .wrapping_mul(0x9e3779b97f4a7c15)
        .wrapping_add(base_seed);
    x = (x ^ (x >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
    x = (x ^ (x >> 27)).wrapping_mul(0x94d049bb133111eb);
    x ^ (x >> 31)
}
53
/// Conservative prior scale factor used as fallback.
/// Applied (times theta_eff) when bisection in `calibrate_t_prior_scale` does not
/// converge; chosen to give higher exceedance probability than the target 62%.
const CONSERVATIVE_PRIOR_SCALE: f64 = 1.5;

/// Target prior exceedance probability: P(max_k |δ_k| > θ_eff) under the prior.
const TARGET_EXCEEDANCE: f64 = 0.62;

/// Number of Monte Carlo samples for prior calibration.
const PRIOR_CALIBRATION_SAMPLES: usize = 50_000;

/// Maximum iterations for prior calibration root-finding (bisection).
const MAX_CALIBRATION_ITERATIONS: usize = 20;

/// Condition number threshold for triggering robust shrinkage (§3.3.5).
const CONDITION_NUMBER_THRESHOLD: f64 = 1e4;

/// Minimum diagonal floor for regularization (prevents division by zero).
const DIAGONAL_FLOOR: f64 = 1e-12;

/// Degrees of freedom for Student's t prior (v5.4).
pub const NU: f64 = 4.0;
75
76/// Calibration results from the initial measurement phase (no_std compatible).
77///
78/// This struct contains the essential statistical data needed for the adaptive
79/// sampling loop. It is designed for use in no_std environments like SGX enclaves.
80///
81/// For full calibration with preflight checks, see `tacet::Calibration`.
82///
83/// ## Student's t Prior (v5.4+)
84///
85/// The prior is a Student's t distribution with ν=4 degrees of freedom:
86/// δ ~ t_ν(0, σ_t²R)
87///
88/// This is implemented via scale mixture: λ ~ Gamma(ν/2, ν/2), δ|λ ~ N(0, (σ_t²/λ)R).
89/// The marginal variance is (ν/(ν-2)) σ_t² R = 2σ_t² R for ν=4.
#[derive(Debug, Clone)]
pub struct Calibration {
    /// Covariance "rate" - multiply by 1/n to get Sigma_n for n samples.
    /// Computed as Sigma_cal * n_cal where Sigma_cal is calibration covariance.
    /// This allows O(1) covariance scaling as samples accumulate.
    pub sigma_rate: Matrix9,

    /// Block length from Politis-White algorithm.
    /// Used for block bootstrap to preserve autocorrelation structure,
    /// and by `n_eff` to discount temporally dependent samples.
    pub block_length: usize,

    /// Calibrated Student's t prior scale (v5.4).
    /// This is the σ in δ|λ ~ N(0, (σ²/λ)R).
    pub sigma_t: f64,

    /// Cholesky factor L_R of correlation matrix R.
    /// Used for Gibbs sampling: δ = (σ/√λ) L_R z.
    pub l_r: Matrix9,

    /// Marginal prior covariance: 2σ²R (for ν=4).
    /// This is the unconditional prior variance of δ under the t-prior;
    /// derived from `sigma_t` and `l_r` in the constructors.
    pub prior_cov_marginal: Matrix9,

    /// The theta threshold being used (in nanoseconds).
    pub theta_ns: f64,

    /// Number of calibration samples collected per class.
    pub calibration_samples: usize,

    /// Whether discrete mode is active (< 10% unique values).
    /// When true, use mid-quantile estimators and m-out-of-n bootstrap.
    pub discrete_mode: bool,

    /// Minimum detectable effect (shift component) from calibration.
    pub mde_shift_ns: f64,

    /// Minimum detectable effect (tail component) from calibration.
    pub mde_tail_ns: f64,

    /// Statistics snapshot from calibration phase for drift detection.
    pub calibration_snapshot: CalibrationSnapshot,

    /// Timer resolution in nanoseconds.
    pub timer_resolution_ns: f64,

    /// Measured throughput for time estimation (samples per second).
    /// The caller is responsible for measuring this during calibration.
    pub samples_per_second: f64,

    /// Floor-rate constant.
    /// Computed once at calibration: 95th percentile of max_k|Z_k| where Z ~ N(0, Σ_rate).
    /// Used for analytical theta_floor computation: theta_floor_stat(n) = c_floor / sqrt(n).
    pub c_floor: f64,

    /// Projection mismatch threshold.
    /// 99th percentile of bootstrap Q_proj distribution.
    pub projection_mismatch_thresh: f64,

    /// Timer resolution floor component.
    /// theta_tick = (1 tick in ns) / K where K is the batch size.
    pub theta_tick: f64,

    /// Effective threshold for this run.
    /// theta_eff = max(theta_user, theta_floor) or just theta_floor in research mode.
    pub theta_eff: f64,

    /// Initial measurement floor at calibration time.
    pub theta_floor_initial: f64,

    /// Deterministic RNG seed used for this run.
    pub rng_seed: u64,

    /// Batch size K for adaptive batching.
    /// When K > 1, samples contain K iterations worth of timing.
    /// Effect estimates must be divided by K to report per-call differences.
    pub batch_k: u32,

    /// Results of core preflight checks run during calibration (no_std compatible).
    /// Platform-specific checks (like system configuration) are handled by the
    /// `tacet` crate's wrapper.
    pub preflight_result: PreflightResult,
}
172
173impl Calibration {
174    /// Create a new Calibration with v5.4+ Student's t prior.
175    #[allow(clippy::too_many_arguments)]
176    pub fn new(
177        sigma_rate: Matrix9,
178        block_length: usize,
179        sigma_t: f64,
180        l_r: Matrix9,
181        theta_ns: f64,
182        calibration_samples: usize,
183        discrete_mode: bool,
184        mde_shift_ns: f64,
185        mde_tail_ns: f64,
186        calibration_snapshot: CalibrationSnapshot,
187        timer_resolution_ns: f64,
188        samples_per_second: f64,
189        c_floor: f64,
190        projection_mismatch_thresh: f64,
191        theta_tick: f64,
192        theta_eff: f64,
193        theta_floor_initial: f64,
194        rng_seed: u64,
195        batch_k: u32,
196    ) -> Self {
197        // Compute marginal prior covariance: 2σ²R (for ν=4)
198        // The marginal variance of t_ν(0, σ²R) is (ν/(ν-2)) σ²R = 2σ²R for ν=4
199        let r = l_r * l_r.transpose();
200        let prior_cov_marginal = r * (2.0 * sigma_t * sigma_t);
201
202        Self {
203            sigma_rate,
204            block_length,
205            sigma_t,
206            l_r,
207            prior_cov_marginal,
208            theta_ns,
209            calibration_samples,
210            discrete_mode,
211            mde_shift_ns,
212            mde_tail_ns,
213            calibration_snapshot,
214            timer_resolution_ns,
215            samples_per_second,
216            c_floor,
217            projection_mismatch_thresh,
218            theta_tick,
219            theta_eff,
220            theta_floor_initial,
221            rng_seed,
222            batch_k,
223            preflight_result: PreflightResult::new(),
224        }
225    }
226
227    /// Create a new Calibration with all fields including preflight results.
228    #[allow(clippy::too_many_arguments)]
229    pub fn with_preflight(
230        sigma_rate: Matrix9,
231        block_length: usize,
232        sigma_t: f64,
233        l_r: Matrix9,
234        theta_ns: f64,
235        calibration_samples: usize,
236        discrete_mode: bool,
237        mde_shift_ns: f64,
238        mde_tail_ns: f64,
239        calibration_snapshot: CalibrationSnapshot,
240        timer_resolution_ns: f64,
241        samples_per_second: f64,
242        c_floor: f64,
243        projection_mismatch_thresh: f64,
244        theta_tick: f64,
245        theta_eff: f64,
246        theta_floor_initial: f64,
247        rng_seed: u64,
248        batch_k: u32,
249        preflight_result: PreflightResult,
250    ) -> Self {
251        // Compute marginal prior covariance: 2σ²R (for ν=4)
252        // The marginal variance of t_ν(0, σ²R) is (ν/(ν-2)) σ²R = 2σ²R for ν=4
253        let r = l_r * l_r.transpose();
254        let prior_cov_marginal = r * (2.0 * sigma_t * sigma_t);
255
256        Self {
257            sigma_rate,
258            block_length,
259            sigma_t,
260            l_r,
261            prior_cov_marginal,
262            theta_ns,
263            calibration_samples,
264            discrete_mode,
265            mde_shift_ns,
266            mde_tail_ns,
267            calibration_snapshot,
268            timer_resolution_ns,
269            samples_per_second,
270            c_floor,
271            projection_mismatch_thresh,
272            theta_tick,
273            theta_eff,
274            theta_floor_initial,
275            rng_seed,
276            batch_k,
277            preflight_result,
278        }
279    }
280
281    /// Compute effective sample size accounting for dependence (spec §3.3.2 v5.6).
282    ///
283    /// Under strong temporal dependence, n samples do not provide n independent observations.
284    /// The effective sample size approximates the number of effectively independent blocks.
285    ///
286    /// n_eff = max(1, floor(n / block_length))
287    ///
288    /// where block_length is the estimated dependence length from Politis-White selector.
289    pub fn n_eff(&self, n: usize) -> usize {
290        if self.block_length == 0 {
291            return n.max(1);
292        }
293        (n / self.block_length).max(1)
294    }
295
296    /// Scale sigma_rate to get covariance for n samples using effective sample size (v5.6).
297    ///
298    /// Σ_n = Σ_rate / n_eff
299    ///
300    /// where n_eff = max(1, floor(n / block_length)) to correctly account for reduced
301    /// information under temporal dependence.
302    pub fn covariance_for_n(&self, n: usize) -> Matrix9 {
303        if n == 0 {
304            return self.sigma_rate; // Avoid division by zero
305        }
306        let n_eff = self.n_eff(n);
307        self.sigma_rate / (n_eff as f64)
308    }
309
310    /// Scale sigma_rate to get covariance using raw n samples (ignoring dependence).
311    ///
312    /// Σ_n = Σ_rate / n
313    ///
314    /// Use this only when you need the raw scaling without n_eff correction.
315    pub fn covariance_for_n_raw(&self, n: usize) -> Matrix9 {
316        if n == 0 {
317            return self.sigma_rate; // Avoid division by zero
318        }
319        self.sigma_rate / (n as f64)
320    }
321
322    /// Estimate time to collect `n` additional samples based on calibration throughput.
323    ///
324    /// Returns estimated seconds. Caller should add any overhead.
325    pub fn estimate_collection_time_secs(&self, n: usize) -> f64 {
326        if self.samples_per_second <= 0.0 {
327            return 0.0;
328        }
329        n as f64 / self.samples_per_second
330    }
331
332    /// Convert to an FFI-friendly summary containing only scalar fields.
333    pub fn to_summary(&self) -> crate::ffi_summary::CalibrationSummary {
334        crate::ffi_summary::CalibrationSummary {
335            block_length: self.block_length,
336            calibration_samples: self.calibration_samples,
337            discrete_mode: self.discrete_mode,
338            timer_resolution_ns: self.timer_resolution_ns,
339            theta_ns: self.theta_ns,
340            theta_eff: self.theta_eff,
341            theta_floor_initial: self.theta_floor_initial,
342            theta_tick: self.theta_tick,
343            mde_shift_ns: self.mde_shift_ns,
344            mde_tail_ns: self.mde_tail_ns,
345            projection_mismatch_thresh: self.projection_mismatch_thresh,
346            samples_per_second: self.samples_per_second,
347        }
348    }
349}
350
/// Configuration for calibration phase (no_std compatible).
///
/// This contains parameters for calibration that don't require std features.
/// See `Default` for the standard settings.
#[derive(Debug, Clone)]
pub struct CalibrationConfig {
    /// Number of samples to collect per class during calibration.
    pub calibration_samples: usize,

    /// Number of bootstrap iterations for covariance estimation.
    pub bootstrap_iterations: usize,

    /// Timer resolution in nanoseconds.
    pub timer_resolution_ns: f64,

    /// Theta threshold in nanoseconds.
    /// Non-positive means no user threshold (floor-only / research mode).
    pub theta_ns: f64,

    /// Alpha level for MDE computation.
    pub alpha: f64,

    /// Random seed for bootstrap.
    pub seed: u64,

    /// Whether to skip preflight checks.
    pub skip_preflight: bool,

    /// Force discrete mode regardless of uniqueness ratio.
    pub force_discrete_mode: bool,
}
380
381impl Default for CalibrationConfig {
382    fn default() -> Self {
383        Self {
384            calibration_samples: 5000,
385            bootstrap_iterations: 200, // Fewer iterations for calibration phase
386            timer_resolution_ns: 1.0,
387            theta_ns: 100.0,
388            alpha: 0.01,
389            seed: DEFAULT_SEED,
390            skip_preflight: false,
391            force_discrete_mode: false,
392        }
393    }
394}
395
/// Errors that can occur during calibration.
#[derive(Debug, Clone)]
pub enum CalibrationError {
    /// Too few samples collected for reliable calibration
    /// (see `MIN_CALIBRATION_SAMPLES` inside `calibrate`).
    TooFewSamples {
        /// Number of samples actually collected.
        collected: usize,
        /// Minimum required samples.
        minimum: usize,
    },

    /// Covariance estimation failed (e.g., singular matrix).
    CovarianceEstimationFailed {
        /// Reason for failure.
        reason: String,
    },

    /// A preflight check failed before calibration.
    PreflightCheckFailed {
        /// Which check failed.
        check: String,
        /// Error message.
        message: String,
    },

    /// Timer is too coarse to measure this operation.
    TimerTooCoarse {
        /// Timer resolution in nanoseconds.
        resolution_ns: f64,
        /// Measured operation time in nanoseconds.
        operation_ns: f64,
    },
}
429
430impl core::fmt::Display for CalibrationError {
431    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
432        match self {
433            CalibrationError::TooFewSamples { collected, minimum } => {
434                write!(
435                    f,
436                    "Too few samples: collected {}, need at least {}",
437                    collected, minimum
438                )
439            }
440            CalibrationError::CovarianceEstimationFailed { reason } => {
441                write!(f, "Covariance estimation failed: {}", reason)
442            }
443            CalibrationError::PreflightCheckFailed { check, message } => {
444                write!(f, "Preflight check '{}' failed: {}", check, message)
445            }
446            CalibrationError::TimerTooCoarse {
447                resolution_ns,
448                operation_ns,
449            } => {
450                write!(
451                    f,
452                    "Timer resolution ({:.1}ns) too coarse for operation ({:.1}ns)",
453                    resolution_ns, operation_ns
454                )
455            }
456        }
457    }
458}
459
/// Run calibration phase to estimate covariance and set priors (no_std compatible).
///
/// This is the core calibration function that can run in no_std environments.
/// Unlike the `tacet` crate version, throughput (`samples_per_second`) must be
/// provided by the caller since `std::time::Instant` is not available.
///
/// # Arguments
///
/// * `baseline_samples` - Pre-collected baseline timing samples (in native units)
/// * `sample_samples` - Pre-collected sample timing samples (in native units)
/// * `ns_per_tick` - Conversion factor from native units to nanoseconds
/// * `config` - Calibration configuration
/// * `samples_per_second` - Throughput measured by caller (e.g., via performance.now())
///
/// # Returns
///
/// A `Calibration` struct with all computed quantities, or a `CalibrationError`
/// (`TooFewSamples` or `CovarianceEstimationFailed`).
pub fn calibrate(
    baseline_samples: &[u64],
    sample_samples: &[u64],
    ns_per_tick: f64,
    config: &CalibrationConfig,
    samples_per_second: f64,
) -> Result<Calibration, CalibrationError> {
    // Use the common prefix so both classes contribute the same sample count.
    let n = baseline_samples.len().min(sample_samples.len());

    // Check minimum sample requirement
    const MIN_CALIBRATION_SAMPLES: usize = 100;
    if n < MIN_CALIBRATION_SAMPLES {
        return Err(CalibrationError::TooFewSamples {
            collected: n,
            minimum: MIN_CALIBRATION_SAMPLES,
        });
    }

    // Convert to nanoseconds for analysis
    let baseline_ns: Vec<f64> = baseline_samples[..n]
        .iter()
        .map(|&t| t as f64 * ns_per_tick)
        .collect();
    let sample_ns: Vec<f64> = sample_samples[..n]
        .iter()
        .map(|&t| t as f64 * ns_per_tick)
        .collect();

    // Check discrete mode (spec Section 2.4): < 10% unique values in either class.
    let unique_baseline = count_unique(&baseline_ns);
    let unique_sample = count_unique(&sample_ns);
    let min_uniqueness = (unique_baseline as f64 / n as f64).min(unique_sample as f64 / n as f64);
    let discrete_mode = config.force_discrete_mode || min_uniqueness < 0.10;

    // Create acquisition stream for joint bootstrap (spec Section 2.3.1).
    // NOTE(review): `interleaved` is only consumed by the continuous branch below.
    let mut acquisition_stream = AcquisitionStream::with_capacity(2 * n);
    acquisition_stream.push_batch_interleaved(&baseline_ns, &sample_ns);
    let interleaved = acquisition_stream.to_timing_samples();

    // Bootstrap covariance estimation. Discrete mode resamples the raw
    // per-class vectors; continuous mode resamples the interleaved stream.
    let cov_estimate = if discrete_mode {
        bootstrap_difference_covariance_discrete(
            &baseline_ns,
            &sample_ns,
            config.bootstrap_iterations,
            config.seed,
        )
    } else {
        bootstrap_difference_covariance(
            &interleaved,
            config.bootstrap_iterations,
            config.seed,
            false,
        )
    };

    // Check covariance validity
    if !cov_estimate.is_stable() {
        return Err(CalibrationError::CovarianceEstimationFailed {
            reason: String::from("Covariance matrix is not positive definite"),
        });
    }

    // Compute sigma rate: Sigma_rate = Sigma_cal * n_cal, so Sigma_n can
    // later be recovered in O(1) as Sigma_rate / n.
    let sigma_rate = cov_estimate.matrix * (n as f64);

    // Compute MDE for prior setting (spec Section 2.7)
    let mde = estimate_mde(&cov_estimate.matrix, config.alpha);

    // Run preflight checks (unless skipped)
    let preflight_result = if config.skip_preflight {
        PreflightResult::new()
    } else {
        run_core_checks(
            &baseline_ns,
            &sample_ns,
            config.timer_resolution_ns,
            config.seed,
        )
    };

    // Compute calibration statistics snapshot for drift detection
    let calibration_snapshot = compute_calibration_snapshot(&baseline_ns, &sample_ns);

    // Compute floor-rate constant: c_floor = q_95(max_k |Z_k|) where Z ~ N(0, Σ_rate)
    let c_floor = compute_c_floor_9d(&sigma_rate, config.seed);

    // Timer tick floor
    let theta_tick = config.timer_resolution_ns;

    // Initial measurement floor at calibration sample count, never below one tick.
    let theta_floor_initial = (c_floor / (n as f64).sqrt()).max(theta_tick);

    // Effective threshold: max(user threshold, measurement floor).
    // A non-positive theta_ns means no user threshold: use the floor alone.
    let theta_eff = if config.theta_ns > 0.0 {
        config.theta_ns.max(theta_floor_initial)
    } else {
        theta_floor_initial
    };

    // Student's t prior (ν=4) calibration
    let (sigma_t, l_r) =
        calibrate_t_prior_scale(&sigma_rate, theta_eff, n, discrete_mode, config.seed);

    Ok(Calibration::with_preflight(
        sigma_rate,
        cov_estimate.block_size,
        sigma_t,
        l_r,
        config.theta_ns,
        n,
        discrete_mode,
        mde.shift_ns,
        mde.tail_ns,
        calibration_snapshot,
        config.timer_resolution_ns,
        samples_per_second,
        c_floor,
        cov_estimate.q_thresh,
        theta_tick,
        theta_eff,
        theta_floor_initial,
        config.seed,
        1, // batch_k = 1 (no batching), updated by collector
        preflight_result,
    ))
}
604
/// Count unique values in a slice (for discrete mode detection).
///
/// Values are truncated into 0.001 ns buckets before counting, so that
/// floating-point noise well below any meaningful timing difference does
/// not inflate the unique count.
fn count_unique(values: &[f64]) -> usize {
    use alloc::collections::BTreeSet;

    let mut buckets = BTreeSet::new();
    for &v in values {
        buckets.insert((v * 1000.0) as i64);
    }
    buckets.len()
}
614
615/// Compute calibration statistics snapshot for drift detection.
616fn compute_calibration_snapshot(baseline_ns: &[f64], sample_ns: &[f64]) -> CalibrationSnapshot {
617    let mut baseline_stats = OnlineStats::new();
618    let mut sample_stats = OnlineStats::new();
619
620    for &t in baseline_ns {
621        baseline_stats.update(t);
622    }
623    for &t in sample_ns {
624        sample_stats.update(t);
625    }
626
627    CalibrationSnapshot::new(baseline_stats.finalize(), sample_stats.finalize())
628}
629
630/// Compute correlation-shaped 9D prior covariance (spec v5.1).
631///
632/// Λ₀ = σ²_prior × R where R = Corr(Σ_rate) = D^(-1/2) Σ_rate D^(-1/2)
633///
634/// Since diag(R) = 1, σ_prior equals the marginal prior SD for all coordinates.
635/// This eliminates hidden heteroskedasticity that could cause pathological
636/// shrinkage for certain effect patterns.
637///
638/// # Arguments
639/// * `sigma_rate` - Covariance rate matrix from bootstrap
640/// * `sigma_prior` - Prior scale (calibrated for 62% exceedance)
641/// * `discrete_mode` - Whether discrete timer mode is active
642///
643/// # Returns
644/// The prior covariance matrix Λ₀.
645pub fn compute_prior_cov_9d(
646    sigma_rate: &Matrix9,
647    sigma_prior: f64,
648    discrete_mode: bool,
649) -> Matrix9 {
650    // Compute correlation matrix R = D^(-1/2) Σ_rate D^(-1/2)
651    let r = compute_correlation_matrix(sigma_rate);
652
653    // Apply two-step regularization (§3.3.5):
654    // 1. Robust shrinkage (if in fragile regime)
655    // 2. Numerical jitter (always, as needed)
656    let r = apply_correlation_regularization(&r, discrete_mode);
657
658    // Λ₀ = σ²_prior × R
659    r * (sigma_prior * sigma_prior)
660}
661
662/// Compute correlation matrix R = D^(-1/2) Σ D^(-1/2) from covariance matrix.
663///
664/// The correlation matrix has unit diagonal (diag(R) = 1) and encodes
665/// the correlation structure of Σ without the variance magnitudes.
666fn compute_correlation_matrix(sigma: &Matrix9) -> Matrix9 {
667    // Extract diagonal and apply floor for numerical stability
668    let mut d_inv_sqrt = [0.0_f64; 9];
669    for i in 0..9 {
670        let var = sigma[(i, i)].max(DIAGONAL_FLOOR);
671        d_inv_sqrt[i] = 1.0 / math::sqrt(var);
672    }
673
674    // R = D^(-1/2) × Σ × D^(-1/2)
675    let mut r = *sigma;
676    for i in 0..9 {
677        for j in 0..9 {
678            r[(i, j)] *= d_inv_sqrt[i] * d_inv_sqrt[j];
679        }
680    }
681
682    r
683}
684
685/// Estimate condition number of a symmetric matrix via power iteration.
686///
687/// Returns the ratio of largest to smallest eigenvalue magnitude.
688/// Uses a simple approach: max(|diag|) / min(|diag|) as a quick estimate,
689/// plus check for Cholesky failure which indicates poor conditioning.
690fn estimate_condition_number(r: &Matrix9) -> f64 {
691    // Quick estimate from diagonal ratio (exact for diagonal matrices)
692    let diag: Vec<f64> = (0..9).map(|i| r[(i, i)].abs()).collect();
693    let max_diag = diag.iter().cloned().fold(0.0_f64, f64::max);
694    let min_diag = diag.iter().cloned().fold(f64::INFINITY, f64::min);
695
696    if min_diag < DIAGONAL_FLOOR {
697        return f64::INFINITY;
698    }
699
700    // For correlation matrices, this underestimates the true condition number,
701    // but we also check Cholesky failure separately.
702    max_diag / min_diag
703}
704
705/// Check if we're in a "fragile regime" requiring robust shrinkage (§3.3.5).
706///
707/// Fragile regime is detected when:
708/// - Discrete timer mode is active, OR
709/// - Condition number of R exceeds 10⁴, OR
710/// - Cholesky factorization fails
711fn is_fragile_regime(r: &Matrix9, discrete_mode: bool) -> bool {
712    if discrete_mode {
713        return true;
714    }
715
716    let cond = estimate_condition_number(r);
717    if cond > CONDITION_NUMBER_THRESHOLD {
718        return true;
719    }
720
721    // Check if Cholesky would fail
722    Cholesky::new(*r).is_none()
723}
724
725/// Apply two-step regularization to correlation matrix (§3.3.5).
726///
727/// Step 1 (conditional): Robust shrinkage R ← (1-λ)R + λI for fragile regimes.
728/// Step 2 (always): Numerical jitter R ← R + εI to ensure SPD.
729fn apply_correlation_regularization(r: &Matrix9, discrete_mode: bool) -> Matrix9 {
730    let mut r = *r;
731
732    // Step 1: Robust shrinkage (conditional on fragile regime)
733    if is_fragile_regime(&r, discrete_mode) {
734        // Choose λ based on severity (§3.3.5 allows [0.01, 0.2])
735        let cond = estimate_condition_number(&r);
736        let lambda = if cond > CONDITION_NUMBER_THRESHOLD * 10.0 {
737            0.2 // Severe: aggressive shrinkage
738        } else if cond > CONDITION_NUMBER_THRESHOLD {
739            0.1 // Moderate
740        } else if discrete_mode {
741            0.05 // Mild: just discrete mode
742        } else {
743            0.01 // Minimal
744        };
745
746        let identity = Matrix9::identity();
747        r = r * (1.0 - lambda) + identity * lambda;
748    }
749
750    // Step 2: Numerical jitter (always, as needed for Cholesky)
751    // Try increasingly large epsilon until Cholesky succeeds
752    for &eps in &[1e-10, 1e-9, 1e-8, 1e-7, 1e-6] {
753        let r_jittered = r + Matrix9::identity() * eps;
754        if Cholesky::new(r_jittered).is_some() {
755            return r_jittered;
756        }
757    }
758
759    // Fallback: aggressive jitter
760    r + Matrix9::identity() * 1e-5
761}
762
763/// Compute median standard error from sigma_rate.
764fn compute_median_se(sigma_rate: &Matrix9, n_cal: usize) -> f64 {
765    let mut ses: Vec<f64> = (0..9)
766        .map(|i| {
767            let var = sigma_rate[(i, i)].max(DIAGONAL_FLOOR);
768            math::sqrt(var / n_cal.max(1) as f64)
769        })
770        .collect();
771    ses.sort_by(|a, b| a.total_cmp(b));
772    ses[4] // Median of 9 values
773}
774
/// Calibrate Student's t prior scale σ so that P(max_k |δ_k| > θ_eff | δ ~ t_4(0, σ²R)) = 0.62 (v5.4).
///
/// Uses binary search to find σ such that the marginal exceedance probability
/// matches the target 62%. The t-prior is sampled via scale mixture:
/// - λ ~ Gamma(ν/2, ν/2) = Gamma(2, 2) for ν=4
/// - z ~ N(0, I₉)
/// - δ = (σ/√λ) L_R z
///
/// # Arguments
/// * `sigma_rate` - Covariance rate matrix from calibration
/// * `theta_eff` - Effective threshold in nanoseconds
/// * `n_cal` - Number of calibration samples (for SE computation)
/// * `discrete_mode` - Whether discrete timer mode is active
/// * `seed` - Deterministic RNG seed
///
/// # Returns
/// A tuple of (sigma_t, l_r) where sigma_t is the calibrated scale and l_r is
/// the Cholesky factor of the regularized correlation matrix. If bisection does
/// not converge within `MAX_CALIBRATION_ITERATIONS`, falls back to
/// `theta_eff * CONSERVATIVE_PRIOR_SCALE`.
pub fn calibrate_t_prior_scale(
    sigma_rate: &Matrix9,
    theta_eff: f64,
    n_cal: usize,
    discrete_mode: bool,
    seed: u64,
) -> (f64, Matrix9) {
    let median_se = compute_median_se(sigma_rate, n_cal);

    // Compute and cache L_R (Cholesky of regularized correlation matrix).
    // Identity fallback if factorization still fails after regularization.
    let r = compute_correlation_matrix(sigma_rate);
    let r_reg = apply_correlation_regularization(&r, discrete_mode);
    let l_r = match Cholesky::new(r_reg) {
        Some(c) => c.l().into_owned(),
        None => Matrix9::identity(),
    };

    // Precompute normalized effect magnitudes for sample reuse across bisection.
    //
    // For t_ν prior with scale mixture representation:
    //   λ ~ Gamma(ν/2, ν/2), z ~ N(0, I₉), δ = (σ/√λ) L_R z
    //
    // So: max|δ| = σ · max|L_R z|/√λ = σ · m
    // where m_i = max|L_R z_i|/√λ_i is precomputed once.
    //
    // Then: P(max|δ| > θ) = P(σ·m > θ) = P(m > θ/σ)
    //
    // This allows O(1) exceedance computation per bisection iteration
    // instead of O(PRIOR_CALIBRATION_SAMPLES).
    let normalized_effects = precompute_t_prior_effects(&l_r, seed);

    // Search bounds (same as v5.1): a generous bracket around theta_eff,
    // widened by the measured standard error on the high side.
    let mut lo = theta_eff * 0.05;
    let mut hi = (theta_eff * 50.0).max(10.0 * median_se);

    for _ in 0..MAX_CALIBRATION_ITERATIONS {
        let mid = (lo + hi) / 2.0;

        // Compute exceedance using precomputed samples: count(m_i > θ/σ)
        let threshold = theta_eff / mid;
        let count = normalized_effects
            .iter()
            .filter(|&&m| m > threshold)
            .count();
        let exceedance = count as f64 / normalized_effects.len() as f64;

        // Accept within ±1 percentage point of the 62% target.
        if (exceedance - TARGET_EXCEEDANCE).abs() < 0.01 {
            return (mid, l_r); // Close enough
        }

        if exceedance > TARGET_EXCEEDANCE {
            // Too much exceedance -> reduce scale
            hi = mid;
        } else {
            // Too little exceedance -> increase scale
            lo = mid;
        }
    }

    // Fallback to conservative value
    (theta_eff * CONSERVATIVE_PRIOR_SCALE, l_r)
}
855
/// Precompute normalized effect magnitudes for t-prior calibration.
///
/// Returns a vector of m_i = max_k |L_R z_i|_k / √λ_i where:
/// - λ_i ~ Gamma(ν/2, ν/2) for ν=4
/// - z_i ~ N(0, I₉)
///
/// These can be reused across bisection iterations since:
/// P(max|δ| > θ | σ) = P(σ·m > θ) = P(m > θ/σ)
///
/// The parallel build uses one counter-derived RNG per draw so results are
/// deterministic for a given `seed` regardless of thread scheduling.
/// NOTE(review): the serial build draws from a single sequential RNG stream,
/// so parallel and serial builds produce different (but each deterministic)
/// sample sets for the same seed.
fn precompute_t_prior_effects(l_r: &Matrix9, seed: u64) -> Vec<f64> {
    use rand_distr::Gamma;

    #[cfg(feature = "parallel")]
    {
        let l_r = *l_r;
        (0..PRIOR_CALIBRATION_SAMPLES)
            .into_par_iter()
            .map(|i| {
                // Independent, reproducible RNG per Monte Carlo draw.
                let mut rng = Xoshiro256PlusPlus::seed_from_u64(counter_rng_seed(seed, i as u64));
                let gamma_dist = Gamma::new(NU / 2.0, 2.0 / NU).unwrap();

                // Sample λ ~ Gamma(ν/2, ν/2); the floor guards the division below.
                let lambda: f64 = gamma_dist.sample(&mut rng);
                let inv_sqrt_lambda = 1.0 / math::sqrt(lambda.max(DIAGONAL_FLOOR));

                // Sample z ~ N(0, I_9)
                let mut z = Vector9::zeros();
                for j in 0..9 {
                    z[j] = sample_standard_normal(&mut rng);
                }

                // Compute m = max|L_R z| / √λ
                let w = l_r * z;
                let max_w = w.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
                max_w * inv_sqrt_lambda
            })
            .collect()
    }

    #[cfg(not(feature = "parallel"))]
    {
        // Serial path: one RNG drives all draws sequentially.
        let mut rng = Xoshiro256PlusPlus::seed_from_u64(seed);
        let gamma_dist = Gamma::new(NU / 2.0, 2.0 / NU).unwrap();
        let mut effects = Vec::with_capacity(PRIOR_CALIBRATION_SAMPLES);

        for _ in 0..PRIOR_CALIBRATION_SAMPLES {
            // λ ~ Gamma(ν/2, ν/2); floored before the inverse square root.
            let lambda: f64 = gamma_dist.sample(&mut rng);
            let inv_sqrt_lambda = 1.0 / math::sqrt(lambda.max(DIAGONAL_FLOOR));

            // z ~ N(0, I_9)
            let mut z = Vector9::zeros();
            for i in 0..9 {
                z[i] = sample_standard_normal(&mut rng);
            }

            // m = max|L_R z| / √λ
            let w = l_r * z;
            let max_w = w.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
            effects.push(max_w * inv_sqrt_lambda);
        }
        effects
    }
}
916
/// Compute floor-rate constant c_floor for 9D model.
///
/// c_floor = q_95(max_k |Z_k|) where Z ~ N(0, Σ_rate)
///
/// Used for theta_floor computation: theta_floor_stat(n) = c_floor / sqrt(n)
///
/// Monte-Carlo estimate over `PRIOR_CALIBRATION_SAMPLES` draws, deterministic
/// for a fixed `seed`.
pub fn compute_c_floor_9d(sigma_rate: &Matrix9, seed: u64) -> f64 {
    // Factor Σ_rate = L·Lᵀ so correlated draws are Z = L·z with z ~ N(0, I₉).
    let chol = match Cholesky::new(*sigma_rate) {
        Some(c) => c,
        None => {
            // Fallback: use trace-based approximation
            // (Cholesky fails when the input is not positive-definite, e.g.
            // from numerical noise). Uses the RMS diagonal std-dev scaled by
            // 2.5 as a rough 95th percentile of the 9-coordinate max.
            let trace: f64 = (0..9).map(|i| sigma_rate[(i, i)]).sum();
            return math::sqrt(trace / 9.0) * 2.5; // Approximate 95th percentile
        }
    };
    let l = chol.l().into_owned();

    // Parallel MC sampling when feature enabled
    // (counter-based per-sample seeds keep the result independent of rayon's
    // work-stealing schedule).
    #[cfg(feature = "parallel")]
    let mut max_effects: Vec<f64> = (0..PRIOR_CALIBRATION_SAMPLES)
        .into_par_iter()
        .map(|i| {
            let mut rng = Xoshiro256PlusPlus::seed_from_u64(counter_rng_seed(seed, i as u64));
            let mut z = Vector9::zeros();
            for j in 0..9 {
                z[j] = sample_standard_normal(&mut rng);
            }
            let sample = l * z;
            sample.iter().map(|x| x.abs()).fold(0.0_f64, f64::max)
        })
        .collect();

    // Sequential path: one RNG stream seeded once.
    // NOTE(review): this draws a different sample sequence than the parallel
    // path (single stream vs. per-sample counter seeds), so c_floor differs
    // slightly between feature builds — confirm this is acceptable.
    #[cfg(not(feature = "parallel"))]
    let mut max_effects: Vec<f64> = {
        let mut rng = Xoshiro256PlusPlus::seed_from_u64(seed);
        let mut effects = Vec::with_capacity(PRIOR_CALIBRATION_SAMPLES);
        for _ in 0..PRIOR_CALIBRATION_SAMPLES {
            let mut z = Vector9::zeros();
            for i in 0..9 {
                z[i] = sample_standard_normal(&mut rng);
            }
            let sample = l * z;
            let max_effect = sample.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
            effects.push(max_effect);
        }
        effects
    };

    // 95th percentile using O(n) selection instead of O(n log n) sort
    // (`select_nth_unstable_by` partially reorders in place, hence `mut`;
    // `total_cmp` gives a total order even if NaNs were ever present).
    let idx =
        ((PRIOR_CALIBRATION_SAMPLES as f64 * 0.95) as usize).min(PRIOR_CALIBRATION_SAMPLES - 1);
    let (_, &mut percentile_95, _) = max_effects.select_nth_unstable_by(idx, |a, b| a.total_cmp(b));
    percentile_95
}
970
971/// Sample from standard normal using Box-Muller transform.
972fn sample_standard_normal<R: Rng>(rng: &mut R) -> f64 {
973    let u1: f64 = rng.random();
974    let u2: f64 = rng.random();
975    math::sqrt(-2.0 * math::ln(u1.max(1e-12))) * math::cos(2.0 * PI * u2)
976}
977
#[cfg(test)]
mod tests {
    use super::*;
    use crate::statistics::StatsSnapshot;

    /// Build a `Calibration` from simple round numbers so the expected values
    /// in the tests below can be derived by hand:
    /// sigma_rate = 1000·I, block_length = 10, 10_000 samples/sec,
    /// theta_eff = 100.
    fn make_test_calibration() -> Calibration {
        let snapshot = CalibrationSnapshot::new(
            StatsSnapshot {
                count: 1000,
                mean: 100.0,
                variance: 25.0,
                autocorr_lag1: 0.1,
            },
            StatsSnapshot {
                count: 1000,
                mean: 105.0,
                variance: 30.0,
                autocorr_lag1: 0.12,
            },
        );

        let sigma_rate = Matrix9::identity() * 1000.0;
        let theta_eff = 100.0;

        Calibration::new(
            sigma_rate,
            10,                  // block_length
            100.0,               // sigma_t
            Matrix9::identity(), // l_r (identity for tests)
            theta_eff,           // theta_ns
            5000,                // calibration_samples
            false,               // discrete_mode
            5.0,                 // mde_shift_ns
            10.0,                // mde_tail_ns
            snapshot,            // calibration_snapshot
            1.0,                 // timer_resolution_ns
            10000.0,             // samples_per_second
            10.0,                // c_floor
            18.48,               // projection_mismatch_thresh
            0.001,               // theta_tick
            theta_eff,           // theta_eff
            0.1,                 // theta_floor_initial
            42,                  // rng_seed
            1,                   // batch_k (no batching in tests)
        )
    }

    /// Covariance must shrink as 1/n_eff where n_eff = n / block_length.
    #[test]
    fn test_covariance_scaling() {
        let cal = make_test_calibration();
        // make_test_calibration uses sigma_rate[(0,0)] = 1000.0 and block_length = 10

        // v5.6: covariance_for_n uses n_eff = n / block_length
        // At n=1000 with block_length=10: n_eff = 100
        // sigma_n[(0,0)] = sigma_rate[(0,0)] / n_eff = 1000 / 100 = 10.0
        let cov_1000 = cal.covariance_for_n(1000);
        assert!(
            (cov_1000[(0, 0)] - 10.0).abs() < 1e-10,
            "expected 10.0, got {}",
            cov_1000[(0, 0)]
        );

        // At n=2000 with block_length=10: n_eff = 200
        // sigma_n[(0,0)] = 1000 / 200 = 5.0
        let cov_2000 = cal.covariance_for_n(2000);
        assert!(
            (cov_2000[(0, 0)] - 5.0).abs() < 1e-10,
            "expected 5.0, got {}",
            cov_2000[(0, 0)]
        );
    }

    /// n_eff must floor-divide by block_length and clamp to at least 1.
    #[test]
    fn test_n_eff() {
        let cal = make_test_calibration();
        // make_test_calibration uses block_length = 10

        // n_eff = max(1, floor(n / block_length))
        assert_eq!(cal.n_eff(100), 10);
        assert_eq!(cal.n_eff(1000), 100);
        assert_eq!(cal.n_eff(10), 1);
        assert_eq!(cal.n_eff(5), 1); // Clamped to 1 when n < block_length
        assert_eq!(cal.n_eff(0), 1); // Edge case: n=0 returns 1
    }

    /// n = 0 must not divide by zero — sigma_rate is returned unchanged.
    #[test]
    fn test_covariance_zero_n() {
        let cal = make_test_calibration();
        let cov = cal.covariance_for_n(0);
        // Should return sigma_rate unchanged (avoid NaN)
        assert!((cov[(0, 0)] - 1000.0).abs() < 1e-10);
    }

    /// Collection-time estimate is n / samples_per_second.
    #[test]
    fn test_estimate_collection_time() {
        let cal = make_test_calibration();

        // 10000 samples/sec -> 1000 samples takes 0.1 sec
        let time = cal.estimate_collection_time_secs(1000);
        assert!((time - 0.1).abs() < 1e-10);
    }

    /// With an identity rate covariance, the prior diagonal is σ²_prior
    /// (= 10² = 100) on every coordinate.
    #[test]
    fn test_compute_prior_cov_9d_unit_diagonal() {
        // Use identity matrix - correlation of identity is identity
        let sigma_rate = Matrix9::identity();
        let prior = compute_prior_cov_9d(&sigma_rate, 10.0, false);

        // R = Corr(I) = I (identity has unit diagonal, no off-diagonal correlation)
        // Λ₀ = σ²_prior × R = 100 × I
        // Each diagonal should be ~100 (σ² = 100)
        // Note: jitter adds ~1e-10 so expect ~100
        let expected = 100.0;
        for i in 0..9 {
            assert!(
                (prior[(i, i)] - expected).abs() < 1.0,
                "Diagonal {} was {}, expected ~{}",
                i,
                prior[(i, i)],
                expected
            );
        }
    }

    /// Sanity range for the MC 95th-percentile constant with σ = 10·I.
    #[test]
    fn test_c_floor_computation() {
        let sigma_rate = Matrix9::identity() * 100.0;
        let c_floor = compute_c_floor_9d(&sigma_rate, 42);

        // c_floor should be roughly sqrt(100) * 2 to 3 for 95th percentile of max
        assert!(c_floor > 15.0, "c_floor {} should be > 15", c_floor);
        assert!(c_floor < 40.0, "c_floor {} should be < 40", c_floor);
    }

    /// Pins the documented defaults of `CalibrationConfig`.
    #[test]
    fn test_calibration_config_default() {
        let config = CalibrationConfig::default();
        assert_eq!(config.calibration_samples, 5000);
        assert_eq!(config.bootstrap_iterations, 200);
        assert!((config.theta_ns - 100.0).abs() < 1e-10);
        assert!((config.timer_resolution_ns - 1.0).abs() < 1e-10);
        assert!(!config.skip_preflight);
        assert!(!config.force_discrete_mode);
    }

    // =========================================================================
    // Reference implementations for validation (original non-optimized versions)
    // =========================================================================

    /// Reference implementation: compute t-prior exceedance without sample reuse.
    /// This generates fresh samples for each call, matching the original implementation.
    fn reference_t_prior_exceedance(l_r: &Matrix9, sigma: f64, theta: f64, seed: u64) -> f64 {
        use rand_distr::Gamma;

        let mut rng = Xoshiro256PlusPlus::seed_from_u64(seed);
        let gamma_dist = Gamma::new(NU / 2.0, 2.0 / NU).unwrap();
        let mut count = 0usize;

        for _ in 0..PRIOR_CALIBRATION_SAMPLES {
            // λ ~ Gamma(ν/2, ν/2); the t-distribution scale is σ/√λ.
            let lambda: f64 = gamma_dist.sample(&mut rng);
            let scale = sigma / crate::math::sqrt(lambda.max(DIAGONAL_FLOOR));

            let mut z = Vector9::zeros();
            for i in 0..9 {
                z[i] = sample_standard_normal(&mut rng);
            }

            let delta = l_r * z * scale;
            let max_effect = delta.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
            if max_effect > theta {
                count += 1;
            }
        }

        count as f64 / PRIOR_CALIBRATION_SAMPLES as f64
    }

    /// Helper: compute exceedance using precomputed t-prior effects
    /// (mirrors the O(1)-per-iteration logic used inside calibration).
    fn optimized_t_prior_exceedance(normalized_effects: &[f64], sigma: f64, theta: f64) -> f64 {
        let threshold = theta / sigma;
        let count = normalized_effects
            .iter()
            .filter(|&&m| m > threshold)
            .count();
        count as f64 / normalized_effects.len() as f64
    }

    // =========================================================================
    // Tests verifying optimized implementations match reference
    // =========================================================================

    /// Smoke-compares the precompute-based exceedance against the reference.
    /// Only range validity is asserted; agreement is printed for inspection.
    #[test]
    fn test_t_prior_precompute_exceedance_matches_reference() {
        // Test that the optimized exceedance computation matches reference
        // for various sigma values
        let l_r = Matrix9::identity();
        let theta = 10.0;
        let seed = 12345u64;

        // Precompute effects using the optimized method
        let normalized_effects = precompute_t_prior_effects(&l_r, seed);

        // Test at multiple sigma values
        for sigma in [5.0, 10.0, 15.0, 20.0, 30.0] {
            let optimized = optimized_t_prior_exceedance(&normalized_effects, sigma, theta);
            let reference = reference_t_prior_exceedance(&l_r, sigma, theta, seed);

            // Allow some tolerance due to different RNG sequences
            // The key property is that exceedance should be monotonically increasing with sigma
            assert!(
                (0.0..=1.0).contains(&optimized),
                "Optimized exceedance {} out of range for sigma={}",
                optimized,
                sigma
            );
            assert!(
                (0.0..=1.0).contains(&reference),
                "Reference exceedance {} out of range for sigma={}",
                reference,
                sigma
            );

            // No numeric closeness is asserted here: the two implementations
            // consume different RNG streams, so their estimates agree only
            // statistically. The printout below is for manual inspection.
            println!(
                "sigma={}: optimized={:.4}, reference={:.4}",
                sigma, optimized, reference
            );
        }
    }

    /// Exceedance is monotone in sigma and saturates at 0 / 1 at the extremes.
    #[test]
    fn test_t_prior_exceedance_monotonicity() {
        // Key property: exceedance should increase with sigma
        let l_r = Matrix9::identity();
        let theta = 10.0;
        let seed = 42u64;

        let normalized_effects = precompute_t_prior_effects(&l_r, seed);

        let mut prev_exceedance = 0.0;
        for sigma in [1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0] {
            let exceedance = optimized_t_prior_exceedance(&normalized_effects, sigma, theta);

            assert!(
                exceedance >= prev_exceedance,
                "Exceedance should increase with sigma: sigma={}, exc={}, prev={}",
                sigma,
                exceedance,
                prev_exceedance
            );
            prev_exceedance = exceedance;
        }

        // At very large sigma, exceedance should approach 1
        let large_sigma_exc = optimized_t_prior_exceedance(&normalized_effects, 1000.0, theta);
        assert!(
            large_sigma_exc > 0.99,
            "Exceedance at large sigma should be ~1, got {}",
            large_sigma_exc
        );

        // At very small sigma, exceedance should approach 0
        let small_sigma_exc = optimized_t_prior_exceedance(&normalized_effects, 0.1, theta);
        assert!(
            small_sigma_exc < 0.01,
            "Exceedance at small sigma should be ~0, got {}",
            small_sigma_exc
        );
    }

    /// End-to-end: the bisection must land within 5 percentage points of
    /// the TARGET_EXCEEDANCE it searches for.
    #[test]
    fn test_calibrate_t_prior_scale_finds_target_exceedance() {
        // Test that the calibration finds a sigma_t that achieves ~62% exceedance
        let sigma_rate = Matrix9::identity() * 100.0;
        let theta_eff = 10.0;
        let n_cal = 5000;
        let discrete_mode = false;
        let seed = 42u64;

        let (sigma_t, l_r) =
            calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, seed);

        // Verify the calibrated sigma_t achieves target exceedance
        let normalized_effects = precompute_t_prior_effects(&l_r, seed);
        let exceedance = optimized_t_prior_exceedance(&normalized_effects, sigma_t, theta_eff);

        assert!(
            (exceedance - TARGET_EXCEEDANCE).abs() < 0.05,
            "Calibrated t-prior exceedance {} should be near target {}",
            exceedance,
            TARGET_EXCEEDANCE
        );
    }

    /// Two calibration runs with the same seed must agree bit-for-bit.
    #[test]
    fn test_calibration_determinism() {
        // Same seed should give same results
        let sigma_rate = Matrix9::identity() * 100.0;
        let theta_eff = 10.0;
        let n_cal = 5000;
        let discrete_mode = false;
        let seed = 12345u64;

        let (sigma_t_1, _) =
            calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, seed);
        let (sigma_t_2, _) =
            calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, seed);

        assert!(
            (sigma_t_1 - sigma_t_2).abs() < 1e-10,
            "Same seed should give same sigma_t: {} vs {}",
            sigma_t_1,
            sigma_t_2
        );
    }

    /// Distributional sanity checks on the precomputed effect magnitudes.
    #[test]
    fn test_precomputed_effects_distribution() {
        // Test that precomputed effects follow expected distribution
        let l_r = Matrix9::identity();
        let seed = 42u64;

        let effects = precompute_t_prior_effects(&l_r, seed);

        // All effects should be positive (they're max of absolute values)
        assert!(
            effects.iter().all(|&m| m > 0.0),
            "All effects should be positive"
        );

        // Compute mean and check it's reasonable
        let mean: f64 = effects.iter().sum::<f64>() / effects.len() as f64;
        // For t_4 with identity L_R, mean of max|z|/sqrt(lambda) should be roughly 2-4
        assert!(
            mean > 1.0 && mean < 10.0,
            "Mean effect {} should be in reasonable range",
            mean
        );

        // Check variance is non-zero (samples are diverse)
        let variance: f64 =
            effects.iter().map(|&m| (m - mean).powi(2)).sum::<f64>() / (effects.len() - 1) as f64;
        assert!(variance > 0.1, "Effects should have non-trivial variance");
    }

    /// Wall-clock benchmark of `calibrate_t_prior_scale`; prints timings
    /// only, asserts nothing. (std::time is fine here: tests are not no_std.)
    #[test]
    #[ignore] // Slow benchmark - run with `cargo test -- --ignored`
    fn bench_calibration_timing() {
        use std::time::Instant;

        let sigma_rate = Matrix9::identity() * 10000.0;
        let theta_eff = 100.0;
        let n_cal = 5000;
        let discrete_mode = false;

        // Warm up
        let _ = calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, 1);

        // Benchmark t-prior calibration
        let iterations = 10;
        let start = Instant::now();
        for i in 0..iterations {
            let _ = calibrate_t_prior_scale(&sigma_rate, theta_eff, n_cal, discrete_mode, i as u64);
        }
        let t_prior_time = start.elapsed();

        println!(
            "\n=== Calibration Performance ===\n\
             \n\
             T-prior calibration: {:?} per call\n\
             ({} iterations averaged)",
            t_prior_time / iterations as u32,
            iterations
        );
    }
}