use std::time::{Duration, Instant};
use tacet_core::adaptive::{
calibrate_t_prior_scale, compute_achievable_at_max, compute_c_floor_9d, compute_prior_cov_9d,
is_threshold_elevated,
};
use tacet_core::analysis::{compute_bayes_gibbs, compute_effect_estimate, estimate_mde};
use tacet_core::constants::{
DEFAULT_BOOTSTRAP_ITERATIONS, DEFAULT_FAIL_THRESHOLD, DEFAULT_PASS_THRESHOLD,
};
use tacet_core::result::{
Diagnostics, EffectEstimate, Exploitability, InconclusiveReason, IssueCode, MeasurementQuality,
Outcome, QualityIssue,
};
use tacet_core::statistics::{
bootstrap_difference_covariance, bootstrap_difference_covariance_discrete,
compute_deciles_inplace, AcquisitionStream,
};
use tacet_core::types::AttackerModel;
use tacet_core::Vector9;
/// Configuration for a one-shot (non-adaptive) Bayesian timing-leak analysis.
#[derive(Debug, Clone)]
pub struct SinglePassConfig {
    /// Practical-significance threshold θ in nanoseconds; effects below this
    /// are considered benign. A floor may be applied if the data cannot
    /// resolve it (see `theta_eff` in the outcome).
    pub theta_ns: f64,
    /// Leak probability below which the outcome is `Pass`.
    pub pass_threshold: f64,
    /// Leak probability above which the outcome is `Fail`.
    pub fail_threshold: f64,
    /// Number of bootstrap resamples for the decile-difference covariance.
    pub bootstrap_iterations: usize,
    /// Resolution of the timer that produced the samples, in nanoseconds.
    pub timer_resolution_ns: f64,
    /// RNG seed shared by the bootstrap and the Gibbs sampler (reproducibility).
    pub seed: u64,
    /// Max allowed posterior/prior variance ratio before the data are
    /// declared uninformative (`Inconclusive`).
    pub max_variance_ratio: f64,
}
impl Default for SinglePassConfig {
fn default() -> Self {
Self {
theta_ns: 100.0, pass_threshold: DEFAULT_PASS_THRESHOLD,
fail_threshold: DEFAULT_FAIL_THRESHOLD,
bootstrap_iterations: DEFAULT_BOOTSTRAP_ITERATIONS,
timer_resolution_ns: 1.0, seed: 0xDEADBEEF,
max_variance_ratio: 0.95,
}
}
}
impl SinglePassConfig {
    /// Builds a configuration whose threshold matches the given attacker
    /// model's detection capability; every other field keeps its default.
    pub fn for_attacker(model: AttackerModel) -> Self {
        let mut config = Self::default();
        config.theta_ns = model.to_threshold_ns();
        config
    }

    /// Returns this configuration with the timer resolution overridden.
    pub fn with_timer_resolution(self, resolution_ns: f64) -> Self {
        Self {
            timer_resolution_ns: resolution_ns,
            ..self
        }
    }
}
/// Result of a single-pass analysis; mirrors the key fields of `outcome`
/// at the top level for convenient access.
#[derive(Debug, Clone)]
pub struct SinglePassResult {
    /// Full classification (Pass / Fail / Inconclusive) with diagnostics.
    pub outcome: Outcome,
    /// Posterior probability that a leak exceeding the effective threshold exists.
    pub leak_probability: f64,
    /// Posterior effect-size summary.
    pub effect_estimate: EffectEstimate,
    /// Measurement quality derived from the minimum detectable effect.
    pub quality: MeasurementQuality,
    /// Number of sample pairs actually analyzed (min of the two input lengths).
    pub samples_used: usize,
    /// Wall-clock time spent in the analysis.
    pub analysis_time: Duration,
}
pub fn analyze_single_pass(
baseline_ns: &[f64],
test_ns: &[f64],
config: &SinglePassConfig,
) -> SinglePassResult {
let start_time = Instant::now();
let n = baseline_ns.len().min(test_ns.len());
const MIN_SAMPLES: usize = 100;
if n < MIN_SAMPLES {
let effect = EffectEstimate::default();
return SinglePassResult {
outcome: Outcome::Inconclusive {
reason: InconclusiveReason::DataTooNoisy {
message: format!(
"Insufficient samples: {} (need at least {})",
n, MIN_SAMPLES
),
guidance: "Collect more timing measurements".to_string(),
},
leak_probability: 0.5,
effect: effect.clone(),
quality: MeasurementQuality::TooNoisy,
diagnostics: make_default_diagnostics(config.timer_resolution_ns),
samples_used: n,
theta_user: config.theta_ns,
theta_eff: config.theta_ns,
theta_floor: f64::INFINITY,
},
leak_probability: 0.5,
effect_estimate: effect,
quality: MeasurementQuality::TooNoisy,
samples_used: n,
analysis_time: start_time.elapsed(),
};
}
let baseline = &baseline_ns[..n];
let test = &test_ns[..n];
let mut baseline_sorted = baseline.to_vec();
let mut test_sorted = test.to_vec();
let q_baseline = compute_deciles_inplace(&mut baseline_sorted);
let q_test = compute_deciles_inplace(&mut test_sorted);
let delta_hat: Vector9 = q_baseline - q_test;
let unique_baseline: std::collections::HashSet<i64> =
baseline.iter().map(|&v| v as i64).collect();
let unique_test: std::collections::HashSet<i64> = test.iter().map(|&v| v as i64).collect();
let min_uniqueness =
(unique_baseline.len() as f64 / n as f64).min(unique_test.len() as f64 / n as f64);
let discrete_mode = min_uniqueness < 0.10;
let cov_estimate = if discrete_mode {
bootstrap_difference_covariance_discrete(
baseline,
test,
config.bootstrap_iterations,
config.seed,
)
} else {
let mut acquisition_stream = AcquisitionStream::with_capacity(2 * n);
acquisition_stream.push_batch_interleaved(baseline, test);
let interleaved = acquisition_stream.to_timing_samples();
bootstrap_difference_covariance(
&interleaved,
config.bootstrap_iterations,
config.seed,
false, )
};
let sigma = cov_estimate.matrix;
if !cov_estimate.is_stable() {
let effect = EffectEstimate::default();
return SinglePassResult {
outcome: Outcome::Inconclusive {
reason: InconclusiveReason::DataTooNoisy {
message: "Covariance matrix estimation failed".to_string(),
guidance: "Data may have too little variance or numerical issues".to_string(),
},
leak_probability: 0.5,
effect: effect.clone(),
quality: MeasurementQuality::TooNoisy,
diagnostics: make_default_diagnostics(config.timer_resolution_ns),
samples_used: n,
theta_user: config.theta_ns,
theta_eff: config.theta_ns,
theta_floor: f64::INFINITY,
},
leak_probability: 0.5,
effect_estimate: effect,
quality: MeasurementQuality::TooNoisy,
samples_used: n,
analysis_time: start_time.elapsed(),
};
}
let sigma_rate = sigma * (n as f64);
let c_floor = compute_c_floor_9d(&sigma_rate, config.seed);
let theta_floor_stat = c_floor / (n as f64).sqrt();
let theta_tick = config.timer_resolution_ns;
let theta_floor = theta_floor_stat.max(theta_tick);
let theta_eff = if config.theta_ns > 0.0 {
config.theta_ns.max(theta_floor)
} else {
theta_floor
};
let (sigma_t, l_r) =
calibrate_t_prior_scale(&sigma_rate, theta_eff, n, discrete_mode, config.seed);
let prior_cov_marginal = compute_prior_cov_9d(
&sigma_rate,
sigma_t * std::f64::consts::SQRT_2,
discrete_mode,
);
let mde = estimate_mde(&sigma, 0.05); let quality = MeasurementQuality::from_mde_ns(mde.mde_ns);
if std::env::var("TIMING_ORACLE_DEBUG").is_ok() {
eprintln!(
"[DEBUG] n = {}, discrete_mode = {}, block_length = {}",
n, discrete_mode, cov_estimate.block_size
);
eprintln!("[DEBUG] theta_user = {:.2} ns, theta_floor_stat = {:.2} ns, theta_tick = {:.2} ns, theta_floor = {:.2} ns, theta_eff = {:.2} ns",
config.theta_ns, theta_floor_stat, theta_tick, theta_floor, theta_eff);
eprintln!("[DEBUG] c_floor = {:.2} ns·√n", c_floor);
eprintln!("[DEBUG] MDE: {:.2} ns", mde.mde_ns);
eprintln!("[DEBUG] delta_hat = {:?}", delta_hat.as_slice());
eprintln!(
"[DEBUG] sigma diagonal = [{:.2e}, {:.2e}, {:.2e}, ..., {:.2e}]",
sigma[(0, 0)],
sigma[(1, 1)],
sigma[(2, 2)],
sigma[(8, 8)]
);
eprintln!("[DEBUG] sigma_t = {:.2e}", sigma_t);
}
let bayes_result = compute_bayes_gibbs(
&delta_hat,
&sigma,
sigma_t,
&l_r,
theta_eff,
Some(config.seed),
);
let leak_probability = bayes_result.leak_probability;
if std::env::var("TIMING_ORACLE_DEBUG").is_ok() {
eprintln!(
"[DEBUG] bayes_result.leak_probability = {}",
leak_probability
);
eprintln!(
"[DEBUG] lambda: mean={:.3}, sd={:.3}, cv={:.3}, ess={:.1}, mixing_ok={}",
bayes_result.lambda_mean,
bayes_result.lambda_sd,
bayes_result.lambda_cv,
bayes_result.lambda_ess,
bayes_result.lambda_mixing_ok
);
eprintln!(
"[DEBUG] delta_post = [{:.1}, {:.1}, {:.1}, ..., {:.1}]",
bayes_result.delta_post[0],
bayes_result.delta_post[1],
bayes_result.delta_post[2],
bayes_result.delta_post[8]
);
}
let effect_estimate = compute_effect_estimate(&bayes_result.delta_draws, theta_eff);
let variance_ratio = compute_variance_ratio(&bayes_result.lambda_post, &prior_cov_marginal);
let data_too_noisy = variance_ratio > config.max_variance_ratio;
if std::env::var("TIMING_ORACLE_DEBUG").is_ok() {
eprintln!(
"[DEBUG] variance_ratio = {:.3}, max = {:.3}, data_too_noisy = {}",
variance_ratio, config.max_variance_ratio, data_too_noisy
);
}
let mut quality_issues = Vec::new();
if theta_eff > config.theta_ns && config.theta_ns > 0.0 {
quality_issues.push(QualityIssue {
code: IssueCode::ThresholdIssue,
message: format!(
"Threshold elevated from {:.0} ns to {:.1} ns (measurement floor)",
config.theta_ns, theta_eff
),
guidance: "For better resolution, use more samples or a higher-resolution timer."
.to_string(),
});
}
if !bayes_result.kappa_mixing_ok {
quality_issues.push(QualityIssue {
code: IssueCode::NumericalIssue,
message: format!(
"κ chain mixing poor (CV={:.2}, ESS={:.0})",
bayes_result.kappa_cv, bayes_result.kappa_ess
),
guidance: "Posterior may be unreliable; consider longer time budget.".to_string(),
});
}
if bayes_result.kappa_mean < 0.3 {
quality_issues.push(QualityIssue {
code: IssueCode::LikelihoodInflated,
message: format!(
"Likelihood covariance inflated ~{:.1}x due to data/model mismatch",
1.0 / bayes_result.kappa_mean
),
guidance: "Uncertainty was increased for robustness. Effect estimates remain valid."
.to_string(),
});
}
let block_length = cov_estimate.block_size;
let diagnostics = Diagnostics {
dependence_length: block_length,
effective_sample_size: n / block_length.max(1),
stationarity_ratio: 1.0, stationarity_ok: true,
outlier_rate_baseline: 0.0, outlier_rate_sample: 0.0,
outlier_asymmetry_ok: true,
discrete_mode,
timer_resolution_ns: config.timer_resolution_ns,
duplicate_fraction: 1.0 - min_uniqueness,
preflight_ok: true, preflight_warnings: Vec::new(),
calibration_samples: n,
total_time_secs: start_time.elapsed().as_secs_f64(),
warnings: Vec::new(),
quality_issues,
seed: Some(config.seed),
attacker_model: None,
threshold_ns: config.theta_ns,
timer_name: "external".to_string(),
platform: format!("{}-{}", std::env::consts::OS, std::env::consts::ARCH),
timer_fallback_reason: None, gibbs_iters_total: 256,
gibbs_burnin: 64,
gibbs_retained: 192,
lambda_mean: bayes_result.lambda_mean,
lambda_sd: bayes_result.lambda_sd,
lambda_cv: bayes_result.lambda_cv,
lambda_ess: bayes_result.lambda_ess,
lambda_mixing_ok: bayes_result.lambda_mixing_ok,
kappa_mean: bayes_result.kappa_mean,
kappa_sd: bayes_result.kappa_sd,
kappa_cv: bayes_result.kappa_cv,
kappa_ess: bayes_result.kappa_ess,
kappa_mixing_ok: bayes_result.kappa_mixing_ok,
};
let outcome = if data_too_noisy {
Outcome::Inconclusive {
reason: InconclusiveReason::DataTooNoisy {
message: format!(
"Posterior variance is {:.0}% of prior; data not informative",
variance_ratio * 100.0
),
guidance: "Try: more samples, higher-resolution timer, reduce system load"
.to_string(),
},
leak_probability,
effect: effect_estimate.clone(),
quality,
diagnostics,
samples_used: n,
theta_user: config.theta_ns,
theta_eff,
theta_floor,
}
} else if leak_probability > config.fail_threshold {
let exploitability = Exploitability::from_effect_ns(effect_estimate.max_effect_ns.abs());
Outcome::Fail {
leak_probability,
effect: effect_estimate.clone(),
exploitability,
quality,
diagnostics,
samples_used: n,
theta_user: config.theta_ns,
theta_eff,
theta_floor,
}
} else if leak_probability < config.pass_threshold {
if is_threshold_elevated(theta_eff, config.theta_ns, theta_tick) {
let achievable_at_max = compute_achievable_at_max(
c_floor,
theta_tick,
config.theta_ns,
n, block_length, );
Outcome::Inconclusive {
reason: InconclusiveReason::ThresholdElevated {
theta_user: config.theta_ns,
theta_eff,
leak_probability_at_eff: leak_probability,
meets_pass_criterion_at_eff: true,
achievable_at_max,
message: format!(
"Threshold elevated from {:.0}ns to {:.1}ns; P={:.1}% at elevated threshold",
config.theta_ns, theta_eff, leak_probability * 100.0
),
guidance: "Use more samples or a higher-resolution timer to achieve the requested threshold.".to_string(),
},
leak_probability,
effect: effect_estimate.clone(),
quality,
diagnostics,
samples_used: n,
theta_user: config.theta_ns,
theta_eff,
theta_floor,
}
} else {
Outcome::Pass {
leak_probability,
effect: effect_estimate.clone(),
quality,
diagnostics,
samples_used: n,
theta_user: config.theta_ns,
theta_eff,
theta_floor,
}
}
} else {
Outcome::Inconclusive {
reason: InconclusiveReason::SampleBudgetExceeded {
current_probability: leak_probability,
samples_collected: n,
},
leak_probability,
effect: effect_estimate.clone(),
quality,
diagnostics,
samples_used: n,
theta_user: config.theta_ns,
theta_eff,
theta_floor,
}
};
SinglePassResult {
outcome,
leak_probability,
effect_estimate,
quality,
samples_used: n,
analysis_time: start_time.elapsed(),
}
}
/// Mean ratio of posterior to prior marginal variances over the nine deciles.
///
/// Values near 1 indicate the data barely updated the prior (uninformative);
/// values near 0 indicate a sharply concentrated posterior. Dimensions whose
/// prior variance is numerically zero (≤ 1e-12) contribute nothing, but the
/// divisor stays 9 so degenerate dimensions pull the ratio down.
fn compute_variance_ratio(
    posterior_cov: &tacet_core::Matrix9,
    prior_cov: &tacet_core::Matrix9,
) -> f64 {
    let total: f64 = (0..9)
        .map(|i| (prior_cov[(i, i)], posterior_cov[(i, i)]))
        .filter(|&(prior_var, _)| prior_var > 1e-12)
        .map(|(prior_var, post_var)| post_var / prior_var)
        .sum();
    total / 9.0
}
/// Placeholder diagnostics for early exits where no analysis was performed:
/// neutral/benign values everywhere, with only the timer resolution carried
/// through from the caller.
fn make_default_diagnostics(timer_resolution_ns: f64) -> Diagnostics {
    Diagnostics {
        // Timing/measurement characteristics (nothing was measured).
        timer_resolution_ns,
        timer_name: "external".to_string(),
        timer_fallback_reason: None,
        discrete_mode: false,
        duplicate_fraction: 0.0,
        // Sample-structure estimates.
        dependence_length: 1,
        effective_sample_size: 0,
        stationarity_ratio: 1.0,
        stationarity_ok: true,
        outlier_rate_baseline: 0.0,
        outlier_rate_sample: 0.0,
        outlier_asymmetry_ok: true,
        // Preflight / bookkeeping.
        preflight_ok: true,
        preflight_warnings: Vec::new(),
        calibration_samples: 0,
        total_time_secs: 0.0,
        warnings: Vec::new(),
        quality_issues: Vec::new(),
        seed: Some(0),
        attacker_model: None,
        threshold_ns: 0.0,
        platform: format!("{}-{}", std::env::consts::OS, std::env::consts::ARCH),
        // Gibbs sampler never ran.
        gibbs_iters_total: 0,
        gibbs_burnin: 0,
        gibbs_retained: 0,
        lambda_mean: 1.0,
        lambda_sd: 0.0,
        lambda_cv: 0.0,
        lambda_ess: 0.0,
        lambda_mixing_ok: false,
        kappa_mean: 1.0,
        kappa_sd: 0.0,
        kappa_cv: 0.0,
        kappa_ess: 0.0,
        kappa_mixing_ok: false,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::SeedableRng;
    use rand_distr::{Distribution, Normal};

    /// Draws `n` i.i.d. Gaussian pseudo-timings from a deterministically
    /// seeded PRNG.
    fn generate_samples(mean: f64, std: f64, n: usize, seed: u64) -> Vec<f64> {
        let mut prng = rand::rngs::StdRng::seed_from_u64(seed);
        let gaussian = Normal::new(mean, std).unwrap();
        std::iter::repeat_with(|| gaussian.sample(&mut prng))
            .take(n)
            .collect()
    }

    #[test]
    fn test_no_effect_passes() {
        // Identical distributions, different seeds: no true effect.
        let base = generate_samples(1000.0, 50.0, 1000, 42);
        let probe = generate_samples(1000.0, 50.0, 1000, 43);
        let cfg = SinglePassConfig {
            theta_ns: 100.0,
            ..Default::default()
        };
        let res = analyze_single_pass(&base, &probe, &cfg);
        assert!(
            res.leak_probability < 0.5,
            "Expected low leak probability for null effect, got {}",
            res.leak_probability
        );
    }

    #[test]
    fn test_large_effect_fails() {
        // 200 ns shift is twice the 100 ns threshold: must be flagged.
        let base = generate_samples(1000.0, 50.0, 1000, 42);
        let probe = generate_samples(1200.0, 50.0, 1000, 43);
        let cfg = SinglePassConfig {
            theta_ns: 100.0,
            ..Default::default()
        };
        let res = analyze_single_pass(&base, &probe, &cfg);
        assert!(
            res.leak_probability > 0.9,
            "Expected high leak probability for 200ns effect, got {}",
            res.leak_probability
        );
        assert!(matches!(res.outcome, Outcome::Fail { .. }));
    }

    #[test]
    fn test_effect_below_threshold_passes() {
        // 50 ns shift sits under the 100 ns threshold.
        let base = generate_samples(1000.0, 50.0, 1000, 42);
        let probe = generate_samples(1050.0, 50.0, 1000, 43);
        let cfg = SinglePassConfig {
            theta_ns: 100.0,
            ..Default::default()
        };
        let res = analyze_single_pass(&base, &probe, &cfg);
        assert!(
            res.leak_probability < 0.95,
            "Expected lower leak probability for sub-threshold effect, got {}",
            res.leak_probability
        );
    }

    #[test]
    fn test_insufficient_samples() {
        // 50 < MIN_SAMPLES triggers the early "too noisy" exit.
        let base = vec![100.0; 50];
        let probe = vec![100.0; 50];
        let cfg = SinglePassConfig::default();
        let res = analyze_single_pass(&base, &probe, &cfg);
        assert!(matches!(
            res.outcome,
            Outcome::Inconclusive {
                reason: InconclusiveReason::DataTooNoisy { .. },
                ..
            }
        ));
    }

    #[test]
    fn test_diagnostics_populated() {
        let base = generate_samples(1000.0, 50.0, 1000, 42);
        let probe = generate_samples(1000.0, 50.0, 1000, 43);
        let cfg = SinglePassConfig::default();
        let res = analyze_single_pass(&base, &probe, &cfg);
        match &res.outcome {
            Outcome::Pass { diagnostics, .. }
            | Outcome::Fail { diagnostics, .. }
            | Outcome::Inconclusive { diagnostics, .. } => {
                assert!(
                    diagnostics.dependence_length > 0,
                    "Block length should be > 0"
                );
                assert!(diagnostics.effective_sample_size > 0, "ESS should be > 0");
            }
            _ => {}
        }
    }

    #[test]
    fn test_timer_resolution_config() {
        let base = generate_samples(1000.0, 50.0, 1000, 42);
        let probe = generate_samples(1000.0, 50.0, 1000, 43);
        let cfg = SinglePassConfig::for_attacker(AttackerModel::SharedHardware)
            .with_timer_resolution(42.0);
        let res = analyze_single_pass(&base, &probe, &cfg);
        assert!(res.samples_used == 1000);
    }
}