use std::env;
use std::hash::Hash;
use std::time::{Duration, Instant};
use rand::seq::SliceRandom;
use rand::SeedableRng;
use crate::adaptive::{
calibrate, run_adaptive, AdaptiveConfig, AdaptiveOutcome, AdaptiveState, Calibration,
CalibrationConfig, InconclusiveReason as AdaptiveInconclusiveReason,
};
use crate::analysis::compute_max_effect_ci;
use crate::config::Config;
use crate::constants::DEFAULT_SEED;
use crate::helpers::InputPair;
use crate::measurement::{BoxedTimer, TimerFallbackReason, TimerSpec};
use crate::result::{
BatchingInfo, Diagnostics, EffectEstimate, Exploitability, InconclusiveReason, IssueCode,
MeasurementQuality, Outcome, QualityIssue, ResearchOutcome, ResearchStatus,
};
use crate::types::{AttackerModel, Class};
/// Builder-style entry point for running timing-leak tests.
///
/// Holds the measurement/analysis [`Config`] and the requested timer backend;
/// all builder methods consume `self` and return it, so configuration chains.
#[derive(Debug, Clone)]
pub struct TimingOracle {
    // Tunable measurement parameters (budgets, thresholds, seeds, ...).
    config: Config,
    // Which timer implementation to use; `TimerSpec::Auto` by default.
    timer_spec: TimerSpec,
}
impl TimingOracle {
/// Create an oracle preconfigured for the given attacker model.
///
/// Starts from `Config::default()` with only the attacker model overridden,
/// and uses automatic timer selection.
pub fn for_attacker(model: AttackerModel) -> Self {
    let config = Config {
        attacker_model: Some(model),
        ..Config::default()
    };
    Self {
        config,
        timer_spec: TimerSpec::Auto,
    }
}
/// Select a specific timer backend, overriding the default `TimerSpec::Auto`.
pub fn timer_spec(mut self, spec: TimerSpec) -> Self {
    self.timer_spec = spec;
    self
}
/// Use the portable system timer (always available, lowest precision).
pub fn system_timer(self) -> Self {
    self.timer_spec(TimerSpec::SystemTimer)
}
/// Require a high-precision timer; timer creation may later report a
/// fallback reason if one is unavailable (see `TimerSpec::create_timer`).
pub fn require_high_precision(self) -> Self {
    self.timer_spec(TimerSpec::RequireHighPrecision)
}
/// Require a cycle-accurate timer (e.g. a hardware cycle counter).
pub fn require_cycle_accurate(self) -> Self {
    self.timer_spec(TimerSpec::RequireCycleAccurate)
}
/// Set the wall-clock budget for the whole test run.
pub fn time_budget(mut self, duration: Duration) -> Self {
    self.config.time_budget = duration;
    self
}
/// Convenience wrapper around [`Self::time_budget`] taking whole seconds.
pub fn time_budget_secs(mut self, secs: u64) -> Self {
    self.config.time_budget = Duration::from_secs(secs);
    self
}
/// Cap the number of adaptive samples collected after calibration.
///
/// # Panics
/// Panics if `n` is zero.
pub fn max_samples(mut self, n: usize) -> Self {
    assert!(n > 0, "max_samples must be > 0 (got {})", n);
    self.config.max_samples = n;
    self
}
/// Number of measurement pairs collected per adaptive-loop iteration.
///
/// # Panics
/// Panics if `n` is zero.
pub fn batch_size(mut self, n: usize) -> Self {
    assert!(n > 0, "batch_size must be > 0 (got {})", n);
    self.config.batch_size = n;
    self
}
/// Number of samples per class used for the initial calibration phase.
///
/// # Panics
/// Panics if `n` is zero.
pub fn calibration_samples(mut self, n: usize) -> Self {
    assert!(n > 0, "calibration_samples must be > 0 (got {})", n);
    self.config.calibration_samples = n;
    self
}
/// Posterior leak-probability below which the test concludes "Pass".
///
/// Must lie strictly inside (0, 1) and stay below the current
/// `fail_threshold`, so the two decision boundaries cannot cross.
///
/// # Panics
/// Panics if `threshold` is outside (0, 1) or >= the configured
/// `fail_threshold`.
pub fn pass_threshold(mut self, threshold: f64) -> Self {
    assert!(
        threshold > 0.0 && threshold < 1.0,
        "pass_threshold must be in (0, 1), got {}",
        threshold
    );
    assert!(
        threshold < self.config.fail_threshold,
        "pass_threshold must be < fail_threshold"
    );
    self.config.pass_threshold = threshold;
    self
}
/// Posterior leak-probability above which the test concludes "Fail".
///
/// Mirror-image validation of [`Self::pass_threshold`]: must lie in (0, 1)
/// and stay above the current `pass_threshold`.
///
/// # Panics
/// Panics if `threshold` is outside (0, 1) or <= the configured
/// `pass_threshold`.
pub fn fail_threshold(mut self, threshold: f64) -> Self {
    assert!(
        threshold > 0.0 && threshold < 1.0,
        "fail_threshold must be in (0, 1), got {}",
        threshold
    );
    assert!(
        threshold > self.config.pass_threshold,
        "fail_threshold must be > pass_threshold"
    );
    self.config.fail_threshold = threshold;
    self
}
/// Number of untimed warmup invocations before any measurement (0 allowed).
pub fn warmup(mut self, n: usize) -> Self {
    self.config.warmup = n;
    self
}
/// Bootstrap iteration count used for coefficient-of-variation estimation.
///
/// # Panics
/// Panics if `n` is zero.
pub fn cov_bootstrap_iterations(mut self, n: usize) -> Self {
    assert!(n > 0, "cov_bootstrap_iterations must be > 0, got {}", n);
    self.config.cov_bootstrap_iterations = n;
    self
}
/// Fraction of samples kept after outlier trimming, in (0, 1].
///
/// # Panics
/// Panics if `p` is outside (0, 1].
pub fn outlier_percentile(mut self, p: f64) -> Self {
    assert!(
        p > 0.0 && p <= 1.0,
        "outlier_percentile must be in (0, 1], got {}",
        p
    );
    self.config.outlier_percentile = p;
    self
}
/// Prior probability that the operation does NOT leak, in (0, 1).
///
/// # Panics
/// Panics if `p` is outside (0, 1).
pub fn prior_no_leak(mut self, p: f64) -> Self {
    assert!(
        p > 0.0 && p < 1.0,
        "prior_no_leak must be in (0, 1), got {}",
        p
    );
    self.config.prior_no_leak = p;
    self
}
/// Fix the RNG seed used for measurement scheduling, for reproducible runs.
pub fn seed(mut self, seed: u64) -> Self {
    self.config.measurement_seed = Some(seed);
    self
}
/// Force the discrete-distribution analysis mode regardless of auto-detection.
pub fn force_discrete_mode(mut self, force: bool) -> Self {
    self.config.force_discrete_mode = force;
    self
}
/// Enable/disable pinning the measurement thread to one CPU (best effort).
pub fn cpu_affinity(mut self, enabled: bool) -> Self {
    self.config.cpu_affinity = enabled;
    self
}
/// Enable/disable elevating thread priority during measurement (best effort).
pub fn thread_priority(mut self, enabled: bool) -> Self {
    self.config.thread_priority = enabled;
    self
}
/// Busy-spin duration before measuring, to let the CPU frequency settle.
/// `0` disables stabilization.
pub fn frequency_stabilization_ms(mut self, ms: u64) -> Self {
    self.config.frequency_stabilization_ms = ms;
    self
}
/// Read-only access to the effective configuration.
pub fn config(&self) -> &Config {
    &self.config
}
/// Override configuration from `TO_*` environment variables.
///
/// Recognized: `TO_TIME_BUDGET_SECS`, `TO_MAX_SAMPLES`, `TO_BATCH_SIZE`,
/// `TO_CALIBRATION_SAMPLES`, `TO_PASS_THRESHOLD`, `TO_FAIL_THRESHOLD`,
/// `TO_MIN_EFFECT_NS`, `TO_SEED`. Unparsable or out-of-range values are
/// silently ignored rather than panicking (unlike the builder setters).
///
/// NOTE(review): the threshold checks are order-dependent — `TO_PASS_THRESHOLD`
/// is validated against the *current* fail threshold before `TO_FAIL_THRESHOLD`
/// is applied, so some jointly-valid pairs may be partially rejected; confirm
/// this is intended.
pub fn from_env(mut self) -> Self {
    if let Some(secs) = parse_u64_env("TO_TIME_BUDGET_SECS") {
        self.config.time_budget = Duration::from_secs(secs);
    }
    if let Some(n) = parse_usize_env("TO_MAX_SAMPLES") {
        self.config.max_samples = n;
    }
    if let Some(n) = parse_usize_env("TO_BATCH_SIZE") {
        self.config.batch_size = n;
    }
    if let Some(n) = parse_usize_env("TO_CALIBRATION_SAMPLES") {
        self.config.calibration_samples = n;
    }
    if let Some(p) = parse_f64_env("TO_PASS_THRESHOLD") {
        if p > 0.0 && p < 1.0 && p < self.config.fail_threshold {
            self.config.pass_threshold = p;
        }
    }
    if let Some(p) = parse_f64_env("TO_FAIL_THRESHOLD") {
        if p > 0.0 && p < 1.0 && p > self.config.pass_threshold {
            self.config.fail_threshold = p;
        }
    }
    if let Some(ns) = parse_f64_env("TO_MIN_EFFECT_NS") {
        if ns >= 0.0 {
            self.config.min_effect_of_concern_ns = ns;
        }
    }
    if let Some(seed) = parse_u64_env("TO_SEED") {
        self.config.measurement_seed = Some(seed);
    }
    self
}
/// Run the full timing-leak test against `operation`.
///
/// Pipeline: environment setup (affinity/priority/frequency stabilization) →
/// input generation and warmup → pilot timing to size measurement batches →
/// calibration → either research mode or the adaptive pass/fail loop.
///
/// * `inputs` supplies paired baseline/sample input generators.
/// * `operation` is the closure under test, invoked with each input.
///
/// Returns an [`Outcome`]: `Pass`, `Fail`, `Inconclusive`, `Unmeasurable`,
/// or `Research` depending on configuration and what the data supports.
pub fn test<T, F1, F2, F>(self, inputs: InputPair<T, F1, F2>, mut operation: F) -> Outcome
where
    T: Clone + Hash,
    F1: FnMut() -> T,
    F2: FnMut() -> T,
    F: FnMut(&T),
{
    let start_time = Instant::now();
    // --- Environment setup (all best-effort; failures only log at debug) ---
    // Guard is held for the duration of the test; dropping it unpins.
    let _affinity_guard = if self.config.cpu_affinity {
        match crate::measurement::affinity::AffinityGuard::try_pin() {
            crate::measurement::affinity::AffinityResult::Pinned(guard) => Some(guard),
            crate::measurement::affinity::AffinityResult::NotPinned { reason } => {
                tracing::debug!("CPU affinity not available: {}", reason);
                None
            }
        }
    } else {
        None
    };
    // Thread-priority elevation is feature-gated; the stub keeps the same shape.
    #[cfg(feature = "thread-priority")]
    let _priority_guard = if self.config.thread_priority {
        match crate::measurement::priority::PriorityGuard::try_elevate() {
            crate::measurement::priority::PriorityResult::Elevated(guard) => Some(guard),
            crate::measurement::priority::PriorityResult::NotElevated { reason } => {
                tracing::debug!("Thread priority elevation not available: {}", reason);
                None
            }
        }
    } else {
        None
    };
    #[cfg(not(feature = "thread-priority"))]
    let _priority_guard: Option<()> = None;
    // Busy-spin so a frequency-scaling CPU ramps up before we measure.
    if self.config.frequency_stabilization_ms > 0 {
        let stabilization_duration =
            Duration::from_millis(self.config.frequency_stabilization_ms);
        let stabilization_start = Instant::now();
        let mut counter = 0u64;
        while stabilization_start.elapsed() < stabilization_duration {
            counter = counter.wrapping_add(1);
            std::hint::black_box(counter);
        }
        tracing::debug!(
            "Frequency stabilization complete ({} ms, {} iterations)",
            self.config.frequency_stabilization_ms,
            counter
        );
    }
    // Seeded RNG for reproducibility when a seed is configured; otherwise OS entropy.
    let mut rng: rand::rngs::StdRng = if let Some(seed) = self.config.measurement_seed {
        SeedableRng::seed_from_u64(seed)
    } else {
        SeedableRng::from_rng(&mut rand::rng())
    };
    let (mut timer, fallback_reason) = self.timer_spec.create_timer();
    // The effect-size threshold can never be finer than the timer resolution.
    let raw_theta_ns = self.config.resolve_min_effect_ns();
    let theta_ns = raw_theta_ns.max(timer.resolution_ns());
    // Inputs are generated lazily in chunks of this size as the loop consumes them.
    const CHUNK_SIZE: usize = 5_000;
    let initial_samples = self.config.calibration_samples + CHUNK_SIZE;
    let max_samples_total = self.config.calibration_samples + self.config.max_samples;
    let mut baseline_inputs: Vec<T> = (0..initial_samples).map(|_| inputs.baseline()).collect();
    let mut sample_inputs: Vec<T> = (0..initial_samples)
        .map(|_| {
            let value = inputs.generate_sample();
            inputs.track_value(&value);
            value
        })
        .collect();
    // --- Warmup: untimed invocations to populate caches / JIT-like effects ---
    for i in 0..self.config.warmup.min(initial_samples) {
        operation(&baseline_inputs[i % baseline_inputs.len()]);
        std::hint::black_box(());
        operation(&sample_inputs[i % sample_inputs.len()]);
        std::hint::black_box(());
    }
    // --- Pilot: estimate the median per-call duration to size batches ---
    const PILOT_SAMPLES: usize = 100;
    let mut pilot_cycles = Vec::with_capacity(PILOT_SAMPLES * 2);
    for i in 0..PILOT_SAMPLES.min(initial_samples) {
        let result = timer.measure_cycles(|| {
            operation(&baseline_inputs[i]);
            std::hint::black_box(());
        });
        if let Ok(cycles) = result {
            pilot_cycles.push(cycles);
        }
        let result = timer.measure_cycles(|| {
            operation(&sample_inputs[i]);
            std::hint::black_box(());
        });
        if let Ok(cycles) = result {
            pilot_cycles.push(cycles);
        }
    }
    pilot_cycles.sort_unstable();
    // NOTE(review): if every pilot measurement returns Err, `pilot_cycles` is
    // empty and this index panics — consider guarding with an Unmeasurable return.
    let median_cycles = pilot_cycles[pilot_cycles.len() / 2];
    let median_ns = timer.cycles_to_ns(median_cycles);
    let resolution_ns = timer.resolution_ns();
    let ticks_per_call = median_ns / resolution_ns;
    // Non-finite / non-positive tick counts mean the timer produced garbage.
    if ticks_per_call <= 0.0 || !ticks_per_call.is_finite() {
        let threshold_ns = resolution_ns * crate::measurement::MIN_TICKS_SINGLE_CALL;
        let platform = format!(
            "{} ({}, {:.1}ns resolution)",
            std::env::consts::OS,
            timer.name(),
            timer.resolution_ns()
        );
        return Outcome::Unmeasurable {
            operation_ns: median_ns,
            threshold_ns,
            platform,
            recommendation:
                "Timer returned non-finite measurements; retry on a more stable system."
                    .to_string(),
        };
    }
    // --- Batching: choose K = iterations per timed sample so each batch spans
    // enough timer ticks to be resolvable. `_batching` is currently unused
    // beyond its construction but documents the decision.
    let (k, _batching): (u32, BatchingInfo) = match self.config.iterations_per_sample {
        crate::config::IterationsPerSample::Fixed(k) => {
            let k = k.max(1) as u32;
            (
                k,
                BatchingInfo {
                    enabled: k > 1,
                    k,
                    ticks_per_batch: ticks_per_call * k as f64,
                    rationale: format!("fixed batching K={}", k),
                    unmeasurable: None,
                },
            )
        }
        crate::config::IterationsPerSample::Auto => {
            if ticks_per_call >= crate::measurement::TARGET_TICKS_PER_BATCH {
                (
                    1,
                    BatchingInfo {
                        enabled: false,
                        k: 1,
                        ticks_per_batch: ticks_per_call,
                        rationale: format!(
                            "no batching needed ({:.1} ticks/call)",
                            ticks_per_call
                        ),
                        unmeasurable: None,
                    },
                )
            } else {
                let k_raw =
                    (crate::measurement::TARGET_TICKS_PER_BATCH / ticks_per_call).ceil() as u32;
                let k = k_raw.clamp(1, crate::measurement::MAX_BATCH_SIZE);
                let ticks_per_batch = ticks_per_call * k as f64;
                // Even the maximum batch size cannot reach the target tick
                // count: the operation is too fast for this timer.
                let partial = ticks_per_batch < crate::measurement::TARGET_TICKS_PER_BATCH;
                if partial {
                    let platform = format!(
                        "{} ({}, {:.1}ns resolution)",
                        std::env::consts::OS,
                        timer.name(),
                        timer.resolution_ns()
                    );
                    return Outcome::Unmeasurable {
                        operation_ns: median_ns,
                        threshold_ns: resolution_ns
                            * crate::measurement::TARGET_TICKS_PER_BATCH
                            / k as f64,
                        platform,
                        recommendation: generate_unmeasurable_recommendation(fallback_reason),
                    };
                }
                (
                    k,
                    BatchingInfo {
                        enabled: k > 1,
                        k,
                        ticks_per_batch,
                        rationale: format!("K={} ({:.1} ticks/batch)", k, ticks_per_batch),
                        unmeasurable: None,
                    },
                )
            }
        }
    };
    // --- Calibration measurements: interleave classes in a shuffled schedule
    // so slow drift affects both classes symmetrically.
    let n_cal = self
        .config
        .calibration_samples
        .min(self.config.max_samples / 2);
    let mut calibration_baseline_cycles = Vec::with_capacity(n_cal);
    let mut calibration_sample_cycles = Vec::with_capacity(n_cal);
    let mut cal_schedule: Vec<(Class, usize)> = Vec::with_capacity(n_cal * 2);
    for i in 0..n_cal {
        cal_schedule.push((Class::Baseline, i));
        cal_schedule.push((Class::Sample, i));
    }
    cal_schedule.shuffle(&mut rng);
    for (class, idx) in cal_schedule {
        match class {
            Class::Baseline => {
                let result = timer.measure_cycles(|| {
                    for _ in 0..k {
                        operation(&baseline_inputs[idx]);
                        std::hint::black_box(());
                    }
                });
                // Failed measurements are dropped silently; the two cycle
                // vectors may therefore end up with different lengths.
                if let Ok(cycles) = result {
                    calibration_baseline_cycles.push(cycles);
                }
            }
            Class::Sample => {
                let result = timer.measure_cycles(|| {
                    for _ in 0..k {
                        operation(&sample_inputs[idx]);
                        std::hint::black_box(());
                    }
                });
                if let Ok(cycles) = result {
                    calibration_sample_cycles.push(cycles);
                }
            }
        }
    }
    // Escape hatch for tests/CI: skip preflight sanity checks entirely.
    let skip_preflight = std::env::var("TIMING_ORACLE_SKIP_PREFLIGHT").is_ok();
    let ns_per_tick = timer.resolution_ns();
    let cal_config = CalibrationConfig {
        calibration_samples: n_cal,
        // Calibration-time bootstrap is capped at 200 iterations for speed.
        bootstrap_iterations: self.config.cov_bootstrap_iterations.min(200),
        timer_resolution_ns: ns_per_tick,
        theta_ns,
        alpha: 0.01,
        seed: self.config.measurement_seed.unwrap_or(DEFAULT_SEED),
        skip_preflight,
        force_discrete_mode: self.config.force_discrete_mode,
    };
    let calibration = match calibrate(
        &calibration_baseline_cycles,
        &calibration_sample_cycles,
        // calibrate() wants ns-per-cycle; the timer exposes cycles-per-ns.
        1.0 / timer.cycles_per_ns(),
        &cal_config,
    ) {
        Ok(cal) => {
            // Record the batching factor chosen above on the calibration.
            Calibration { batch_k: k, ..cal }
        }
        Err(e) => {
            // Calibration failure is reported as Inconclusive with a neutral
            // leak probability of 0.5 (no evidence either way).
            let diagnostics = Diagnostics {
                calibration_samples: n_cal,
                total_time_secs: start_time.elapsed().as_secs_f64(),
                warnings: vec![format!("Calibration failed: {}", e)],
                ..Diagnostics::default()
            };
            return Outcome::Inconclusive {
                reason: InconclusiveReason::DataTooNoisy {
                    message: format!("Calibration failed: {}", e),
                    guidance: "Try increasing calibration_samples or reducing system noise"
                        .to_string(),
                },
                leak_probability: 0.5,
                effect: EffectEstimate::default(),
                samples_used: n_cal,
                quality: MeasurementQuality::TooNoisy,
                diagnostics,
                theta_user: theta_ns,
                theta_eff: theta_ns,
                theta_floor: 0.0,
            };
        }
    };
    // Research mode gets its own loop (no pass/fail decision thresholds).
    if matches!(self.config.attacker_model, Some(AttackerModel::Research)) {
        return self.run_research_mode(
            calibration,
            &calibration_baseline_cycles,
            &calibration_sample_cycles,
            &baseline_inputs,
            &sample_inputs,
            n_cal,
            k,
            &mut timer,
            fallback_reason,
            &mut operation,
            &mut rng,
            initial_samples,
            start_time,
        );
    }
    // --- Adaptive loop configuration ---
    let adaptive_config = AdaptiveConfig::with_theta(theta_ns)
        .pass_threshold(self.config.pass_threshold)
        .fail_threshold(self.config.fail_threshold)
        .time_budget(self.config.time_budget)
        .max_samples(self.config.max_samples);
    let adaptive_config = AdaptiveConfig {
        batch_size: self.config.batch_size,
        seed: self.config.measurement_seed.unwrap_or(DEFAULT_SEED),
        outlier_percentile: self.config.outlier_percentile,
        ..adaptive_config
    };
    let mut adaptive_state = AdaptiveState::with_capacity(self.config.max_samples);
    // Seed the adaptive state with the calibration measurements so they count
    // toward the posterior as well.
    adaptive_state.add_batch(
        calibration_baseline_cycles.clone(),
        calibration_sample_cycles.clone(),
    );
    let ns_per_cycle = 1.0 / timer.cycles_per_ns();
    // Derived seed for the stationarity tracker so its subsampling is
    // decorrelated from the measurement schedule RNG.
    let tracker_seed = self
        .config
        .measurement_seed
        .unwrap_or(DEFAULT_SEED)
        .wrapping_add(0xDEAD);
    let mut stationarity_tracker = crate::analysis::StationarityTracker::new(
        self.config.max_samples * 2,
        tracker_seed,
    );
    for &cycles in &calibration_baseline_cycles {
        stationarity_tracker.push(cycles as f64 * ns_per_cycle);
    }
    for &cycles in &calibration_sample_cycles {
        stationarity_tracker.push(cycles as f64 * ns_per_cycle);
    }
    // Index into the input vectors; calibration consumed the first n_cal.
    let mut input_idx = n_cal;
    // --- Adaptive measure/analyze loop ---
    loop {
        // Budget guards first: both produce Inconclusive with the current posterior.
        if adaptive_state.elapsed() > self.config.time_budget {
            let posterior = adaptive_state.current_posterior();
            let leak_probability = posterior.map(|p| p.leak_probability).unwrap_or(0.5);
            let stationarity = stationarity_tracker.compute();
            return self.build_inconclusive_outcome(
                InconclusiveReason::TimeBudgetExceeded {
                    current_probability: leak_probability,
                    samples_collected: adaptive_state.n_total(),
                },
                &adaptive_state,
                &calibration,
                &timer,
                fallback_reason,
                start_time,
                theta_ns,
                stationarity,
            );
        }
        if adaptive_state.n_total() >= self.config.max_samples {
            let posterior = adaptive_state.current_posterior();
            let leak_probability = posterior.map(|p| p.leak_probability).unwrap_or(0.5);
            let stationarity = stationarity_tracker.compute();
            return self.build_inconclusive_outcome(
                InconclusiveReason::SampleBudgetExceeded {
                    current_probability: leak_probability,
                    samples_collected: adaptive_state.n_total(),
                },
                &adaptive_state,
                &calibration,
                &timer,
                fallback_reason,
                start_time,
                theta_ns,
                stationarity,
            );
        }
        // Lazily generate more inputs in CHUNK_SIZE steps when exhausted.
        let samples_available = baseline_inputs.len();
        if input_idx >= samples_available {
            if samples_available >= max_samples_total {
                let posterior = adaptive_state.current_posterior();
                let leak_probability = posterior.map(|p| p.leak_probability).unwrap_or(0.5);
                let stationarity = stationarity_tracker.compute();
                return self.build_inconclusive_outcome(
                    InconclusiveReason::SampleBudgetExceeded {
                        current_probability: leak_probability,
                        samples_collected: adaptive_state.n_total(),
                    },
                    &adaptive_state,
                    &calibration,
                    &timer,
                    fallback_reason,
                    start_time,
                    theta_ns,
                    stationarity,
                );
            }
            let chunk_to_generate = CHUNK_SIZE.min(max_samples_total - samples_available);
            for _ in 0..chunk_to_generate {
                baseline_inputs.push(inputs.baseline());
                let value = inputs.generate_sample();
                inputs.track_value(&value);
                sample_inputs.push(value);
            }
        }
        // Measure one batch with a shuffled interleaved schedule (as in calibration).
        let batch_size = self
            .config
            .batch_size
            .min(baseline_inputs.len() - input_idx);
        let mut batch_baseline = Vec::with_capacity(batch_size);
        let mut batch_sample = Vec::with_capacity(batch_size);
        let mut batch_schedule: Vec<(Class, usize)> = Vec::with_capacity(batch_size * 2);
        for i in 0..batch_size {
            let global_idx = input_idx + i;
            batch_schedule.push((Class::Baseline, global_idx));
            batch_schedule.push((Class::Sample, global_idx));
        }
        batch_schedule.shuffle(&mut rng);
        for (class, idx) in batch_schedule {
            match class {
                Class::Baseline => {
                    let result = timer.measure_cycles(|| {
                        for _ in 0..k {
                            operation(&baseline_inputs[idx]);
                            std::hint::black_box(());
                        }
                    });
                    if let Ok(cycles) = result {
                        batch_baseline.push(cycles);
                        stationarity_tracker.push(cycles as f64 * ns_per_cycle);
                    }
                }
                Class::Sample => {
                    let result = timer.measure_cycles(|| {
                        for _ in 0..k {
                            operation(&sample_inputs[idx]);
                            std::hint::black_box(());
                        }
                    });
                    if let Ok(cycles) = result {
                        batch_sample.push(cycles);
                        stationarity_tracker.push(cycles as f64 * ns_per_cycle);
                    }
                }
            }
        }
        input_idx += batch_size;
        adaptive_state.add_batch(batch_baseline, batch_sample);
        // Re-run the adaptive analysis and act on its decision.
        let outcome = run_adaptive(
            &calibration,
            &mut adaptive_state,
            1.0 / timer.cycles_per_ns(),
            &adaptive_config,
        );
        match outcome {
            AdaptiveOutcome::LeakDetected {
                posterior,
                samples_per_class,
                elapsed: _,
            } => {
                let stationarity = stationarity_tracker.compute();
                return self.build_fail_outcome(
                    &posterior,
                    samples_per_class,
                    &calibration,
                    &timer,
                    fallback_reason,
                    start_time,
                    theta_ns,
                    stationarity,
                );
            }
            AdaptiveOutcome::NoLeakDetected {
                posterior,
                samples_per_class,
                elapsed: _,
            } => {
                let stationarity = stationarity_tracker.compute();
                return self.build_pass_outcome(
                    &posterior,
                    samples_per_class,
                    &calibration,
                    &timer,
                    fallback_reason,
                    start_time,
                    theta_ns,
                    stationarity,
                );
            }
            AdaptiveOutcome::Continue { posterior, .. } => {
                // Not decisive yet: remember the posterior and collect more data.
                adaptive_state.update_posterior(posterior);
                continue;
            }
            AdaptiveOutcome::ThresholdElevated {
                posterior,
                theta_user,
                theta_eff,
                achievable_at_max,
                samples_per_class: _,
                elapsed: _,
                ..
            } => {
                // The measurement floor forced a larger effective threshold
                // than the user asked for; report Inconclusive with guidance.
                let stationarity = stationarity_tracker.compute();
                let guidance = generate_threshold_elevated_guidance(fallback_reason);
                let reason = InconclusiveReason::ThresholdElevated {
                    theta_user,
                    theta_eff,
                    leak_probability_at_eff: posterior.leak_probability,
                    meets_pass_criterion_at_eff: true,
                    achievable_at_max,
                    message: format!(
                        "Threshold elevated from {:.0}ns to {:.1}ns; P={:.1}% at elevated threshold",
                        theta_user, theta_eff, posterior.leak_probability * 100.0
                    ),
                    guidance,
                };
                return self.build_inconclusive_outcome(
                    reason,
                    &adaptive_state,
                    &calibration,
                    &timer,
                    fallback_reason,
                    start_time,
                    theta_ns,
                    stationarity,
                );
            }
            AdaptiveOutcome::Inconclusive { reason, .. } => {
                let result_reason = convert_adaptive_reason(&reason);
                let stationarity = stationarity_tracker.compute();
                return self.build_inconclusive_outcome(
                    result_reason,
                    &adaptive_state,
                    &calibration,
                    &timer,
                    fallback_reason,
                    start_time,
                    theta_ns,
                    stationarity,
                );
            }
        }
    }
}
/// Assemble a `Pass` outcome from the final posterior: effect estimate,
/// measurement-quality grade, diagnostics, and the threshold triple
/// (user-requested, effective, statistical floor).
#[allow(clippy::too_many_arguments)]
fn build_pass_outcome(
    &self,
    posterior: &Posterior,
    samples_used: usize,
    calibration: &Calibration,
    timer: &BoxedTimer,
    fallback_reason: TimerFallbackReason,
    start_time: Instant,
    theta_ns: f64,
    stationarity: Option<crate::analysis::StationarityResult>,
) -> Outcome {
    // The sampling-noise floor shrinks as 1/sqrt(n) but never drops below
    // the timer-tick quantization floor.
    let noise_floor = calibration.c_floor / (samples_used as f64).sqrt();
    let theta_floor = noise_floor.max(calibration.theta_tick);
    let diagnostics = build_diagnostics(
        calibration,
        timer,
        fallback_reason,
        start_time,
        &self.config,
        theta_ns,
        stationarity,
    );
    Outcome::Pass {
        leak_probability: posterior.leak_probability,
        effect: build_effect_estimate(posterior, theta_ns, calibration.batch_k),
        samples_used,
        quality: MeasurementQuality::from_mde_ns(calibration.mde_ns),
        diagnostics,
        theta_user: theta_ns,
        theta_eff: calibration.theta_eff,
        theta_floor,
    }
}
/// Assemble a `Fail` outcome: a timing difference above threshold was found.
/// In addition to everything a `Pass` carries, this grades how practically
/// exploitable the observed effect size is.
#[allow(clippy::too_many_arguments)]
fn build_fail_outcome(
    &self,
    posterior: &Posterior,
    samples_used: usize,
    calibration: &Calibration,
    timer: &BoxedTimer,
    fallback_reason: TimerFallbackReason,
    start_time: Instant,
    theta_ns: f64,
    stationarity: Option<crate::analysis::StationarityResult>,
) -> Outcome {
    let effect = build_effect_estimate(posterior, theta_ns, calibration.batch_k);
    // Exploitability is graded from the total observed effect size.
    let exploitability = Exploitability::from_effect_ns(effect.total_effect_ns());
    // 1/sqrt(n) sampling floor, bounded below by timer-tick quantization.
    let theta_floor =
        (calibration.c_floor / (samples_used as f64).sqrt()).max(calibration.theta_tick);
    let diagnostics = build_diagnostics(
        calibration,
        timer,
        fallback_reason,
        start_time,
        &self.config,
        theta_ns,
        stationarity,
    );
    Outcome::Fail {
        leak_probability: posterior.leak_probability,
        effect,
        exploitability,
        samples_used,
        quality: MeasurementQuality::from_mde_ns(calibration.mde_ns),
        diagnostics,
        theta_user: theta_ns,
        theta_eff: calibration.theta_eff,
        theta_floor,
    }
}
#[allow(clippy::too_many_arguments)]
fn build_inconclusive_outcome(
&self,
reason: InconclusiveReason,
state: &AdaptiveState,
calibration: &Calibration,
timer: &BoxedTimer,
fallback_reason: TimerFallbackReason,
start_time: Instant,
theta_ns: f64,
stationarity: Option<crate::analysis::StationarityResult>,
) -> Outcome {
let posterior = state.current_posterior();
let leak_probability = posterior.map(|p| p.leak_probability).unwrap_or(0.5);
let effect = posterior
.map(|p| build_effect_estimate(p, theta_ns, calibration.batch_k))
.unwrap_or_default();
let quality = MeasurementQuality::from_mde_ns(calibration.mde_ns);
let diagnostics = build_diagnostics(
calibration,
timer,
fallback_reason,
start_time,
&self.config,
theta_ns,
stationarity,
);
Outcome::Inconclusive {
reason,
leak_probability,
effect,
samples_used: state.n_total(),
quality,
diagnostics,
theta_user: theta_ns,
theta_eff: calibration.theta_eff,
theta_floor: (calibration.c_floor / (state.n_total() as f64).sqrt())
.max(calibration.theta_tick),
}
}
/// Research-mode measurement loop: no pass/fail decision, just estimate the
/// maximum timing effect and its credible interval until a budget runs out,
/// the timer's resolution limit is reached, or the CI clearly separates from
/// the statistical floor.
///
/// Calibration measurements are reused as the first batch; `total_samples_needed`
/// bounds how many pre-generated inputs may be consumed.
#[allow(clippy::too_many_arguments)]
fn run_research_mode<T, F, R>(
    &self,
    calibration: Calibration,
    calibration_baseline_cycles: &[u64],
    calibration_sample_cycles: &[u64],
    baseline_inputs: &[T],
    sample_inputs: &[T],
    n_cal: usize,
    k: u32,
    timer: &mut BoxedTimer,
    fallback_reason: TimerFallbackReason,
    operation: &mut F,
    rng: &mut R,
    total_samples_needed: usize,
    start_time: Instant,
) -> Outcome
where
    T: Clone + Hash,
    F: FnMut(&T),
    R: rand::Rng,
{
    use crate::adaptive::{run_adaptive, AdaptiveConfig, AdaptiveOutcome};
    let mut adaptive_state = AdaptiveState::with_capacity(self.config.max_samples);
    // Seed the state with the calibration data so it counts toward the posterior.
    adaptive_state.add_batch(
        calibration_baseline_cycles.to_vec(),
        calibration_sample_cycles.to_vec(),
    );
    // Research mode measures down to the timer's own resolution.
    let theta_ns = timer.resolution_ns();
    // Thresholds 0.0/1.0 make the adaptive engine never conclude pass/fail on
    // its own; termination is decided below from the CI instead.
    let adaptive_config = AdaptiveConfig::with_theta(theta_ns)
        .pass_threshold(0.0)
        .fail_threshold(1.0)
        .time_budget(self.config.time_budget)
        .max_samples(self.config.max_samples);
    let adaptive_config = AdaptiveConfig {
        batch_size: self.config.batch_size,
        seed: self.config.measurement_seed.unwrap_or(DEFAULT_SEED),
        outlier_percentile: self.config.outlier_percentile,
        ..adaptive_config
    };
    // Calibration consumed the first n_cal inputs.
    let mut input_idx = n_cal;
    loop {
        // Budget guards: time, sample cap, or pre-generated inputs exhausted.
        if adaptive_state.elapsed() > self.config.time_budget {
            return self.build_research_outcome(
                ResearchStatus::BudgetExhausted,
                &adaptive_state,
                &calibration,
                timer,
                fallback_reason,
                start_time,
            );
        }
        if adaptive_state.n_total() >= self.config.max_samples {
            return self.build_research_outcome(
                ResearchStatus::BudgetExhausted,
                &adaptive_state,
                &calibration,
                timer,
                fallback_reason,
                start_time,
            );
        }
        let batch_size = self.config.batch_size.min(total_samples_needed - input_idx);
        if batch_size == 0 {
            return self.build_research_outcome(
                ResearchStatus::BudgetExhausted,
                &adaptive_state,
                &calibration,
                timer,
                fallback_reason,
                start_time,
            );
        }
        // Measure one batch via a shuffled interleaved schedule, as in the
        // main test loop, so drift affects both classes symmetrically.
        let mut batch_baseline = Vec::with_capacity(batch_size);
        let mut batch_sample = Vec::with_capacity(batch_size);
        let mut batch_schedule: Vec<(Class, usize)> = Vec::with_capacity(batch_size * 2);
        for i in 0..batch_size {
            let global_idx = input_idx + i;
            batch_schedule.push((Class::Baseline, global_idx));
            batch_schedule.push((Class::Sample, global_idx));
        }
        batch_schedule.shuffle(rng);
        for (class, idx) in batch_schedule {
            match class {
                Class::Baseline => {
                    let result = timer.measure_cycles(|| {
                        for _ in 0..k {
                            operation(&baseline_inputs[idx]);
                            std::hint::black_box(());
                        }
                    });
                    if let Ok(cycles) = result {
                        batch_baseline.push(cycles);
                    }
                }
                Class::Sample => {
                    let result = timer.measure_cycles(|| {
                        for _ in 0..k {
                            operation(&sample_inputs[idx]);
                            std::hint::black_box(());
                        }
                    });
                    if let Ok(cycles) = result {
                        batch_sample.push(cycles);
                    }
                }
            }
        }
        input_idx += batch_size;
        adaptive_state.add_batch(batch_baseline, batch_sample);
        let outcome = run_adaptive(
            &calibration,
            &mut adaptive_state,
            1.0 / timer.cycles_per_ns(),
            &adaptive_config,
        );
        // Extract the posterior from any non-error outcome; a quality failure
        // from the adaptive engine terminates research mode.
        let posterior = match &outcome {
            AdaptiveOutcome::Continue { posterior, .. } => posterior,
            AdaptiveOutcome::LeakDetected { posterior, .. } => posterior,
            AdaptiveOutcome::NoLeakDetected { posterior, .. } => posterior,
            AdaptiveOutcome::ThresholdElevated { posterior, .. } => {
                posterior
            }
            AdaptiveOutcome::Inconclusive { reason, .. } => {
                let inconclusive_reason = convert_adaptive_reason(reason);
                return self.build_research_outcome(
                    ResearchStatus::QualityIssue(inconclusive_reason),
                    &adaptive_state,
                    &calibration,
                    timer,
                    fallback_reason,
                    start_time,
                );
            }
        };
        let n = adaptive_state.n_total() as f64;
        let theta_floor = (calibration.c_floor / n.sqrt()).max(calibration.theta_tick);
        // Floor has converged to (within 1%) of the tick floor: more samples
        // cannot resolve anything finer.
        if theta_floor <= calibration.theta_tick * 1.01 {
            return self.build_research_outcome(
                ResearchStatus::ResolutionLimitReached,
                &adaptive_state,
                &calibration,
                timer,
                fallback_reason,
                start_time,
            );
        }
        let max_effect_ci = compute_max_effect_ci(
            &posterior.delta_post,
            &posterior.lambda_post,
            self.config.measurement_seed.unwrap_or(DEFAULT_SEED),
        );
        // CI lower bound clearly above the floor: an effect exists.
        if max_effect_ci.ci.0 > 1.1 * theta_floor {
            return self.build_research_outcome(
                ResearchStatus::EffectDetected,
                &adaptive_state,
                &calibration,
                timer,
                fallback_reason,
                start_time,
            );
        }
        // CI upper bound clearly below the floor: no resolvable effect.
        if max_effect_ci.ci.1 < 0.9 * theta_floor {
            return self.build_research_outcome(
                ResearchStatus::NoEffectDetected,
                &adaptive_state,
                &calibration,
                timer,
                fallback_reason,
                start_time,
            );
        }
        // Still ambiguous: keep the posterior and collect more data.
        adaptive_state.update_posterior(posterior.clone());
    }
}
/// Assemble a `Research` outcome: maximum-effect point estimate, credible
/// interval, statistical floor, and whether the effect is detectable.
fn build_research_outcome(
    &self,
    status: ResearchStatus,
    state: &AdaptiveState,
    calibration: &Calibration,
    timer: &BoxedTimer,
    fallback_reason: TimerFallbackReason,
    start_time: Instant,
) -> Outcome {
    let theta_ns = timer.resolution_ns();
    let samples = state.n_total();
    // 1/sqrt(n) sampling floor, bounded below by timer-tick quantization.
    let theta_floor =
        (calibration.c_floor / (samples as f64).sqrt()).max(calibration.theta_tick);
    let posterior = state.current_posterior();
    // Summarize the max-effect CI when a posterior exists; otherwise report a
    // zero effect that is not detectable.
    let (max_effect_ns, max_effect_ci, detectable) = match posterior {
        Some(p) => {
            let ci = compute_max_effect_ci(
                &p.delta_post,
                &p.lambda_post,
                self.config.measurement_seed.unwrap_or(DEFAULT_SEED),
            );
            (ci.mean, ci.ci, ci.ci.0 > theta_floor)
        }
        None => (0.0, (0.0, 0.0), false),
    };
    let effect = posterior
        .map(|p| build_effect_estimate(p, theta_ns, calibration.batch_k))
        .unwrap_or_default();
    let diagnostics = build_diagnostics(
        calibration,
        timer,
        fallback_reason,
        start_time,
        &self.config,
        theta_ns,
        None,
    );
    Outcome::Research(ResearchOutcome {
        status,
        max_effect_ns,
        max_effect_ci,
        theta_floor,
        detectable,
        // Model-mismatch detection is not implemented; always reported false.
        model_mismatch: false,
        effect,
        samples_used: samples,
        quality: MeasurementQuality::from_mde_ns(calibration.mde_ns),
        diagnostics,
    })
}
/// Analyze pre-collected timing samples (already in nanoseconds) in a single
/// pass, without doing any measurement of our own.
///
/// NOTE(review): `bootstrap_iterations` is hardcoded to 2000 here rather than
/// using `self.config.cov_bootstrap_iterations`, and `timer_resolution_ns`
/// is fixed at 1.0 since the true capture resolution is unknown — confirm
/// both are intentional.
pub fn analyze_raw_samples(&self, baseline_ns: &[f64], test_ns: &[f64]) -> Outcome {
    use crate::adaptive::single_pass::{analyze_single_pass, SinglePassConfig};
    let theta_ns = self.config.resolve_min_effect_ns();
    let config = SinglePassConfig {
        theta_ns,
        pass_threshold: self.config.pass_threshold,
        fail_threshold: self.config.fail_threshold,
        bootstrap_iterations: 2000,
        timer_resolution_ns: 1.0,
        seed: self.config.measurement_seed.unwrap_or(DEFAULT_SEED),
        max_variance_ratio: 0.95,
    };
    let result = analyze_single_pass(baseline_ns, test_ns, &config);
    result.outcome
}
/// Analyze externally captured [`TimingData`], converting its native unit to
/// nanoseconds first (using `cpu_freq_ghz` for cycle-based units) and then
/// delegating to [`Self::analyze_raw_samples`].
pub fn analyze_timing_data(
    &self,
    data: &crate::data::TimingData,
    cpu_freq_ghz: Option<f64>,
) -> Outcome {
    // Unit conversion happens up front; the analysis itself works in ns.
    let (baseline_ns, test_ns) = data.to_nanoseconds(data.unit.ns_per_unit(cpu_freq_ghz));
    self.analyze_raw_samples(&baseline_ns, &test_ns)
}
}
use crate::adaptive::Posterior;
/// Convert a posterior into the caller-facing `EffectEstimate`, rescaling
/// every per-batch quantity down to per-call units by the batching factor
/// `batch_k` (clamped to at least 1 so division is always safe).
fn build_effect_estimate(posterior: &Posterior, _theta_ns: f64, batch_k: u32) -> EffectEstimate {
    let scale = f64::from(batch_k.max(1));
    let raw = posterior.to_effect_estimate();
    let (ci_lo, ci_hi) = raw.credible_interval_ns;
    // Rescale each top-quantile entry; exceedance probabilities are
    // scale-free and carried over unchanged.
    let mut top_quantiles = Vec::with_capacity(raw.top_quantiles.len());
    for tq in raw.top_quantiles {
        top_quantiles.push(crate::result::TopQuantile {
            quantile_p: tq.quantile_p,
            mean_ns: tq.mean_ns / scale,
            ci95_ns: (tq.ci95_ns.0 / scale, tq.ci95_ns.1 / scale),
            exceed_prob: tq.exceed_prob,
        });
    }
    EffectEstimate {
        max_effect_ns: raw.max_effect_ns / scale,
        credible_interval_ns: (ci_lo / scale, ci_hi / scale),
        top_quantiles,
    }
}
/// Collect calibration results, preflight warnings, and run metadata into the
/// `Diagnostics` struct attached to every outcome.
///
/// NOTE(review): several fields are hardcoded rather than computed here —
/// outlier rates (0.0), duplicate_fraction (0.0), and all Gibbs-sampler
/// statistics (fixed iteration counts, unit means, zero spreads). They appear
/// to be placeholders; confirm whether real values are populated elsewhere.
#[allow(clippy::too_many_arguments)]
fn build_diagnostics(
    calibration: &Calibration,
    timer: &BoxedTimer,
    fallback_reason: TimerFallbackReason,
    start_time: Instant,
    config: &Config,
    theta_ns: f64,
    stationarity: Option<crate::analysis::StationarityResult>,
) -> Diagnostics {
    let preflight = &calibration.preflight_result;
    // Flatten the three preflight warning categories into one list.
    let mut preflight_warnings = Vec::new();
    for warning in &preflight.warnings.sanity {
        preflight_warnings.push(warning.to_warning_info());
    }
    for warning in &preflight.warnings.autocorr {
        preflight_warnings.push(warning.to_warning_info());
    }
    for warning in &preflight.warnings.resolution {
        preflight_warnings.push(warning.to_warning_info());
    }
    let attacker_model = config.attacker_model.as_ref().map(|m| format!("{:?}", m));
    let platform = format!("{}-{}", std::env::consts::OS, std::env::consts::ARCH);
    // Surface threshold elevation (effective threshold above the requested
    // one) as a quality issue with platform-specific guidance.
    let mut quality_issues = Vec::new();
    if calibration.theta_ns > 0.0 && calibration.theta_eff > calibration.theta_ns {
        let guidance = generate_threshold_elevated_guidance(fallback_reason);
        quality_issues.push(QualityIssue {
            code: IssueCode::ThresholdIssue,
            message: format!(
                "Threshold elevated from {:.0} ns to {:.1} ns (measurement floor)",
                calibration.theta_ns, calibration.theta_eff
            ),
            guidance,
        });
    }
    Diagnostics {
        dependence_length: calibration.block_length,
        // Effective samples after accounting for autocorrelation block length.
        effective_sample_size: calibration.calibration_samples / calibration.block_length.max(1),
        // Missing stationarity data defaults to "looks fine" (ratio 1.0, ok).
        stationarity_ratio: stationarity.map(|s| s.ratio).unwrap_or(1.0),
        stationarity_ok: stationarity.map(|s| s.ok).unwrap_or(true),
        outlier_rate_baseline: 0.0,
        outlier_rate_sample: 0.0,
        outlier_asymmetry_ok: true,
        discrete_mode: calibration.discrete_mode,
        timer_resolution_ns: timer.resolution_ns(),
        duplicate_fraction: 0.0,
        preflight_ok: preflight.is_valid,
        calibration_samples: calibration.calibration_samples,
        total_time_secs: start_time.elapsed().as_secs_f64(),
        warnings: Vec::new(),
        quality_issues,
        preflight_warnings,
        seed: config.measurement_seed,
        attacker_model,
        threshold_ns: theta_ns,
        timer_name: timer.name().to_string(),
        platform,
        timer_fallback_reason: fallback_reason.as_str().map(String::from),
        gibbs_iters_total: 256,
        gibbs_burnin: 64,
        gibbs_retained: 192,
        lambda_mean: 1.0,
        lambda_sd: 0.0,
        lambda_cv: 0.0,
        lambda_ess: 0.0,
        lambda_mixing_ok: true,
        kappa_mean: 1.0,
        kappa_sd: 0.0,
        kappa_cv: 0.0,
        kappa_ess: 0.0,
        kappa_mixing_ok: true,
    }
}
/// Map the adaptive engine's internal `InconclusiveReason` onto the public
/// `result::InconclusiveReason`, cloning payload fields 1:1. Variants match
/// by name; some internal variants carry extra fields that the public enum
/// drops (`..` patterns).
fn convert_adaptive_reason(reason: &AdaptiveInconclusiveReason) -> InconclusiveReason {
    match reason {
        AdaptiveInconclusiveReason::DataTooNoisy {
            message, guidance, ..
        } => InconclusiveReason::DataTooNoisy {
            message: message.clone(),
            guidance: guidance.clone(),
        },
        AdaptiveInconclusiveReason::NotLearning {
            message, guidance, ..
        } => InconclusiveReason::NotLearning {
            message: message.clone(),
            guidance: guidance.clone(),
        },
        AdaptiveInconclusiveReason::WouldTakeTooLong {
            estimated_time_secs,
            samples_needed,
            guidance,
        } => InconclusiveReason::WouldTakeTooLong {
            estimated_time_secs: *estimated_time_secs,
            samples_needed: *samples_needed,
            guidance: guidance.clone(),
        },
        AdaptiveInconclusiveReason::TimeBudgetExceeded {
            current_probability,
            samples_collected,
            ..
        } => InconclusiveReason::TimeBudgetExceeded {
            current_probability: *current_probability,
            samples_collected: *samples_collected,
        },
        AdaptiveInconclusiveReason::SampleBudgetExceeded {
            current_probability,
            samples_collected,
        } => InconclusiveReason::SampleBudgetExceeded {
            current_probability: *current_probability,
            samples_collected: *samples_collected,
        },
        AdaptiveInconclusiveReason::ConditionsChanged {
            message, guidance, ..
        } => InconclusiveReason::ConditionsChanged {
            message: message.clone(),
            guidance: guidance.clone(),
        },
        AdaptiveInconclusiveReason::ThresholdElevated {
            theta_user,
            theta_eff,
            leak_probability_at_eff,
            meets_pass_criterion_at_eff,
            achievable_at_max,
            message,
            guidance,
        } => InconclusiveReason::ThresholdElevated {
            theta_user: *theta_user,
            theta_eff: *theta_eff,
            leak_probability_at_eff: *leak_probability_at_eff,
            meets_pass_criterion_at_eff: *meets_pass_criterion_at_eff,
            achievable_at_max: *achievable_at_max,
            message: message.clone(),
            guidance: guidance.clone(),
        },
    }
}
/// Build user-facing guidance for a `ThresholdElevated` outcome, tailored to
/// the current platform and to why the high-precision timer fell back.
///
/// Exactly one of the `#[cfg]` blocks below survives compilation on any given
/// target, and the surviving block is then the function's tail expression, so
/// every target returns a `String`. Do not reorder or add statements after
/// these blocks — that structure is what makes the function well-typed.
fn generate_threshold_elevated_guidance(fallback_reason: TimerFallbackReason) -> String {
// x86_64: the fallback reason is ignored here — presumably cycle-level timing
// is always usable on this arch, so only generic advice applies. TODO confirm.
#[cfg(target_arch = "x86_64")]
{
let _ = fallback_reason;
"Increase max_samples to improve measurement floor, or test at a higher abstraction level."
.to_string()
}
// Apple Silicon: advice depends on why the high-precision timer fell back
// (locked by another process, missing privileges, or simply unavailable).
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
{
match fallback_reason {
TimerFallbackReason::ConcurrentAccess => {
"High-precision timing is locked. If using cargo test, run with --test-threads=1.".to_string()
}
TimerFallbackReason::NoPrivileges => {
"Run with sudo to enable high-precision timing, or increase max_samples.".to_string()
}
TimerFallbackReason::CycleCounterUnavailable | TimerFallbackReason::Requested => {
"High-precision timing unavailable. Increase max_samples or test at a higher abstraction level.".to_string()
}
TimerFallbackReason::None => {
"Increase max_samples or test at a higher abstraction level.".to_string()
}
}
}
// Linux/aarch64: high-precision timing is gated by perf-event permissions
// (kernel.perf_event_paranoid / CAP_PERFMON), so privilege failures get
// kernel-specific remediation steps.
#[cfg(all(target_os = "linux", target_arch = "aarch64"))]
{
match fallback_reason {
TimerFallbackReason::NoPrivileges => {
"Run with sudo, set kernel.perf_event_paranoid=1, or grant CAP_PERFMON for high-precision timing. Or increase max_samples.".to_string()
}
TimerFallbackReason::CycleCounterUnavailable | TimerFallbackReason::Requested => {
"High-precision timing unavailable. Increase max_samples or test at a higher abstraction level.".to_string()
}
// ConcurrentAccess is grouped with None here: no Linux-specific remedy exists.
TimerFallbackReason::ConcurrentAccess | TimerFallbackReason::None => {
"Increase max_samples or test at a higher abstraction level.".to_string()
}
}
}
// Any other platform: no high-precision timer support is known, so only the
// generic advice can be offered.
#[cfg(not(any(
target_arch = "x86_64",
all(target_os = "macos", target_arch = "aarch64"),
all(target_os = "linux", target_arch = "aarch64")
)))]
{
let _ = fallback_reason;
"Increase max_samples or test at a higher abstraction level.".to_string()
}
}
/// Build a recommendation string for operations that proved too fast to
/// measure, tailored to the platform and the timer-fallback reason.
///
/// Structured exactly like `generate_threshold_elevated_guidance`: exactly one
/// `#[cfg]` block survives compilation per target and becomes the function's
/// tail expression, so every target returns a `String`.
fn generate_unmeasurable_recommendation(fallback_reason: TimerFallbackReason) -> String {
// x86_64: the fallback reason is ignored — even the best available timing
// (~0.3ns per the message below) was insufficient, so the only recourse is
// testing a coarser-grained operation.
#[cfg(target_arch = "x86_64")]
{
let _ = fallback_reason;
"This operation is too fast to measure reliably, even with cycle-accurate timing (~0.3ns). \
Consider testing at a higher abstraction level (e.g., full API calls rather than individual primitives).".to_string()
}
// Apple Silicon: if a better timer exists but was unavailable, say how to
// unlock it; otherwise recommend coarser-grained testing.
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
{
match fallback_reason {
TimerFallbackReason::ConcurrentAccess => {
"High-precision timing is locked by another process. \
If using cargo test, run with --test-threads=1."
.to_string()
}
TimerFallbackReason::NoPrivileges => {
"Run with sudo to enable high-precision timing (~0.3ns resolution).".to_string()
}
TimerFallbackReason::CycleCounterUnavailable | TimerFallbackReason::Requested => {
"High-precision timing unavailable. Consider testing at a higher abstraction level \
(e.g., full API calls rather than individual primitives)."
.to_string()
}
TimerFallbackReason::None => "Consider testing at a higher abstraction level \
(e.g., full API calls rather than individual primitives)."
.to_string(),
}
}
// Linux/aarch64: privilege problems get perf-event-specific remediation;
// everything else falls back to the generic recommendation.
#[cfg(all(target_os = "linux", target_arch = "aarch64"))]
{
match fallback_reason {
TimerFallbackReason::NoPrivileges => {
"Run with sudo to enable high-precision timing (~0.3ns resolution). \
Alternatively, set kernel.perf_event_paranoid=1 or grant CAP_PERFMON."
.to_string()
}
TimerFallbackReason::CycleCounterUnavailable | TimerFallbackReason::Requested => {
"High-precision timing unavailable. Check kernel perf_event support, \
or test at a higher abstraction level."
.to_string()
}
TimerFallbackReason::ConcurrentAccess | TimerFallbackReason::None => {
"Consider testing at a higher abstraction level \
(e.g., full API calls rather than individual primitives)."
.to_string()
}
}
}
// Any other platform: nothing better than the generic recommendation exists.
#[cfg(not(any(
target_arch = "x86_64",
all(target_os = "macos", target_arch = "aarch64"),
all(target_os = "linux", target_arch = "aarch64")
)))]
{
let _ = fallback_reason;
"Consider testing at a higher abstraction level \
(e.g., full API calls rather than individual primitives)."
.to_string()
}
}
/// Fraction of distinct values in the less-diverse of the two sample sets.
///
/// Values are quantized into buckets of width 0.001 before counting, so two
/// measurements closer than the bucket width count as one distinct value.
/// Empty inputs score 0.0. Returns the minimum of the two per-set ratios.
pub fn compute_min_uniqueness_ratio(baseline: &[f64], sample: &[f64]) -> f64 {
    use std::collections::HashSet;
    let uniqueness = |values: &[f64]| -> f64 {
        let buckets: HashSet<i64> = values.iter().map(|&v| (v * 1000.0) as i64).collect();
        // max(1) guards the division for an empty slice (ratio becomes 0.0).
        buckets.len() as f64 / values.len().max(1) as f64
    };
    uniqueness(baseline).min(uniqueness(sample))
}
/// Read `name` from the environment and parse it as `usize`.
/// Returns `None` when the variable is unset, not UTF-8, or unparseable.
fn parse_usize_env(name: &str) -> Option<usize> {
    env::var(name).ok().and_then(|raw| raw.parse().ok())
}
/// Read `name` from the environment and parse it as `u64`.
/// Returns `None` when the variable is unset, not UTF-8, or unparseable.
fn parse_u64_env(name: &str) -> Option<u64> {
    env::var(name).ok().and_then(|raw| raw.parse().ok())
}
/// Read `name` from the environment and parse it as `f64`.
/// Returns `None` when the variable is unset, not UTF-8, or unparseable.
fn parse_f64_env(name: &str) -> Option<f64> {
    env::var(name).ok().and_then(|raw| raw.parse().ok())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `for_attacker` must record the requested attacker model in the config.
    #[test]
    fn test_for_attacker() {
        let oracle = TimingOracle::for_attacker(AttackerModel::AdjacentNetwork);
        assert!(oracle.config.attacker_model.is_some());
    }

    /// Each builder setter must be reflected verbatim in the final config.
    #[test]
    fn test_builder_methods() {
        let configured = TimingOracle::for_attacker(AttackerModel::AdjacentNetwork)
            .time_budget_secs(30)
            .max_samples(50_000)
            .batch_size(500)
            .pass_threshold(0.01)
            .fail_threshold(0.99);
        let cfg = &configured.config;
        assert_eq!(cfg.time_budget, Duration::from_secs(30));
        assert_eq!(cfg.max_samples, 50_000);
        assert_eq!(cfg.batch_size, 500);
        assert_eq!(cfg.pass_threshold, 0.01);
        assert_eq!(cfg.fail_threshold, 0.99);
    }

    /// Continuous data should score near 1.0; heavily repeated data near 0.
    #[test]
    fn test_compute_min_uniqueness_ratio() {
        let smooth: Vec<f64> = (0..1000).map(|i| f64::from(i) * 0.001).collect();
        assert!(compute_min_uniqueness_ratio(&smooth, &smooth) > 0.9);

        let repeated: Vec<f64> = (0..1000).map(|i| f64::from(i % 5)).collect();
        assert!(compute_min_uniqueness_ratio(&repeated, &repeated) < 0.1);
    }
}