use fluxbench::{Bencher, BootstrapConfig, MetricContext, compute_bootstrap, compute_summary};
use fluxbench_logic::{Severity, Verification, VerificationContext, run_verifications};
use fluxbench_stats::{
ComparisonConfig, OutlierMethod, compare_distributions, compute_cycles_stats,
};
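
// Iterations before start_measurement() are warmup and record no samples;
// with set_iters_per_sample(1), each measured iteration yields one sample.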
#[test]
fn test_bencher_collects_samples() {
let mut bencher = Bencher::new(false);
for _ in 0..5 {
bencher.iter(|| {
let mut sum = 0u64;
for i in 0..1000 {
sum += i;
}
sum
});
}
    bencher.start_measurement(1_000_000_000);
    bencher.set_iters_per_sample(1);
for _ in 0..10 {
bencher.iter(|| {
let mut sum = 0u64;
for i in 0..1000 {
sum += i;
}
sum
});
}
let samples = bencher.samples();
assert_eq!(samples.len(), 10);
for sample in samples {
assert!(sample.duration_nanos > 0);
}
}
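
// iter_with_setup feeds each routine call a fresh value from the setup
// closure; each paired call counts as one iteration.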
#[test]
fn test_iter_with_setup() {
let mut bencher = Bencher::new(false);
for _ in 0..5 {
        bencher.iter_with_setup(
            || vec![1u64; 1000],
            |v| v.iter().sum::<u64>(),
        );
}
assert_eq!(bencher.iteration_count(), 5);
}
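
// Descriptive statistics over a tight cluster of samples around 100.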
#[test]
fn test_summary_statistics() {
let samples = vec![100.0, 102.0, 98.0, 101.0, 99.0, 100.0, 101.0, 99.0];
let summary = compute_summary(&samples, OutlierMethod::Iqr { k: 3 });
assert!((summary.mean - 100.0).abs() < 1.0);
assert!((summary.median - 100.0).abs() < 1.0);
assert!(summary.std_dev < 5.0);
assert_eq!(summary.min, 98.0);
assert_eq!(summary.max, 102.0);
}
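
// The bootstrap confidence interval must bracket the point estimate and stay
// reasonably tight for this low-variance input.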
#[test]
fn test_bootstrap_confidence_intervals() {
let samples: Vec<f64> = (0..100).map(|i| 100.0 + (i as f64 * 0.1)).collect();
let config = BootstrapConfig {
iterations: 1000,
confidence_level: 0.95,
..Default::default()
};
let result = compute_bootstrap(&samples, &config).unwrap();
assert!(result.confidence_interval.lower <= result.point_estimate);
assert!(result.confidence_interval.upper >= result.point_estimate);
let ci_width = result.confidence_interval.upper - result.confidence_interval.lower;
assert!(ci_width > 0.0);
    assert!(ci_width < 20.0);
}
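
// A ~50% slowdown from baseline to candidate should register as a significant
// regression with high probability.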
#[test]
fn test_ab_comparison() {
let baseline: Vec<f64> = (0..50).map(|i| 100.0 + (i as f64 * 0.1)).collect();
let candidate: Vec<f64> = (0..50).map(|i| 150.0 + (i as f64 * 0.1)).collect();
let config = ComparisonConfig {
bootstrap_iterations: 1000,
confidence_level: 0.95,
significance_threshold: 5.0,
..Default::default()
};
let result = compare_distributions(&baseline, &candidate, &config).unwrap();
assert!(result.probability_regression > 0.9);
    assert!(result.relative_change > 40.0);
    assert!(result.is_significant);
}
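
// Cycle counts paired with wall-clock nanos yield aggregate cycle statistics,
// including a cycles-per-nanosecond rate.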
#[test]
fn test_cycles_statistics() {
let cycles: Vec<u64> = vec![3000, 3100, 2900, 3050, 2950];
let nanos: Vec<f64> = vec![1000.0, 1033.0, 967.0, 1017.0, 983.0];
let stats = compute_cycles_stats(&cycles, &nanos);
assert!((stats.mean_cycles - 3000.0).abs() < 100.0);
assert_eq!(stats.min_cycles, 2900);
assert_eq!(stats.max_cycles, 3100);
assert!((stats.cycles_per_ns - 3.0).abs() < 0.5);
}
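
// Verification expressions reference metric names from the MetricContext and
// evaluate against their recorded values.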
#[test]
fn test_verification_expression() {
let mut context = MetricContext::new();
context.set("fast_bench", 100.0);
context.set("slow_bench", 500.0);
let verifications = vec![
Verification {
id: "fast_is_fast".to_string(),
expression: "fast_bench < 200".to_string(),
severity: Severity::Critical,
margin: 0.0,
},
Verification {
id: "fast_beats_slow".to_string(),
expression: "fast_bench < slow_bench".to_string(),
severity: Severity::Warning,
margin: 0.0,
},
];
let verification_context = VerificationContext::new(&context, Default::default());
let results = run_verifications(&verifications, &verification_context);
assert_eq!(results.len(), 2);
assert!(results.iter().all(|r| r.passed()));
}
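
// A verification whose expression evaluates to false must report as failed.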
#[test]
fn test_verification_failure() {
let mut context = MetricContext::new();
context.set("my_bench", 1000.0);
let verifications = vec![Verification {
id: "too_slow".to_string(),
expression: "my_bench < 100".to_string(), severity: Severity::Critical,
margin: 0.0,
}];
let verification_context = VerificationContext::new(&context, Default::default());
let results = run_verifications(&verifications, &verification_context);
assert_eq!(results.len(), 1);
assert!(!results[0].passed());
}
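
// Outlier filtering should trim the heavy tail from the mean while min/max
// and the outlier count still reflect the raw data.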
#[test]
fn test_outlier_preserves_tail() {
let mut samples: Vec<f64> = (0..90).map(|i| 100.0 + (i as f64 * 0.1)).collect();
    samples.extend([500.0, 600.0, 700.0, 800.0, 1000.0]);
let summary = compute_summary(&samples, OutlierMethod::Iqr { k: 3 });
assert!(summary.mean < 200.0);
assert_eq!(summary.max, 1000.0);
assert!(summary.outlier_count > 0);
}
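
// A 100-unit shift should be interpreted as a Large effect and dwarf the
// effect size of a 10-unit shift.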
#[test]
fn test_effect_size_interpretation() {
use fluxbench_stats::EffectInterpretation;
let baseline: Vec<f64> = (0..100).map(|i| 100.0 + (i as f64 % 10.0)).collect();
    let small_diff: Vec<f64> = (0..100).map(|i| 110.0 + (i as f64 % 10.0)).collect();
    let large_diff: Vec<f64> = (0..100).map(|i| 200.0 + (i as f64 % 10.0)).collect();
let config = ComparisonConfig {
bootstrap_iterations: 100,
..Default::default()
};
let small_result = compare_distributions(&baseline, &small_diff, &config).unwrap();
let large_result = compare_distributions(&baseline, &large_diff, &config).unwrap();
assert_eq!(
large_result.effect_interpretation,
EffectInterpretation::Large
);
assert!(large_result.effect_size.abs() > small_result.effect_size.abs());
}
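
// finish() counts warmup and measured iterations alike (3 + 3 = 6), but only
// measured iterations produce samples.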
#[test]
fn test_benchmark_result_finalization() {
let mut bencher = Bencher::new(true);
for _ in 0..3 {
bencher.iter(|| {
let v: Vec<u64> = (0..100).collect();
v.len()
});
}
bencher.start_measurement(1_000_000_000);
bencher.set_iters_per_sample(1);
for _ in 0..3 {
bencher.iter(|| {
let v: Vec<u64> = (0..100).collect();
v.len()
});
}
let result = bencher.finish();
    assert_eq!(result.iterations, 6);
    assert_eq!(result.samples.len(), 3);
    assert!(result.total_time_ns > 0);
}
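
// Mirror of the regression case: a ~50% speedup should yield a low regression
// probability and a large negative relative change.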
#[test]
fn test_improvement_detection() {
let baseline: Vec<f64> = (0..50).map(|i| 200.0 + (i as f64 * 0.1)).collect();
let candidate: Vec<f64> = (0..50).map(|i| 100.0 + (i as f64 * 0.1)).collect();
let config = ComparisonConfig {
bootstrap_iterations: 1000,
..Default::default()
};
let result = compare_distributions(&baseline, &candidate, &config).unwrap();
assert!(result.probability_regression < 0.1);
    assert!(result.relative_change < -40.0);
    assert!(result.is_significant);
}