use crate::error::Result;
#[cfg(not(feature = "std"))]
use alloc::{
format,
string::{String, ToString},
vec,
vec::Vec,
};
#[cfg(feature = "std")]
use std::time::Instant;
#[cfg(feature = "std")]
use std::collections::HashMap;
#[cfg(not(feature = "std"))]
use alloc::collections::BTreeMap as HashMap;
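/// A single timed measurement of a named operation, in nanoseconds, with
/// optional throughput, memory usage, and free-form metadata.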
#[derive(Debug, Clone)]
pub struct PerfMeasurement {
pub operation: String,
pub duration_ns: u64,
pub throughput: Option<f64>,
pub memory_bytes: Option<usize>,
pub metadata: HashMap<String, String>,
}
impl PerfMeasurement {
pub fn new(operation: String, duration_ns: u64) -> Self {
Self {
operation,
duration_ns,
throughput: None,
memory_bytes: None,
metadata: HashMap::new(),
}
}
pub fn with_throughput(mut self, ops_per_sec: f64) -> Self {
self.throughput = Some(ops_per_sec);
self
}
pub fn with_memory(mut self, bytes: usize) -> Self {
self.memory_bytes = Some(bytes);
self
}
pub fn with_metadata(mut self, key: String, value: String) -> Self {
self.metadata.insert(key, value);
self
}
pub fn duration_ms(&self) -> f64 {
self.duration_ns as f64 / 1_000_000.0
}
pub fn duration_secs(&self) -> f64 {
self.duration_ns as f64 / 1_000_000_000.0
}
}
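/// Summary statistics (mean, median, standard deviation, min/max, p95/p99)
/// over a set of [`PerfMeasurement`] durations, in nanoseconds.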
#[derive(Debug, Clone)]
pub struct PerfStatistics {
pub count: usize,
pub mean_ns: f64,
pub median_ns: f64,
pub std_dev_ns: f64,
pub min_ns: u64,
pub max_ns: u64,
pub p95_ns: f64,
pub p99_ns: f64,
}
impl PerfStatistics {
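    /// Computes statistics from a slice of measurements. An empty slice yields
    /// an all-zero summary. The standard deviation is the population value
    /// (divides by `count`), and percentiles are taken by index on the sorted
    /// durations.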
pub fn from_measurements(measurements: &[PerfMeasurement]) -> Self {
if measurements.is_empty() {
return Self {
count: 0,
mean_ns: 0.0,
median_ns: 0.0,
std_dev_ns: 0.0,
min_ns: 0,
max_ns: 0,
p95_ns: 0.0,
p99_ns: 0.0,
};
}
let mut durations: Vec<u64> = measurements.iter().map(|m| m.duration_ns).collect();
durations.sort_unstable();
let count = durations.len();
let sum: u64 = durations.iter().sum();
let mean = sum as f64 / count as f64;
let median = if count % 2 == 0 {
(durations[count / 2 - 1] + durations[count / 2]) as f64 / 2.0
} else {
durations[count / 2] as f64
};
let variance = durations
.iter()
.map(|&d| {
let diff = d as f64 - mean;
diff * diff
})
.sum::<f64>()
/ count as f64;
let std_dev = variance.sqrt();
let min = durations[0];
let max = durations[count - 1];
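        // Percentile indices: floor(count * p), clamped to the last element.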
let p95_idx = ((count as f64 * 0.95) as usize).min(count - 1);
let p99_idx = ((count as f64 * 0.99) as usize).min(count - 1);
Self {
count,
mean_ns: mean,
median_ns: median,
std_dev_ns: std_dev,
min_ns: min,
max_ns: max,
p95_ns: durations[p95_idx] as f64,
p99_ns: durations[p99_idx] as f64,
}
}
pub fn coefficient_of_variation(&self) -> f64 {
if self.mean_ns > 0.0 {
self.std_dev_ns / self.mean_ns
} else {
0.0
}
}
pub fn is_stable(&self, cv_threshold: f64) -> bool {
self.coefficient_of_variation() < cv_threshold
}
}
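/// A recorded baseline for one operation on one platform and version, used as
/// the reference point for regression checks.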
#[derive(Debug, Clone)]
pub struct PerfBaseline {
pub operation: String,
pub platform: String,
pub version: String,
pub statistics: PerfStatistics,
pub timestamp: u64,
}
impl PerfBaseline {
pub fn new(
operation: String,
platform: String,
version: String,
statistics: PerfStatistics,
) -> Self {
Self {
operation,
platform,
version,
statistics,
timestamp: current_timestamp(),
}
}
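    /// Compares current statistics against this baseline, producing ratios and
    /// percentage changes for mean, median, and p95. Baseline values of zero
    /// produce non-finite ratios.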
pub fn compare(&self, current: &PerfStatistics) -> PerfComparison {
let mean_ratio = current.mean_ns / self.statistics.mean_ns;
let median_ratio = current.median_ns / self.statistics.median_ns;
let p95_ratio = current.p95_ns / self.statistics.p95_ns;
let mean_change_pct = (mean_ratio - 1.0) * 100.0;
let median_change_pct = (median_ratio - 1.0) * 100.0;
let p95_change_pct = (p95_ratio - 1.0) * 100.0;
PerfComparison {
baseline: self.clone(),
current: current.clone(),
mean_ratio,
median_ratio,
p95_ratio,
mean_change_pct,
median_change_pct,
p95_change_pct,
}
}
}
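/// The result of comparing current statistics against a [`PerfBaseline`]:
/// ratios and percentage changes for the mean, median, and p95 durations.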
#[derive(Debug, Clone)]
pub struct PerfComparison {
pub baseline: PerfBaseline,
pub current: PerfStatistics,
pub mean_ratio: f64,
pub median_ratio: f64,
pub p95_ratio: f64,
pub mean_change_pct: f64,
pub median_change_pct: f64,
pub p95_change_pct: f64,
}
impl PerfComparison {
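    /// Returns `true` if any of mean, median, or p95 slowed down by more than
    /// `threshold_pct` percent relative to the baseline.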
pub fn is_regression(&self, threshold_pct: f64) -> bool {
self.mean_change_pct > threshold_pct
|| self.median_change_pct > threshold_pct
|| self.p95_change_pct > threshold_pct
}
pub fn is_improvement(&self, threshold_pct: f64) -> bool {
const EPSILON: f64 = 1e-10;
self.mean_change_pct <= -threshold_pct + EPSILON
&& self.median_change_pct <= -threshold_pct + EPSILON
&& self.p95_change_pct <= -threshold_pct + EPSILON
}
pub fn regression_severity(&self) -> RegressionSeverity {
let max_change = self
.mean_change_pct
.max(self.median_change_pct)
.max(self.p95_change_pct);
if max_change < 5.0 {
RegressionSeverity::None
} else if max_change < 10.0 {
RegressionSeverity::Minor
} else if max_change < 25.0 {
RegressionSeverity::Moderate
} else if max_change < 50.0 {
RegressionSeverity::Major
} else {
RegressionSeverity::Critical
}
}
}
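/// Severity buckets for a detected regression, ordered from least to most
/// severe.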
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum RegressionSeverity {
None,
    Minor,
    Moderate,
    Major,
    Critical,
}
impl RegressionSeverity {
pub fn description(&self) -> &str {
match self {
Self::None => "No regression",
Self::Minor => "Minor regression (5-10%)",
Self::Moderate => "Moderate regression (10-25%)",
Self::Major => "Major regression (25-50%)",
Self::Critical => "Critical regression (>50%)",
}
}
}
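/// Stores per-operation, per-platform baselines and checks new measurements
/// against them using a configurable percentage threshold (default 10%).
///
/// A minimal usage sketch (marked `ignore` so it is not compiled as a doctest;
/// baseline and measurement construction are elided, and the platform/version
/// strings are placeholders):
///
/// ```ignore
/// let mut tracker = RegressionTracker::new("x86_64-linux".to_string(), "1.2.0".to_string())
///     .with_threshold(15.0);
/// tracker.add_baseline(baseline);
/// let report = tracker.generate_report(&measurements);
/// assert!(!report.has_regressions());
/// ```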
#[derive(Debug, Clone)]
pub struct RegressionTracker {
baselines: HashMap<String, HashMap<String, PerfBaseline>>,
threshold_pct: f64,
platform: String,
version: String,
}
impl RegressionTracker {
pub fn new(platform: String, version: String) -> Self {
Self {
baselines: HashMap::new(),
            threshold_pct: 10.0,
            platform,
version,
}
}
pub fn with_threshold(mut self, threshold_pct: f64) -> Self {
self.threshold_pct = threshold_pct;
self
}
pub fn add_baseline(&mut self, baseline: PerfBaseline) {
self.baselines
.entry(baseline.operation.clone())
            .or_default()
.insert(baseline.platform.clone(), baseline);
}
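    /// Looks up the baseline for `operation` on this tracker's platform and
    /// compares `current` against it. Returns `None` if no baseline is stored.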
pub fn check_regression(
&self,
operation: &str,
current: &PerfStatistics,
) -> Option<PerfComparison> {
let baseline = self
.baselines
.get(operation)
.and_then(|platforms| platforms.get(&self.platform))?;
Some(baseline.compare(current))
}
pub fn find_regressions(
&self,
measurements: &[(String, PerfStatistics)],
) -> Vec<PerfComparison> {
let mut regressions = Vec::new();
for (operation, stats) in measurements {
if let Some(comparison) = self.check_regression(operation, stats) {
if comparison.is_regression(self.threshold_pct) {
regressions.push(comparison);
}
}
}
regressions
}
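    /// Builds a [`RegressionReport`] over every measurement that has a stored
    /// baseline, splitting the comparisons into regressions and improvements
    /// at the configured threshold.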
pub fn generate_report(&self, measurements: &[(String, PerfStatistics)]) -> RegressionReport {
let comparisons: Vec<_> = measurements
.iter()
.filter_map(|(op, stats)| self.check_regression(op, stats))
.collect();
let regressions: Vec<_> = comparisons
.iter()
.filter(|c| c.is_regression(self.threshold_pct))
.cloned()
.collect();
let improvements: Vec<_> = comparisons
.iter()
.filter(|c| c.is_improvement(self.threshold_pct))
.cloned()
.collect();
RegressionReport {
platform: self.platform.clone(),
version: self.version.clone(),
total_operations: measurements.len(),
regressions,
improvements,
threshold_pct: self.threshold_pct,
timestamp: current_timestamp(),
}
}
}
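/// A summary of one regression-check run: which operations regressed, which
/// improved, and against what threshold.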
#[derive(Debug, Clone)]
pub struct RegressionReport {
pub platform: String,
pub version: String,
pub total_operations: usize,
pub regressions: Vec<PerfComparison>,
pub improvements: Vec<PerfComparison>,
pub threshold_pct: f64,
pub timestamp: u64,
}
impl RegressionReport {
pub fn has_regressions(&self) -> bool {
!self.regressions.is_empty()
}
pub fn worst_regression(&self) -> Option<&PerfComparison> {
self.regressions.iter().max_by(|a, b| {
a.mean_change_pct
.partial_cmp(&b.mean_change_pct)
.expect("mean_change_pct should be comparable (no NaN)")
})
}
pub fn best_improvement(&self) -> Option<&PerfComparison> {
self.improvements.iter().min_by(|a, b| {
a.mean_change_pct
.partial_cmp(&b.mean_change_pct)
.expect("mean_change_pct should be comparable (no NaN)")
})
}
pub fn regressions_by_severity(&self) -> HashMap<RegressionSeverity, usize> {
let mut counts = HashMap::new();
for regression in &self.regressions {
let severity = regression.regression_severity();
*counts.entry(severity).or_insert(0) += 1;
}
counts
}
}
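/// Runs timed benchmarks of fallible closures: a warmup phase followed by a
/// measured phase whose total elapsed time is averaged per iteration. Only
/// available with the `std` feature.
///
/// A usage sketch (marked `ignore` so it is not compiled as a doctest; the
/// closure body and operation name are placeholders):
///
/// ```ignore
/// let runner = BenchmarkRunner::new().with_warmup(5).with_iterations(50);
/// let measurement = runner.run("parse_header", || {
///     // ... operation under test ...
///     Ok(())
/// })?;
/// println!("avg {} ns, {:?} ops/s", measurement.duration_ns, measurement.throughput);
/// ```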
#[cfg(feature = "std")]
pub struct BenchmarkRunner {
warmup_iterations: usize,
measurement_iterations: usize,
min_samples: usize,
}
#[cfg(feature = "std")]
impl BenchmarkRunner {
pub fn new() -> Self {
Self {
warmup_iterations: 10,
measurement_iterations: 100,
min_samples: 50,
}
}
pub fn with_warmup(mut self, iterations: usize) -> Self {
self.warmup_iterations = iterations;
self
}
pub fn with_iterations(mut self, iterations: usize) -> Self {
self.measurement_iterations = iterations;
self
}
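    /// Runs `f` for the configured warmup iterations, then times the
    /// measurement iterations as a single block and reports the mean
    /// per-iteration duration plus the derived throughput (ops/sec). Any error
    /// from `f` aborts the run. Very fast operations can truncate to an
    /// average of 0 ns, which yields an infinite throughput.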
pub fn run<F>(&self, operation: &str, mut f: F) -> Result<PerfMeasurement>
where
F: FnMut() -> Result<()>,
{
for _ in 0..self.warmup_iterations {
f()?;
}
let start = Instant::now();
for _ in 0..self.measurement_iterations {
f()?;
}
let duration = start.elapsed();
let duration_ns = duration.as_nanos() as u64;
let avg_duration_ns = duration_ns / self.measurement_iterations as u64;
let throughput = 1_000_000_000.0 / avg_duration_ns as f64;
Ok(
PerfMeasurement::new(operation.to_string(), avg_duration_ns)
.with_throughput(throughput),
)
}
pub fn run_with_samples<F>(&self, operation: &str, mut f: F) -> Result<Vec<PerfMeasurement>>
where
F: FnMut() -> Result<()>,
{
let mut measurements = Vec::with_capacity(self.min_samples);
for _ in 0..self.min_samples {
measurements.push(self.run(operation, &mut f)?);
}
Ok(measurements)
}
}
#[cfg(feature = "std")]
impl Default for BenchmarkRunner {
fn default() -> Self {
Self::new()
}
}
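/// Returns seconds since the UNIX epoch when `std` is available, and 0
/// otherwise.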
fn current_timestamp() -> u64 {
#[cfg(feature = "std")]
{
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.expect("system time should be after UNIX epoch")
.as_secs()
}
#[cfg(not(feature = "std"))]
{
        // No wall clock without `std`; fall back to 0 seconds.
        0
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_perf_measurement() {
let measurement = PerfMeasurement::new("test_op".to_string(), 1_000_000)
.with_throughput(1000.0)
.with_memory(1024);
assert_eq!(measurement.duration_ms(), 1.0);
assert_eq!(measurement.throughput, Some(1000.0));
assert_eq!(measurement.memory_bytes, Some(1024));
}
#[test]
fn test_perf_statistics() {
let measurements = vec![
PerfMeasurement::new("op".to_string(), 100),
PerfMeasurement::new("op".to_string(), 200),
PerfMeasurement::new("op".to_string(), 150),
PerfMeasurement::new("op".to_string(), 180),
PerfMeasurement::new("op".to_string(), 120),
];
let stats = PerfStatistics::from_measurements(&measurements);
assert_eq!(stats.count, 5);
assert_eq!(stats.min_ns, 100);
assert_eq!(stats.max_ns, 200);
assert_eq!(stats.median_ns, 150.0);
}
#[test]
fn test_regression_detection() {
let baseline_stats = PerfStatistics {
count: 100,
mean_ns: 1000.0,
median_ns: 1000.0,
std_dev_ns: 50.0,
min_ns: 900,
max_ns: 1100,
p95_ns: 1050.0,
p99_ns: 1080.0,
};
let baseline = PerfBaseline::new(
"test_op".to_string(),
"test_platform".to_string(),
"1.0.0".to_string(),
baseline_stats,
);
let current_stats = PerfStatistics {
count: 100,
            mean_ns: 1200.0,
            median_ns: 1200.0,
std_dev_ns: 60.0,
min_ns: 1000,
max_ns: 1300,
p95_ns: 1260.0,
p99_ns: 1290.0,
};
        let comparison = baseline.compare(&current_stats);
assert!(comparison.is_regression(10.0));
assert_eq!(
comparison.regression_severity(),
RegressionSeverity::Moderate
);
}
#[test]
fn test_regression_tracker() {
let mut tracker = RegressionTracker::new("test_platform".to_string(), "1.0.0".to_string())
.with_threshold(15.0);
let baseline_stats = PerfStatistics {
count: 100,
mean_ns: 1000.0,
median_ns: 1000.0,
std_dev_ns: 50.0,
min_ns: 900,
max_ns: 1100,
p95_ns: 1050.0,
p99_ns: 1080.0,
};
let baseline = PerfBaseline::new(
"test_op".to_string(),
"test_platform".to_string(),
"0.9.0".to_string(),
baseline_stats,
);
tracker.add_baseline(baseline);
let current_stats = PerfStatistics {
count: 100,
            mean_ns: 1100.0,
            median_ns: 1100.0,
std_dev_ns: 55.0,
min_ns: 950,
max_ns: 1200,
p95_ns: 1155.0,
p99_ns: 1188.0,
};
let comparison = tracker
.check_regression("test_op", ¤t_stats)
.expect("check_regression should succeed");
assert!(!comparison.is_regression(15.0));
let worse_stats = PerfStatistics {
count: 100,
            mean_ns: 1200.0,
            median_ns: 1200.0,
std_dev_ns: 60.0,
min_ns: 1000,
max_ns: 1300,
p95_ns: 1260.0,
p99_ns: 1290.0,
};
let comparison = tracker
.check_regression("test_op", &worse_stats)
.expect("check_regression should succeed");
assert!(comparison.is_regression(15.0));
}
#[test]
fn test_regression_report() {
let mut tracker = RegressionTracker::new("test_platform".to_string(), "1.0.0".to_string());
let baseline_stats = PerfStatistics {
count: 100,
mean_ns: 1000.0,
median_ns: 1000.0,
std_dev_ns: 50.0,
min_ns: 900,
max_ns: 1100,
p95_ns: 1050.0,
p99_ns: 1080.0,
};
tracker.add_baseline(PerfBaseline::new(
"op1".to_string(),
"test_platform".to_string(),
"0.9.0".to_string(),
baseline_stats.clone(),
));
tracker.add_baseline(PerfBaseline::new(
"op2".to_string(),
"test_platform".to_string(),
"0.9.0".to_string(),
baseline_stats,
));
let measurements = vec![
(
"op1".to_string(),
PerfStatistics {
count: 100,
                    mean_ns: 1200.0,
                    median_ns: 1200.0,
std_dev_ns: 60.0,
min_ns: 1000,
max_ns: 1300,
p95_ns: 1260.0,
p99_ns: 1290.0,
},
),
(
"op2".to_string(),
PerfStatistics {
count: 100,
                    mean_ns: 900.0,
                    median_ns: 900.0,
std_dev_ns: 45.0,
min_ns: 800,
max_ns: 1000,
p95_ns: 945.0,
p99_ns: 972.0,
},
),
];
let report = tracker.generate_report(&measurements);
assert!(report.has_regressions());
assert_eq!(report.regressions.len(), 1);
assert_eq!(report.improvements.len(), 1);
assert!(report.worst_regression().is_some());
assert!(report.best_improvement().is_some());
}
}