use crate::error::{StatsError, StatsResult};
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::parallel_ops::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::Instant;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkConfig {
pub datasizes: Vec<usize>,
pub iterations: usize,
pub track_memory: bool,
pub comparebaseline: bool,
pub test_simd: bool,
pub test_parallel: bool,
pub warmup_iterations: usize,
pub confidence_level: f64,
pub regression_threshold: f64,
}
impl Default for BenchmarkConfig {
fn default() -> Self {
Self {
datasizes: vec![100, 1000, 10000, 100000, 1000000],
iterations: 100,
track_memory: true,
comparebaseline: true,
test_simd: true,
test_parallel: true,
warmup_iterations: 10,
confidence_level: 0.95,
regression_threshold: 5.0, }
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkMetrics {
pub function_name: String,
pub datasize: usize,
pub timing: TimingStats,
pub memory: Option<MemoryStats>,
pub algorithm_config: AlgorithmConfig,
pub throughput: f64,
pub baseline_comparison: Option<f64>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingStats {
pub mean_ns: f64,
pub std_dev_ns: f64,
pub min_ns: f64,
pub max_ns: f64,
pub median_ns: f64,
pub p95_ns: f64,
pub p99_ns: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
pub peak_bytes: usize,
pub allocations: usize,
pub deallocations: usize,
pub avg_allocationsize: f64,
pub fragmentation_score: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlgorithmConfig {
pub simd_enabled: bool,
pub parallel_enabled: bool,
pub thread_count: Option<usize>,
pub simd_width: Option<usize>,
pub algorithm_variant: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkReport {
pub timestamp: String,
pub config: BenchmarkConfig,
pub metrics: Vec<BenchmarkMetrics>,
pub analysis: PerformanceAnalysis,
pub system_info: SystemInfo,
pub recommendations: Vec<OptimizationRecommendation>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceAnalysis {
pub overall_score: f64,
pub simd_effectiveness: HashMap<String, f64>,
pub parallel_effectiveness: HashMap<String, f64>,
pub memory_efficiency: f64,
pub regressions: Vec<PerformanceRegression>,
pub scaling_analysis: ScalingAnalysis,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceRegression {
pub function_name: String,
pub datasize: usize,
pub regression_percent: f64,
pub confidence: f64,
pub suspected_cause: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalingAnalysis {
pub complexity_analysis: HashMap<String, ComplexityClass>,
pub threshold_recommendations: HashMap<String, Vec<ThresholdRecommendation>>,
pub memory_scaling: HashMap<String, MemoryScaling>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComplexityClass {
Constant,
Logarithmic,
Linear,
LinearLogarithmic,
Quadratic,
Cubic,
Exponential,
Unknown,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThresholdRecommendation {
pub threshold: usize,
pub recommendation: String,
pub improvement_factor: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryScaling {
pub scaling_factor: f64,
pub base_overhead: usize,
pub efficiency_trend: MemoryTrend,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryTrend {
Improving,
Degrading,
Stable,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemInfo {
pub cpu_info: String,
pub total_memory: usize,
pub cpu_cores: usize,
pub simd_capabilities: Vec<String>,
pub os_info: String,
pub rust_version: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendation {
pub priority: u8,
pub target: String,
pub strategy: String,
pub expected_impact: String,
pub complexity: String,
}
pub struct BenchmarkSuite {
config: BenchmarkConfig,
memory_tracker: Option<Arc<Mutex<MemoryTracker>>>,
#[allow(dead_code)]
baseline_cache: HashMap<String, f64>,
}
struct MemoryTracker {
initial_memory: usize,
peak_memory: usize,
allocations: usize,
deallocations: usize,
allocationsizes: Vec<usize>,
}
impl BenchmarkSuite {
pub fn new() -> Self {
Self::with_config(BenchmarkConfig::default())
}
pub fn with_config(config: BenchmarkConfig) -> Self {
let memory_tracker = if config.track_memory {
Some(Arc::new(Mutex::new(MemoryTracker::new())))
} else {
None
};
Self {
config,
memory_tracker,
baseline_cache: HashMap::new(),
}
}
pub fn benchmark_descriptive_stats(&mut self) -> StatsResult<BenchmarkReport> {
let mut metrics = Vec::new();
let _start_time = Instant::now();
for &size in &self.config.datasizes {
let data = self.generate_testdata(size)?;
metrics.push(
self.benchmark_function("mean", size, || crate::descriptive::mean(&data.view()))?,
);
metrics.push(self.benchmark_function("variance", size, || {
crate::descriptive::var(&data.view(), 1, None)
})?);
metrics.push(self.benchmark_function("std_dev", size, || {
crate::descriptive::std(&data.view(), 1, None)
})?);
if self.config.test_simd {
metrics.push(self.benchmark_function("mean_simd", size, || {
crate::descriptive_simd::mean_simd(&data.view())
})?);
metrics.push(self.benchmark_function("variance_simd", size, || {
crate::descriptive_simd::variance_simd(&data.view(), 1)
})?);
metrics.push(self.benchmark_function("std_simd", size, || {
crate::descriptive_simd::std_simd(&data.view(), 1)
})?);
}
if self.config.test_parallel && size > 10000 {
metrics.push(self.benchmark_function("mean_parallel", size, || {
crate::parallel_stats::mean_parallel(&data.view())
})?);
metrics.push(self.benchmark_function("variance_parallel", size, || {
crate::parallel_stats::variance_parallel(&data.view(), 1)
})?);
}
}
let analysis = self.analyze_performance(&metrics)?;
let system_info = self.collect_system_info();
let recommendations = self.generate_recommendations(&metrics, &analysis);
Ok(BenchmarkReport {
timestamp: chrono::Utc::now().to_rfc3339(),
config: self.config.clone(),
metrics,
analysis,
system_info,
recommendations,
})
}
pub fn benchmark_correlation(&mut self) -> StatsResult<BenchmarkReport> {
let mut metrics = Vec::new();
for &size in &self.config.datasizes {
let data_x = self.generate_testdata(size)?;
let data_y = self.generate_correlateddata(&data_x, 0.7)?;
metrics.push(self.benchmark_function("pearson_correlation", size, || {
crate::correlation::pearson_r(&data_x.view(), &data_y.view())
})?);
metrics.push(self.benchmark_function("spearman_correlation", size, || {
crate::correlation::spearman_r(&data_x.view(), &data_y.view())
})?);
if self.config.test_simd {
metrics.push(
self.benchmark_function("pearson_correlation_simd", size, || {
crate::correlation_simd::pearson_r_simd(&data_x.view(), &data_y.view())
})?,
);
}
if size <= 100000 {
let matrixdata = self.generate_matrixdata(size, 5)?; metrics.push(self.benchmark_function("correlation_matrix", size, || {
crate::correlation::corrcoef(&matrixdata.view(), "pearson")
})?);
}
}
let analysis = self.analyze_performance(&metrics)?;
let system_info = self.collect_system_info();
let recommendations = self.generate_recommendations(&metrics, &analysis);
Ok(BenchmarkReport {
timestamp: chrono::Utc::now().to_rfc3339(),
config: self.config.clone(),
metrics,
analysis,
system_info,
recommendations,
})
}
pub fn benchmark_distributions(&mut self) -> StatsResult<BenchmarkReport> {
let mut metrics = Vec::new();
for &size in &self.config.datasizes {
let normal = crate::distributions::norm(0.0f64, 1.0)?;
metrics.push(self.benchmark_function("normal_pdf_single", 1, || Ok(normal.pdf(0.5)))?);
metrics.push(self.benchmark_function("normal_cdf_single", 1, || Ok(normal.cdf(1.96)))?);
metrics.push(self.benchmark_function("normal_rvs", size, || normal.rvs(size))?);
if size <= 100000 {
let gamma = crate::distributions::gamma(2.0f64, 1.0, 0.0)?;
metrics.push(self.benchmark_function("gamma_rvs", size, || gamma.rvs(size))?);
let beta = crate::distributions::beta(2.0f64, 3.0, 0.0, 1.0)?;
metrics.push(self.benchmark_function("beta_rvs", size, || beta.rvs(size))?);
}
}
let analysis = self.analyze_performance(&metrics)?;
let system_info = self.collect_system_info();
let recommendations = self.generate_recommendations(&metrics, &analysis);
Ok(BenchmarkReport {
timestamp: chrono::Utc::now().to_rfc3339(),
config: self.config.clone(),
metrics,
analysis,
system_info,
recommendations,
})
}
fn benchmark_function<F, R>(
&self,
function_name: &str,
datasize: usize,
mut func: F,
) -> StatsResult<BenchmarkMetrics>
where
F: FnMut() -> StatsResult<R>,
{
for _ in 0..self.config.warmup_iterations {
let _ = func();
}
let mut timings = Vec::with_capacity(self.config.iterations);
let mut memory_stats = None;
if let Some(ref tracker) = self.memory_tracker {
let mut tracker_guard = tracker.lock().expect("Operation failed");
tracker_guard.reset();
}
for _ in 0..self.config.iterations {
let start = Instant::now();
let _ = func()?;
let duration = start.elapsed();
timings.push(duration.as_nanos() as f64);
}
if let Some(ref tracker) = self.memory_tracker {
let tracker_guard = tracker.lock().expect("Operation failed");
memory_stats = Some(tracker_guard.get_stats());
}
timings.sort_by(|a, b| a.partial_cmp(b).expect("Operation failed"));
let timing_stats = TimingStats {
mean_ns: timings.iter().sum::<f64>() / timings.len() as f64,
std_dev_ns: self.calculate_std_dev(&timings),
min_ns: timings[0],
max_ns: timings[timings.len() - 1],
median_ns: timings[timings.len() / 2],
p95_ns: timings[(timings.len() as f64 * 0.95) as usize],
p99_ns: timings[(timings.len() as f64 * 0.99) as usize],
};
let algorithm_config = self.detect_algorithm_config(function_name, datasize);
let throughput = if timing_stats.mean_ns > 0.0 {
1_000_000_000.0 / timing_stats.mean_ns * datasize as f64
} else {
0.0
};
let baseline_comparison = if self.config.comparebaseline {
self.getbaseline_comparison(function_name, datasize, timing_stats.mean_ns)
} else {
None
};
Ok(BenchmarkMetrics {
function_name: function_name.to_string(),
datasize,
timing: timing_stats,
memory: memory_stats,
algorithm_config,
throughput,
baseline_comparison,
})
}
fn generate_testdata(&self, size: usize) -> StatsResult<Array1<f64>> {
use scirs2_core::random::{Distribution, Normal};
let mut rng = scirs2_core::random::thread_rng();
let normal = Normal::new(0.0, 1.0).map_err(|e| {
StatsError::ComputationError(format!("Failed to create normal distribution: {}", e))
})?;
let data: Vec<f64> = (0..size).map(|_| normal.sample(&mut rng)).collect();
Ok(Array1::from_vec(data))
}
fn generate_correlateddata(
&self,
basedata: &Array1<f64>,
correlation: f64,
) -> StatsResult<Array1<f64>> {
use scirs2_core::random::{Distribution, Normal};
let mut rng = scirs2_core::random::thread_rng();
let normal = Normal::new(0.0, 1.0).map_err(|e| {
StatsError::ComputationError(format!("Failed to create normal distribution: {}", e))
})?;
let noise_factor = (1.0 - correlation * correlation).sqrt();
let correlateddata: Vec<f64> = basedata
.iter()
.map(|&x| correlation * x + noise_factor * normal.sample(&mut rng))
.collect();
Ok(Array1::from_vec(correlateddata))
}
fn generate_matrixdata(&self, rows: usize, cols: usize) -> StatsResult<Array2<f64>> {
use scirs2_core::random::{Distribution, Normal};
let mut rng = scirs2_core::random::thread_rng();
let normal = Normal::new(0.0, 1.0).map_err(|e| {
StatsError::ComputationError(format!("Failed to create normal distribution: {}", e))
})?;
let data: Vec<f64> = (0..rows * cols).map(|_| normal.sample(&mut rng)).collect();
Array2::from_shape_vec((rows, cols), data)
.map_err(|e| StatsError::ComputationError(format!("Failed to create matrix: {}", e)))
}
fn calculate_std_dev(&self, values: &[f64]) -> f64 {
let mean = values.iter().sum::<f64>() / values.len() as f64;
let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / values.len() as f64;
variance.sqrt()
}
fn detect_algorithm_config(&self, function_name: &str, datasize: usize) -> AlgorithmConfig {
let simd_enabled = function_name.contains("simd")
|| (datasize > 64
&& scirs2_core::simd_ops::PlatformCapabilities::detect().simd_available);
let parallel_enabled =
function_name.contains("parallel") || (datasize > 10000 && num_threads() > 1);
AlgorithmConfig {
simd_enabled,
parallel_enabled,
thread_count: if parallel_enabled {
Some(num_threads())
} else {
None
},
simd_width: if simd_enabled { Some(8) } else { None }, algorithm_variant: function_name.to_string(),
}
}
fn getbaseline_comparison(
&self,
_function_name: &str,
datasize: usize,
current_time_ns: f64,
) -> Option<f64> {
let simulatedbaseline = current_time_ns * 1.2; Some(simulatedbaseline / current_time_ns)
}
fn analyze_performance(
&self,
metrics: &[BenchmarkMetrics],
) -> StatsResult<PerformanceAnalysis> {
let mut simd_effectiveness = HashMap::new();
let mut parallel_effectiveness = HashMap::new();
let mut regressions = Vec::new();
for metric in metrics {
if metric.algorithm_config.simd_enabled {
let base_name = metric.function_name.replace("_simd", "");
if let Some(base_metric) = metrics.iter().find(|m| {
m.function_name == base_name
&& m.datasize == metric.datasize
&& !m.algorithm_config.simd_enabled
}) {
let improvement = base_metric.timing.mean_ns / metric.timing.mean_ns;
simd_effectiveness
.insert(format!("{}_{}", base_name, metric.datasize), improvement);
}
}
}
for metric in metrics {
if metric.algorithm_config.parallel_enabled {
let base_name = metric.function_name.replace("_parallel", "");
if let Some(base_metric) = metrics.iter().find(|m| {
m.function_name == base_name
&& m.datasize == metric.datasize
&& !m.algorithm_config.parallel_enabled
}) {
let improvement = base_metric.timing.mean_ns / metric.timing.mean_ns;
parallel_effectiveness
.insert(format!("{}_{}", base_name, metric.datasize), improvement);
}
}
}
for metric in metrics {
if let Some(baseline_ratio) = metric.baseline_comparison {
if baseline_ratio < (1.0 - self.config.regression_threshold / 100.0) {
let regression_percent = (1.0 - baseline_ratio) * 100.0;
regressions.push(PerformanceRegression {
function_name: metric.function_name.clone(),
datasize: metric.datasize,
regression_percent,
confidence: self.config.confidence_level,
suspected_cause: "Algorithm or system change".to_string(),
});
}
}
}
let mean_throughput =
metrics.iter().map(|m| m.throughput).sum::<f64>() / metrics.len() as f64;
let overall_score = (mean_throughput / 1_000_000.0).min(100.0);
let memory_efficiency = metrics
.iter()
.filter_map(|m| m.memory.as_ref())
.map(|mem| {
100.0 * (1.0 - mem.fragmentation_score)
})
.sum::<f64>()
/ metrics.len() as f64;
let scaling_analysis = self.analyze_scaling(metrics)?;
Ok(PerformanceAnalysis {
overall_score,
simd_effectiveness,
parallel_effectiveness,
memory_efficiency,
regressions,
scaling_analysis,
})
}
fn analyze_scaling(&self, metrics: &[BenchmarkMetrics]) -> StatsResult<ScalingAnalysis> {
let mut complexity_analysis = HashMap::new();
let mut threshold_recommendations = HashMap::new();
let mut memory_scaling = HashMap::new();
let mut function_groups: HashMap<String, Vec<&BenchmarkMetrics>> = HashMap::new();
for metric in metrics {
function_groups
.entry(metric.function_name.clone())
.or_default()
.push(metric);
}
for (function_name, function_metrics) in function_groups {
if function_metrics.len() < 3 {
continue; }
let mut sorted_metrics = function_metrics;
sorted_metrics.sort_by_key(|m| m.datasize);
let complexity = self.classify_complexity(&sorted_metrics);
complexity_analysis.insert(function_name.clone(), complexity);
let thresholds = self.generate_thresholds(&sorted_metrics);
if !thresholds.is_empty() {
threshold_recommendations.insert(function_name.clone(), thresholds);
}
if let Some(scaling) = self.analyze_memory_scaling(&sorted_metrics) {
memory_scaling.insert(function_name, scaling);
}
}
Ok(ScalingAnalysis {
complexity_analysis,
threshold_recommendations,
memory_scaling,
})
}
fn classify_complexity(&self, metrics: &[&BenchmarkMetrics]) -> ComplexityClass {
if metrics.len() < 3 {
return ComplexityClass::Unknown;
}
let sizes: Vec<f64> = metrics.iter().map(|m| m.datasize as f64).collect();
let times: Vec<f64> = metrics.iter().map(|m| m.timing.mean_ns).collect();
let size_ratios: Vec<f64> = sizes.windows(2).map(|w| w[1] / w[0]).collect();
let time_ratios: Vec<f64> = times.windows(2).map(|w| w[1] / w[0]).collect();
if time_ratios.is_empty() {
return ComplexityClass::Unknown;
}
let avg_time_ratio = time_ratios.iter().sum::<f64>() / time_ratios.len() as f64;
let avgsize_ratio = size_ratios.iter().sum::<f64>() / size_ratios.len() as f64;
if avg_time_ratio < 1.1 {
ComplexityClass::Constant
} else if avg_time_ratio / avgsize_ratio < 1.5 {
ComplexityClass::Linear
} else if avg_time_ratio / (avgsize_ratio * avgsize_ratio.log2()) < 2.0 {
ComplexityClass::LinearLogarithmic
} else if avg_time_ratio / (avgsize_ratio * avgsize_ratio) < 2.0 {
ComplexityClass::Quadratic
} else {
ComplexityClass::Unknown
}
}
fn generate_thresholds(&self, metrics: &[&BenchmarkMetrics]) -> Vec<ThresholdRecommendation> {
Vec::new()
}
fn analyze_memory_scaling(&self, metrics: &[&BenchmarkMetrics]) -> Option<MemoryScaling> {
let memory_data: Vec<_> = metrics
.iter()
.filter_map(|m| m.memory.as_ref().map(|mem| (m.datasize, mem.peak_bytes)))
.collect();
if memory_data.len() < 2 {
return None;
}
let (sizes, memories): (Vec<f64>, Vec<f64>) = memory_data
.iter()
.map(|(size, mem)| (*size as f64, *mem as f64))
.unzip();
let scaling_factor = if sizes.len() >= 2 {
let size_growth = sizes[sizes.len() - 1] / sizes[0];
let memory_growth = memories[memories.len() - 1] / memories[0];
memory_growth / size_growth
} else {
1.0
};
Some(MemoryScaling {
scaling_factor,
base_overhead: memory_data[0].1,
efficiency_trend: MemoryTrend::Stable, })
}
fn collect_system_info(&self) -> SystemInfo {
#[cfg(target_pointer_width = "32")]
let total_memory = 512 * 1024 * 1024; #[cfg(target_pointer_width = "64")]
let total_memory = 8usize * 1024 * 1024 * 1024;
SystemInfo {
cpu_info: "Generic CPU".to_string(), total_memory,
cpu_cores: num_threads(),
simd_capabilities: vec!["SSE2".to_string(), "AVX2".to_string()], os_info: std::env::consts::OS.to_string(),
rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()),
}
}
fn generate_recommendations(
&self,
metrics: &[BenchmarkMetrics],
analysis: &PerformanceAnalysis,
) -> Vec<OptimizationRecommendation> {
let mut recommendations = Vec::new();
for (function, effectiveness) in &analysis.simd_effectiveness {
if *effectiveness < 1.5 {
recommendations.push(OptimizationRecommendation {
priority: 4,
target: function.clone(),
strategy: "Improve SIMD implementation or increase vectorization".to_string(),
expected_impact: format!("Potential {:.1}x speedup", 2.0 - effectiveness),
complexity: "Medium".to_string(),
});
}
}
if analysis.memory_efficiency < 80.0 {
recommendations.push(OptimizationRecommendation {
priority: 3,
target: "Memory Management".to_string(),
strategy: "Reduce memory fragmentation and allocation overhead".to_string(),
expected_impact: "Improved cache performance and reduced GC pressure".to_string(),
complexity: "High".to_string(),
});
}
for regression in &analysis.regressions {
recommendations.push(OptimizationRecommendation {
priority: 5,
target: regression.function_name.clone(),
strategy: format!(
"Investigate {:.1}% performance regression",
regression.regression_percent
),
expected_impact: "Restore baseline performance".to_string(),
complexity: "Variable".to_string(),
});
}
recommendations
}
}
impl MemoryTracker {
fn new() -> Self {
Self {
initial_memory: 0,
peak_memory: 0,
allocations: 0,
deallocations: 0,
allocationsizes: Vec::new(),
}
}
fn reset(&mut self) {
self.initial_memory = 0; self.peak_memory = 0;
self.allocations = 0;
self.deallocations = 0;
self.allocationsizes.clear();
}
fn get_stats(&self) -> MemoryStats {
let avg_allocationsize = if self.allocations > 0 {
self.allocationsizes.iter().sum::<usize>() as f64 / self.allocations as f64
} else {
0.0
};
let fragmentation_score = if self.peak_memory > 0 {
1.0 - (self.allocations as f64 / self.peak_memory as f64)
} else {
0.0
};
MemoryStats {
peak_bytes: self.peak_memory,
allocations: self.allocations,
deallocations: self.deallocations,
avg_allocationsize,
fragmentation_score: fragmentation_score.max(0.0).min(1.0),
}
}
}
impl Default for BenchmarkSuite {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_benchmark_suite_creation() {
let suite = BenchmarkSuite::new();
assert_eq!(suite.config.datasizes.len(), 5);
assert_eq!(suite.config.iterations, 100);
}
#[test]
fn test_testdata_generation() {
let suite = BenchmarkSuite::new();
let data = suite.generate_testdata(1000).expect("Operation failed");
assert_eq!(data.len(), 1000);
}
#[test]
fn test_correlateddata_generation() {
let suite = BenchmarkSuite::new();
let basedata = suite.generate_testdata(100).expect("Operation failed");
let correlateddata = suite
.generate_correlateddata(&basedata, 0.8)
.expect("Operation failed");
assert_eq!(correlateddata.len(), 100);
}
#[test]
fn test_complexity_classification() {
let suite = BenchmarkSuite::new();
let metric1 = BenchmarkMetrics {
function_name: "test".to_string(),
datasize: 100,
timing: TimingStats {
mean_ns: 1000.0,
std_dev_ns: 100.0,
min_ns: 900.0,
max_ns: 1100.0,
median_ns: 1000.0,
p95_ns: 1050.0,
p99_ns: 1080.0,
},
memory: None,
algorithm_config: AlgorithmConfig {
simd_enabled: false,
parallel_enabled: false,
thread_count: None,
simd_width: None,
algorithm_variant: "test".to_string(),
},
throughput: 100000.0,
baseline_comparison: None,
};
let metric2 = BenchmarkMetrics {
function_name: "test".to_string(),
datasize: 1000,
timing: TimingStats {
mean_ns: 10000.0,
std_dev_ns: 1000.0,
min_ns: 9000.0,
max_ns: 11000.0,
median_ns: 10000.0,
p95_ns: 10500.0,
p99_ns: 10800.0,
},
memory: None,
algorithm_config: AlgorithmConfig {
simd_enabled: false,
parallel_enabled: false,
thread_count: None,
simd_width: None,
algorithm_variant: "test".to_string(),
},
throughput: 100000.0,
baseline_comparison: None,
};
let metric3 = BenchmarkMetrics {
function_name: "test".to_string(),
datasize: 10000,
timing: TimingStats {
mean_ns: 100000.0,
std_dev_ns: 10000.0,
min_ns: 90000.0,
max_ns: 110000.0,
median_ns: 100000.0,
p95_ns: 105000.0,
p99_ns: 108000.0,
},
memory: None,
algorithm_config: AlgorithmConfig {
simd_enabled: false,
parallel_enabled: false,
thread_count: None,
simd_width: None,
algorithm_variant: "test".to_string(),
},
throughput: 100000.0,
baseline_comparison: None,
};
let metrics = vec![&metric1, &metric2, &metric3];
let complexity = suite.classify_complexity(&metrics);
assert!(matches!(complexity, ComplexityClass::Linear));
}
}