pub mod performance_targets;
use crate::Result;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use tracing::{info, warn};
pub use performance_targets::{
BatteryMeasurement, BenchmarkConfig, BenchmarkResults, DeviceCategoryResult,
DeviceCoverageResults, FrameworkSizeResults, LatencyMeasurement, PerformanceBenchmark,
PerformanceTargets, TargetAchievement,
};
/// Configuration for the top-level mobile benchmark suite.
///
/// Wraps the performance targets plus suite-level switches for profiling
/// depth and result export.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkSuiteConfig {
    /// Performance targets the benchmark run is validated against.
    pub targets: PerformanceTargets,
    /// Forwarded to `BenchmarkConfig`; enables detailed profiling in the
    /// underlying `PerformanceBenchmark`.
    pub detailed_profiling: bool,
    /// When true, results are serialized to disk after each run.
    pub export_results: bool,
    /// Export destination; when `None`, `export_results` falls back to a
    /// path under /tmp.
    pub export_path: Option<String>,
}
impl Default for BenchmarkSuiteConfig {
    /// Default suite configuration: default targets, detailed profiling on,
    /// export on, export path unset (the exporter picks a /tmp fallback).
    fn default() -> Self {
        let targets = PerformanceTargets::default();
        Self {
            targets,
            export_path: None,
            export_results: true,
            detailed_profiling: true,
        }
    }
}
/// High-level outcome of a full benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkSummary {
    /// Overall score copied from the detailed results (0-100 scale,
    /// asserted by the tests below).
    pub overall_score: f64,
    /// True when every individual target was met.
    pub all_targets_achieved: bool,
    /// Per-target pass/fail map keyed by target name
    /// (e.g. "inference_speed", "battery_efficiency").
    pub target_results: HashMap<String, bool>,
    /// Aggregated latency/battery/coverage/size metrics.
    pub performance_summary: PerformanceSummaryMetrics,
    /// Human-readable suggestions derived from which targets failed.
    pub recommendations: Vec<String>,
}
/// Aggregate metrics condensed from the detailed measurement lists.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSummaryMetrics {
    /// Minimum observed inference latency; 0 when no measurements exist.
    pub best_latency_ms: u64,
    /// Maximum observed inference latency; 0 when no measurements exist.
    pub worst_latency_ms: u64,
    /// Integer mean of all latency measurements; 0 when empty.
    pub avg_latency_ms: u64,
    /// Lowest estimated hourly battery drain across measurements.
    pub best_battery_efficiency: f64,
    /// Highest estimated hourly battery drain across measurements.
    pub worst_battery_efficiency: f64,
    /// Device coverage percentage from the coverage results.
    pub device_coverage_percent: f64,
    /// Measured framework size in megabytes.
    pub framework_size_mb: f64,
}
/// Orchestrates a `PerformanceBenchmark` run and condenses its results
/// into a `BenchmarkSummary`, optionally exporting them to disk.
pub struct BenchmarkSuite {
    // Suite-level configuration; targets are mirrored into the inner
    // benchmark's config (see `new` / `update_targets`).
    config: BenchmarkSuiteConfig,
    // The underlying benchmark engine this suite drives.
    performance_benchmark: PerformanceBenchmark,
}
impl BenchmarkSuite {
/// Builds a suite from `config`, deriving the inner benchmark's
/// configuration from the suite-level targets, and logs the targets.
pub fn new(config: BenchmarkSuiteConfig) -> Self {
    let benchmark_config = BenchmarkConfig {
        detailed_profiling: config.detailed_profiling,
        targets: config.targets.clone(),
        ..BenchmarkConfig::default()
    };
    let performance_benchmark = PerformanceBenchmark::new(benchmark_config);
    // Announce the configured targets up front so runs are self-describing
    // in the logs.
    info!("Created benchmark suite with targets:");
    info!(" Inference latency: <{}ms", config.targets.max_inference_latency_ms);
    info!(" Battery drain: <{}% per hour", config.targets.max_battery_drain_per_hour);
    info!(" Device coverage: >{}%", config.targets.min_device_coverage);
    info!(" Framework size: <{}MB", config.targets.max_framework_size_mb);
    Self {
        config,
        performance_benchmark,
    }
}
/// Runs the full benchmark pass, summarizes the results, and (when
/// configured) exports them to disk.
///
/// # Errors
/// Propagates failures from the underlying benchmark run, summary
/// generation, or result export.
pub fn run_comprehensive_benchmarks(&mut self) -> Result<BenchmarkSummary> {
    info!("Starting comprehensive mobile benchmark suite");
    let results = self.performance_benchmark.run_benchmarks()?.clone();
    let summary = self.generate_summary(&results)?;
    if self.config.export_results {
        self.export_results(&summary, &results)?;
    }
    info!(
        "Benchmark suite completed. Overall score: {:.1}/100",
        summary.overall_score
    );
    if !summary.all_targets_achieved {
        warn!("⚠️ Some performance targets not met. See recommendations.");
    } else {
        info!("🎉 All performance targets achieved!");
    }
    Ok(summary)
}
/// Condenses raw `BenchmarkResults` into a `BenchmarkSummary`: builds a
/// per-target pass/fail map, aggregates latency and battery statistics,
/// and attaches recommendations.
fn generate_summary(&self, results: &BenchmarkResults) -> Result<BenchmarkSummary> {
    let mut target_results = HashMap::new();
    target_results.insert(
        "inference_speed".to_string(),
        results.targets_achieved.inference_speed,
    );
    target_results.insert(
        "battery_efficiency".to_string(),
        results.targets_achieved.battery_efficiency,
    );
    target_results.insert(
        "device_coverage".to_string(),
        results.targets_achieved.device_coverage,
    );
    target_results.insert(
        "framework_size".to_string(),
        results.targets_achieved.framework_size,
    );
    let latencies: Vec<u64> = results
        .latency_measurements
        .iter()
        .map(|m| m.latency_ms)
        .collect();
    let battery_drains: Vec<f64> = results
        .battery_measurements
        .iter()
        .map(|m| m.estimated_hourly_drain)
        .collect();
    // BUG FIX: folding min/max over an empty iterator previously produced
    // +inf/-inf for the battery fields, which is meaningless in a summary
    // and not representable in the JSON export. Report 0.0 instead,
    // matching the 0 defaults used for the latency fields.
    let (best_battery_efficiency, worst_battery_efficiency) = if battery_drains.is_empty() {
        (0.0, 0.0)
    } else {
        (
            battery_drains.iter().copied().fold(f64::INFINITY, f64::min),
            battery_drains
                .iter()
                .copied()
                .fold(f64::NEG_INFINITY, f64::max),
        )
    };
    let performance_summary = PerformanceSummaryMetrics {
        best_latency_ms: *latencies.iter().min().unwrap_or(&0),
        worst_latency_ms: *latencies.iter().max().unwrap_or(&0),
        // Integer mean; divisor checked non-zero first.
        avg_latency_ms: if latencies.is_empty() {
            0
        } else {
            latencies.iter().sum::<u64>() / latencies.len() as u64
        },
        best_battery_efficiency,
        worst_battery_efficiency,
        device_coverage_percent: results.device_coverage.coverage_percentage,
        framework_size_mb: results.framework_size.measured_size_mb,
    };
    Ok(BenchmarkSummary {
        overall_score: results.overall_score,
        all_targets_achieved: results.targets_achieved.all_targets,
        target_results,
        performance_summary,
        recommendations: self.generate_recommendations(results),
    })
}
/// Builds human-readable recommendations from the per-target outcomes:
/// improvement tips for each missed target, a confirmation line for each
/// achieved one, and closing notes when everything passed.
fn generate_recommendations(&self, results: &BenchmarkResults) -> Vec<String> {
    let achieved = &results.targets_achieved;
    let mut recommendations: Vec<String> = Vec::new();

    let inference: &[&str] = if achieved.inference_speed {
        &["✅ Inference speed target achieved!"]
    } else {
        &[
            "🚀 Enable hardware acceleration (Neural Engine/NNAPI) for faster inference",
            "⚡ Implement more aggressive quantization (INT4/INT8)",
            "🔄 Add model caching and warm-up strategies",
        ]
    };
    let battery: &[&str] = if achieved.battery_efficiency {
        &["✅ Battery efficiency target achieved!"]
    } else {
        &[
            "🔋 Implement adaptive power management based on battery level",
            "📊 Add batch processing for better energy efficiency",
            "⏸️ Implement smart inference scheduling during low usage",
        ]
    };
    let coverage: &[&str] = if achieved.device_coverage {
        &["✅ Device coverage target achieved!"]
    } else {
        &[
            "📱 Add fallback implementations for legacy devices",
            "🔧 Implement device-specific optimization paths",
            "🧪 Expand compatibility testing across more device models",
        ]
    };
    let size: &[&str] = if achieved.framework_size {
        &["✅ Framework size target achieved!"]
    } else {
        &[
            "📦 Enable feature gating for optional components (Unity, React Native)",
            "🗜️ Implement dynamic loading for infrequently used features",
            "📚 Consider shared libraries for common dependencies",
        ]
    };

    for group in [inference, battery, coverage, size] {
        recommendations.extend(group.iter().map(|s| s.to_string()));
    }
    if achieved.all_targets {
        recommendations.extend([
            "🎯 All targets achieved! Consider pushing for even better performance".to_string(),
            "🚀 Ready for production deployment with confidence".to_string(),
        ]);
    }
    recommendations
}
/// Serializes the summary, detailed results, configuration, and a
/// timestamp to pretty-printed JSON on disk.
///
/// # Errors
/// Returns an error if serialization or the file write fails.
fn export_results(&self, summary: &BenchmarkSummary, results: &BenchmarkResults) -> Result<()> {
    // Fall back to a well-known temp location when no path is configured.
    let export_path = match self.config.export_path.as_deref() {
        Some(path) => path,
        None => "/tmp/mobile_benchmark_results.json",
    };
    let export_data = serde_json::json!({
        "benchmark_summary": summary,
        "detailed_results": results,
        "configuration": self.config,
        "timestamp": chrono::Utc::now().to_rfc3339(),
        "targets_overview": {
            "all_achieved": summary.all_targets_achieved,
            "score": summary.overall_score,
            "target_breakdown": summary.target_results
        }
    });
    let pretty = serde_json::to_string_pretty(&export_data)?;
    std::fs::write(export_path, pretty)?;
    info!("Benchmark results exported to: {}", export_path);
    Ok(())
}
/// Returns a shared reference to the suite's current configuration.
pub fn get_config(&self) -> &BenchmarkSuiteConfig {
    &self.config
}
/// Replaces the performance targets and rebuilds the inner benchmark so
/// the new targets take effect on the next run. Non-target suite settings
/// (profiling, export) are preserved.
pub fn update_targets(&mut self, targets: PerformanceTargets) {
    let benchmark_config = BenchmarkConfig {
        targets: targets.clone(),
        detailed_profiling: self.config.detailed_profiling,
        ..BenchmarkConfig::default()
    };
    self.performance_benchmark = PerformanceBenchmark::new(benchmark_config);
    self.config.targets = targets;
}
/// Convenience wrapper: runs the full benchmark pass and reports only
/// whether every target was achieved.
///
/// # Errors
/// Propagates any error from `run_comprehensive_benchmarks`.
pub fn quick_validation(&mut self) -> Result<bool> {
    self.run_comprehensive_benchmarks()
        .map(|summary| summary.all_targets_achieved)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Suite construction preserves the default targets.
    #[test]
    fn test_benchmark_suite_creation() {
        let suite = BenchmarkSuite::new(BenchmarkSuiteConfig::default());
        assert_eq!(suite.config.targets.max_inference_latency_ms, 100);
        assert_eq!(suite.config.targets.max_battery_drain_per_hour, 5.0);
    }

    /// Full run produces a bounded score, target breakdown, and
    /// recommendations. Ignored by default: runs the whole benchmark.
    #[test]
    #[ignore]
    fn test_comprehensive_benchmarks() {
        // Struct-update syntax instead of mutate-after-default
        // (clippy::field_reassign_with_default).
        let config = BenchmarkSuiteConfig {
            export_results: false,
            ..Default::default()
        };
        let mut suite = BenchmarkSuite::new(config);
        let summary = suite
            .run_comprehensive_benchmarks()
            .expect("operation failed in test");
        assert!((0.0..=100.0).contains(&summary.overall_score));
        assert!(!summary.target_results.is_empty());
        assert!(!summary.recommendations.is_empty());
    }

    /// `update_targets` replaces the targets stored in the suite config.
    #[test]
    fn test_target_updates() {
        let mut suite = BenchmarkSuite::new(BenchmarkSuiteConfig::default());
        let new_targets = PerformanceTargets {
            max_inference_latency_ms: 50,
            ..Default::default()
        };
        suite.update_targets(new_targets);
        assert_eq!(suite.config.targets.max_inference_latency_ms, 50);
    }

    /// `quick_validation` completes without error. Previously the result
    /// was bound and discarded; now the success is actually asserted.
    /// Ignored by default: runs the whole benchmark.
    #[test]
    #[ignore]
    fn test_quick_validation() {
        let config = BenchmarkSuiteConfig {
            export_results: false,
            ..Default::default()
        };
        let mut suite = BenchmarkSuite::new(config);
        assert!(suite.quick_validation().is_ok());
    }
}