use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Instant;
use super::config::QuantumPrecision;
/// Result of a precision-selection analysis across quantum precision levels.
///
/// Aggregates per-operation precision recommendations, per-precision error
/// estimates and performance metrics, a human-readable rationale, and an
/// overall quality score (computed by `calculate_quality_score`, capped at 1.0).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PrecisionAnalysis {
    /// Recommended precision keyed by operation name.
    pub recommended_precisions: HashMap<String, QuantumPrecision>,
    /// Estimated numerical error per analyzed precision level.
    pub error_estimates: HashMap<QuantumPrecision, f64>,
    /// Performance metrics per analyzed precision level.
    pub performance_metrics: HashMap<QuantumPrecision, PerformanceMetrics>,
    /// Human-readable explanation of how the selection was made.
    pub selection_rationale: String,
    /// Aggregate quality score; 0.0 until `calculate_quality_score` runs.
    pub quality_score: f64,
}
/// Performance measurements for a single quantum precision level.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Wall-clock execution time in milliseconds.
    pub execution_time_ms: f64,
    /// Memory usage in bytes.
    pub memory_usage_bytes: usize,
    /// Operations completed per second.
    pub throughput_ops_per_sec: f64,
    /// Efficiency figure; `from_time_and_memory` derives it as
    /// throughput per megabyte of memory.
    pub energy_efficiency: f64,
}
/// Stateful helper that benchmarks precision levels and records observed
/// errors in order to produce `PrecisionAnalysis` reports.
pub struct PrecisionAnalyzer {
    /// Running counters for the current analysis session.
    analysis_state: AnalysisState,
    /// Benchmark results cached per precision (filled by `benchmark_precision`,
    /// cleared by `reset`).
    benchmark_cache: HashMap<QuantumPrecision, PerformanceMetrics>,
    /// Chronological record of observed errors (appended by `record_error`).
    error_history: Vec<ErrorSample>,
}
/// Mutable counters tracked across analyzer operations.
///
/// NOTE(review): within this file `operations_count` and `total_time` are
/// only ever reset to zero, never incremented — confirm whether other code
/// updates them or whether they are vestigial.
#[derive(Debug, Clone)]
struct AnalysisState {
    /// Number of operations processed so far.
    operations_count: usize,
    /// Accumulated time (units not established in this file).
    total_time: f64,
    /// Precision currently in effect for the analyzer.
    current_precision: QuantumPrecision,
}
/// A single observed numerical error, tagged with the precision in use
/// and the kind of operation that produced it.
#[derive(Debug, Clone)]
struct ErrorSample {
    /// Precision level in effect when the error was observed.
    precision: QuantumPrecision,
    /// Magnitude of the observed error.
    error: f64,
    /// Free-form label for the operation that produced the error.
    operation_type: String,
    /// When the sample was recorded.
    timestamp: Instant,
}
impl PrecisionAnalysis {
    /// Creates an empty analysis: no recommendations, no estimates,
    /// an empty rationale, and a zero quality score.
    #[must_use]
    pub fn new() -> Self {
        Self {
            recommended_precisions: HashMap::new(),
            error_estimates: HashMap::new(),
            performance_metrics: HashMap::new(),
            selection_rationale: String::new(),
            quality_score: 0.0,
        }
    }

    /// Records the recommended precision for a named operation, replacing
    /// any previous recommendation under the same name.
    pub fn add_recommendation(&mut self, operation: String, precision: QuantumPrecision) {
        self.recommended_precisions.insert(operation, precision);
    }

    /// Records the estimated numerical error for a precision level.
    pub fn add_error_estimate(&mut self, precision: QuantumPrecision, error: f64) {
        self.error_estimates.insert(precision, error);
    }

    /// Records performance metrics for a precision level.
    pub fn add_performance_metrics(
        &mut self,
        precision: QuantumPrecision,
        metrics: PerformanceMetrics,
    ) {
        self.performance_metrics.insert(precision, metrics);
    }

    /// Sets the human-readable explanation for the selection.
    pub fn set_rationale(&mut self, rationale: String) {
        self.selection_rationale = rationale;
    }

    /// Recomputes `quality_score` from the recorded error estimates and
    /// performance metrics.
    ///
    /// Each error estimate contributes `1 / (1 + 1000 * error)` (smaller
    /// error => higher score); each metrics entry contributes the midpoint
    /// of a throughput score and a memory score. The final score is the mean
    /// of all contributions, capped at 1.0. The score is left unchanged when
    /// nothing has been recorded.
    pub fn calculate_quality_score(&mut self) {
        let mut score = 0.0;
        let mut count = 0;
        // Fixed: the original bound the map keys as `precision` in both loops
        // without using them, triggering unused-variable warnings; iterate
        // over values only.
        for &error in self.error_estimates.values() {
            score += 1.0 / error.mul_add(1000.0, 1.0);
            count += 1;
        }
        for metrics in self.performance_metrics.values() {
            let perf_score = metrics.throughput_ops_per_sec / 1000.0;
            let mem_score = 1.0 / (1.0 + metrics.memory_usage_bytes as f64 / 1e9);
            score += f64::midpoint(perf_score, mem_score);
            count += 1;
        }
        if count > 0 {
            self.quality_score = (score / f64::from(count)).min(1.0);
        }
    }

    /// Returns the recommended precision for `operation`, if one was recorded.
    #[must_use]
    pub fn get_best_precision(&self, operation: &str) -> Option<QuantumPrecision> {
        self.recommended_precisions.get(operation).copied()
    }

    /// Returns the most frequently recommended precision across all
    /// operations, or `QuantumPrecision::Single` when there are none.
    ///
    /// NOTE(review): ties are broken by `HashMap` iteration order, which is
    /// nondeterministic — confirm callers do not rely on a stable tie-break.
    #[must_use]
    pub fn get_overall_recommendation(&self) -> QuantumPrecision {
        let mut precision_counts = HashMap::new();
        for &precision in self.recommended_precisions.values() {
            *precision_counts.entry(precision).or_insert(0) += 1;
        }
        precision_counts
            .into_iter()
            .max_by_key(|(_, count)| *count)
            .map_or(QuantumPrecision::Single, |(precision, _)| precision)
    }

    /// Whether the computed quality score exceeds the 0.8 threshold.
    #[must_use]
    pub fn is_high_quality(&self) -> bool {
        self.quality_score > 0.8
    }

    /// Produces a compact summary of the analysis. The average execution
    /// time is 0.0 when no metrics were recorded (the `max(1)` guard avoids
    /// a division by zero).
    #[must_use]
    pub fn get_summary(&self) -> AnalysisSummary {
        AnalysisSummary {
            num_operations_analyzed: self.recommended_precisions.len(),
            overall_precision: self.get_overall_recommendation(),
            quality_score: self.quality_score,
            total_error_estimate: self.error_estimates.values().sum(),
            avg_execution_time: self
                .performance_metrics
                .values()
                .map(|m| m.execution_time_ms)
                .sum::<f64>()
                / self.performance_metrics.len().max(1) as f64,
        }
    }
}
/// Compact roll-up of a `PrecisionAnalysis`, produced by `get_summary`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalysisSummary {
    /// Number of operations with a recorded recommendation.
    pub num_operations_analyzed: usize,
    /// Most frequently recommended precision overall.
    pub overall_precision: QuantumPrecision,
    /// Quality score copied from the analysis (0.0 if never computed).
    pub quality_score: f64,
    /// Sum of all per-precision error estimates.
    pub total_error_estimate: f64,
    /// Mean execution time across recorded metrics, in milliseconds
    /// (0.0 when no metrics exist).
    pub avg_execution_time: f64,
}
impl PerformanceMetrics {
    /// Creates metrics from explicit measurements.
    #[must_use]
    pub const fn new(
        execution_time_ms: f64,
        memory_usage_bytes: usize,
        throughput_ops_per_sec: f64,
        energy_efficiency: f64,
    ) -> Self {
        Self {
            execution_time_ms,
            memory_usage_bytes,
            throughput_ops_per_sec,
            energy_efficiency,
        }
    }

    /// Derives metrics from a time/memory pair.
    ///
    /// Throughput is modeled as one operation per `execution_time_ms`
    /// (i.e. `1000 / time_ms` ops/sec); energy efficiency as throughput per
    /// megabyte of memory. Both fall back to 0.0 for non-positive time or
    /// zero memory.
    #[must_use]
    pub fn from_time_and_memory(execution_time_ms: f64, memory_usage_bytes: usize) -> Self {
        let throughput = if execution_time_ms > 0.0 {
            1000.0 / execution_time_ms
        } else {
            0.0
        };
        // Fixed: the original divided by `memory_usage_bytes / 1e6`
        // unconditionally, yielding inf/NaN when memory usage is zero.
        let energy_efficiency = if memory_usage_bytes > 0 {
            throughput / (memory_usage_bytes as f64 / 1e6)
        } else {
            0.0
        };
        Self::new(
            execution_time_ms,
            memory_usage_bytes,
            throughput,
            energy_efficiency,
        )
    }

    /// Combines time, memory, throughput, and energy into a single score
    /// (equal-weight average of the four component scores).
    #[must_use]
    pub fn performance_score(&self) -> f64 {
        let time_score = 1.0 / (1.0 + self.execution_time_ms / 1000.0);
        let memory_score = 1.0 / (1.0 + self.memory_usage_bytes as f64 / 1e9);
        let throughput_score = self.throughput_ops_per_sec / 1000.0;
        let energy_score = self.energy_efficiency / 1000.0;
        (time_score + memory_score + throughput_score + energy_score) / 4.0
    }

    /// Whether this metrics set scores strictly higher than `other`.
    #[must_use]
    pub fn is_better_than(&self, other: &Self) -> bool {
        self.performance_score() > other.performance_score()
    }
}
impl PrecisionAnalyzer {
    /// Creates an analyzer with empty history and cache, starting at
    /// single precision.
    #[must_use]
    pub fn new() -> Self {
        Self {
            analysis_state: AnalysisState {
                operations_count: 0,
                total_time: 0.0,
                current_precision: QuantumPrecision::Single,
            },
            benchmark_cache: HashMap::new(),
            error_history: Vec::new(),
        }
    }

    /// Builds a `PrecisionAnalysis` for the given error `tolerance`.
    ///
    /// Evaluates half, single, and double precision: records each level's
    /// typical error and synthetic performance metrics, and inserts a
    /// recommendation under the "default" key for every level whose error
    /// bound satisfies the tolerance. Because all sufficient levels share
    /// that key, the final recommendation is the last sufficient level in
    /// half/single/double order.
    pub fn analyze_for_tolerance(&mut self, tolerance: f64) -> PrecisionAnalysis {
        let mut analysis = PrecisionAnalysis::new();
        for &precision in &[
            QuantumPrecision::Half,
            QuantumPrecision::Single,
            QuantumPrecision::Double,
        ] {
            let error = precision.typical_error();
            analysis.add_error_estimate(precision, error);
            let metrics = self.create_synthetic_metrics(precision);
            analysis.add_performance_metrics(precision, metrics);
            if precision.is_sufficient_for_tolerance(tolerance) {
                analysis.add_recommendation("default".to_string(), precision);
            }
        }
        let rationale = format!(
            "Analysis for tolerance {tolerance:.2e}: recommended precision based on error bounds and performance trade-offs"
        );
        analysis.set_rationale(rationale);
        analysis.calculate_quality_score();
        analysis
    }

    /// Benchmarks `precision`, returning cached metrics when available.
    ///
    /// The "benchmark" is currently a 1 ms sleep placeholder; results are
    /// cached per precision until `reset` clears them.
    pub fn benchmark_precision(&mut self, precision: QuantumPrecision) -> PerformanceMetrics {
        if let Some(cached) = self.benchmark_cache.get(&precision) {
            return cached.clone();
        }
        let start_time = Instant::now();
        std::thread::sleep(std::time::Duration::from_millis(1));
        // Fixed: `as_millis() as f64` truncated to whole milliseconds, so
        // sub-millisecond timings collapsed to 0 or 1; keep full resolution.
        let execution_time = start_time.elapsed().as_secs_f64() * 1000.0;
        let memory_usage = self.estimate_memory_usage(precision);
        let metrics = PerformanceMetrics::from_time_and_memory(execution_time, memory_usage);
        self.benchmark_cache.insert(precision, metrics.clone());
        metrics
    }

    /// Appends an observed error sample, timestamped at the moment of
    /// recording.
    pub fn record_error(
        &mut self,
        precision: QuantumPrecision,
        error: f64,
        operation_type: String,
    ) {
        self.error_history.push(ErrorSample {
            precision,
            error,
            operation_type,
            timestamp: Instant::now(),
        });
    }

    /// Returns the mean recorded error for `precision`, falling back to the
    /// precision's typical error when no samples exist.
    #[must_use]
    pub fn get_average_error(&self, precision: QuantumPrecision) -> f64 {
        // Fold sum and count in one pass instead of collecting into a Vec.
        let (sum, count) = self
            .error_history
            .iter()
            .filter(|sample| sample.precision == precision)
            .fold((0.0_f64, 0_usize), |(s, c), sample| (s + sample.error, c + 1));
        if count == 0 {
            precision.typical_error()
        } else {
            sum / count as f64
        }
    }

    /// Clears counters, error history, and the benchmark cache.
    ///
    /// NOTE(review): `current_precision` is left unchanged — confirm that is
    /// intentional.
    pub fn reset(&mut self) {
        self.analysis_state.operations_count = 0;
        self.analysis_state.total_time = 0.0;
        self.error_history.clear();
        self.benchmark_cache.clear();
    }

    /// Derives placeholder metrics for `precision`: a 100 ms baseline scaled
    /// by the precision's computation factor, with the shared memory model.
    fn create_synthetic_metrics(&self, precision: QuantumPrecision) -> PerformanceMetrics {
        let base_time = 100.0;
        let execution_time = base_time * precision.computation_factor();
        // Fixed: reuse `estimate_memory_usage` instead of duplicating the
        // 1 MiB-baseline memory model inline.
        let memory_usage = self.estimate_memory_usage(precision);
        PerformanceMetrics::from_time_and_memory(execution_time, memory_usage)
    }

    /// Estimates memory usage as a 1 MiB baseline scaled by the precision's
    /// memory factor.
    fn estimate_memory_usage(&self, precision: QuantumPrecision) -> usize {
        let base_memory = 1024 * 1024;
        (f64::from(base_memory) * precision.memory_factor()) as usize
    }
}
impl Default for PrecisionAnalysis {
fn default() -> Self {
Self::new()
}
}
impl Default for PrecisionAnalyzer {
fn default() -> Self {
Self::new()
}
}