// quantrs2_tytan/benchmark/metrics.rs

1//! Benchmark metrics collection and analysis
2
3use serde::{Deserialize, Serialize};
4use std::collections::HashMap;
5use std::time::Duration;
6
/// Types of metrics to collect.
///
/// Derives `Eq` + `Hash` so a metric type can be used as a map/set key,
/// and serde traits so selections can be persisted alongside results.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum MetricType {
    /// Wall-clock execution time
    ExecutionTime,
    /// Memory usage
    MemoryUsage,
    /// Solution quality
    SolutionQuality,
    /// Energy efficiency
    EnergyEfficiency,
    /// Throughput (samples per second)
    Throughput,
    /// Scalability with problem size
    Scalability,
    /// Cache efficiency
    CacheEfficiency,
    /// Convergence rate
    ConvergenceRate,
}
27
/// Benchmark metrics collection for a single run on one problem instance.
///
/// Raw measurements live in the nested sub-structs; derived quantities are
/// computed on demand via [`BenchmarkMetrics::calculate_efficiency`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkMetrics {
    /// Problem size (number of variables)
    pub problem_size: usize,
    /// Problem density (fraction of non-zero coefficients, 0.0..=1.0)
    pub problem_density: f64,
    /// Timing measurements
    pub timings: TimingMetrics,
    /// Memory measurements
    pub memory: MemoryMetrics,
    /// Solution quality metrics
    pub quality: QualityMetrics,
    /// Hardware utilization
    pub utilization: UtilizationMetrics,
    /// Additional custom metrics, keyed by a free-form name
    pub custom: HashMap<String, f64>,
}
46
/// Timing-related metrics for one benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingMetrics {
    /// Total execution time (setup + compute + post-processing)
    pub total_time: Duration,
    /// Setup/initialization time
    pub setup_time: Duration,
    /// Actual computation time
    pub compute_time: Duration,
    /// Post-processing time
    pub postprocess_time: Duration,
    /// Time per sample; `Duration::ZERO` means "not yet recorded"
    pub time_per_sample: Duration,
    /// Time to first solution, if one was found
    pub time_to_solution: Option<Duration>,
}
63
/// Memory-related metrics for one benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct MemoryMetrics {
    /// Peak memory usage in bytes
    pub peak_memory: usize,
    /// Average memory usage (bytes, same unit as `peak_memory`)
    pub avg_memory: usize,
    /// Total memory allocated (bytes)
    pub allocated: usize,
    /// Total memory deallocated (bytes)
    pub deallocated: usize,
    /// Cache misses, if the platform exposes a counter
    pub cache_misses: Option<u64>,
}
78
/// Solution quality metrics.
///
/// Energies follow the minimization convention: lower is better (see the
/// `Default` impl, which seeds `best_energy` with `f64::INFINITY`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityMetrics {
    /// Best (lowest) energy found
    pub best_energy: f64,
    /// Average energy across samples
    pub avg_energy: f64,
    /// Standard deviation of energies
    pub energy_std: f64,
    /// Success probability (finding optimal or near-optimal), 0.0..=1.0
    pub success_probability: f64,
    /// Time to reach target quality, if the target was reached
    pub time_to_target: Option<Duration>,
    /// Number of unique solutions found
    pub unique_solutions: usize,
}
95
/// Hardware utilization metrics.
///
/// Optional fields are `None` when the quantity is not applicable or not
/// measurable on the current platform.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UtilizationMetrics {
    /// CPU utilization percentage
    pub cpu_usage: f64,
    /// GPU utilization percentage (if applicable)
    pub gpu_usage: Option<f64>,
    /// Memory bandwidth utilization
    pub memory_bandwidth: f64,
    /// Cache hit rate (if measurable)
    pub cache_hit_rate: Option<f64>,
    /// Power consumption (if measurable) — used by energy-efficiency calc
    pub power_consumption: Option<f64>,
}
110
111impl BenchmarkMetrics {
112    /// Create new metrics collection
113    pub fn new(problem_size: usize, problem_density: f64) -> Self {
114        Self {
115            problem_size,
116            problem_density,
117            timings: TimingMetrics::default(),
118            memory: MemoryMetrics::default(),
119            quality: QualityMetrics::default(),
120            utilization: UtilizationMetrics::default(),
121            custom: HashMap::new(),
122        }
123    }
124
125    /// Calculate efficiency metrics
126    pub fn calculate_efficiency(&self) -> EfficiencyMetrics {
127        EfficiencyMetrics {
128            samples_per_second: self.calculate_throughput(),
129            energy_per_sample: self.calculate_energy_efficiency(),
130            memory_efficiency: self.calculate_memory_efficiency(),
131            scalability_factor: self.calculate_scalability(),
132        }
133    }
134
135    /// Calculate throughput (samples per second)
136    fn calculate_throughput(&self) -> f64 {
137        if self.timings.total_time.as_secs_f64() > 0.0 {
138            1.0 / self.timings.time_per_sample.as_secs_f64()
139        } else {
140            0.0
141        }
142    }
143
144    /// Calculate energy efficiency
145    fn calculate_energy_efficiency(&self) -> Option<f64> {
146        self.utilization
147            .power_consumption
148            .map(|power| power * self.timings.time_per_sample.as_secs_f64())
149    }
150
151    /// Calculate memory efficiency
152    fn calculate_memory_efficiency(&self) -> f64 {
153        if self.memory.peak_memory > 0 {
154            (self.problem_size as f64) / (self.memory.peak_memory as f64)
155        } else {
156            0.0
157        }
158    }
159
160    /// Calculate scalability factor
161    fn calculate_scalability(&self) -> f64 {
162        // Simple O(n) check - ideal would be linear scaling
163        let expected_time = self.problem_size as f64 * 1e-6; // Microseconds per variable
164        let actual_time = self.timings.compute_time.as_secs_f64();
165
166        if actual_time > 0.0 {
167            expected_time / actual_time
168        } else {
169            0.0
170        }
171    }
172}
173
/// Efficiency metrics derived from raw measurements.
///
/// Produced by [`BenchmarkMetrics::calculate_efficiency`]; fields default to
/// 0.0 / `None` when the underlying measurement is missing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EfficiencyMetrics {
    /// Samples generated per second
    pub samples_per_second: f64,
    /// Energy consumed per sample (if power consumption was measured)
    pub energy_per_sample: Option<f64>,
    /// Memory efficiency (problem size / peak memory used)
    pub memory_efficiency: f64,
    /// Scalability factor (1.0 = perfect linear scaling)
    pub scalability_factor: f64,
}
186
187/// Default implementations
188impl Default for TimingMetrics {
189    fn default() -> Self {
190        Self {
191            total_time: Duration::ZERO,
192            setup_time: Duration::ZERO,
193            compute_time: Duration::ZERO,
194            postprocess_time: Duration::ZERO,
195            time_per_sample: Duration::ZERO,
196            time_to_solution: None,
197        }
198    }
199}
200
201impl Default for QualityMetrics {
202    fn default() -> Self {
203        Self {
204            best_energy: f64::INFINITY,
205            avg_energy: 0.0,
206            energy_std: 0.0,
207            success_probability: 0.0,
208            time_to_target: None,
209            unique_solutions: 0,
210        }
211    }
212}
213
214impl Default for UtilizationMetrics {
215    fn default() -> Self {
216        Self {
217            cpu_usage: 0.0,
218            gpu_usage: None,
219            memory_bandwidth: 0.0,
220            cache_hit_rate: None,
221            power_consumption: None,
222        }
223    }
224}
225
226/// Metrics aggregation utilities
227pub mod aggregation {
228    use super::*;
229
230    /// Aggregate multiple metric collections
231    pub fn aggregate_metrics(metrics: &[BenchmarkMetrics]) -> AggregatedMetrics {
232        if metrics.is_empty() {
233            return AggregatedMetrics::default();
234        }
235
236        let mut aggregated = AggregatedMetrics {
237            num_runs: metrics.len(),
238            problem_sizes: metrics.iter().map(|m| m.problem_size).collect(),
239            ..Default::default()
240        };
241
242        // Timing aggregation
243        let total_times: Vec<f64> = metrics
244            .iter()
245            .map(|m| m.timings.total_time.as_secs_f64())
246            .collect();
247        aggregated.avg_total_time =
248            Duration::from_secs_f64(total_times.iter().sum::<f64>() / total_times.len() as f64);
249        aggregated.min_total_time =
250            Duration::from_secs_f64(total_times.iter().copied().fold(f64::INFINITY, f64::min));
251        aggregated.max_total_time =
252            Duration::from_secs_f64(total_times.iter().copied().fold(0.0, f64::max));
253
254        // Quality aggregation
255        aggregated.best_energy_overall = metrics
256            .iter()
257            .map(|m| m.quality.best_energy)
258            .fold(f64::INFINITY, f64::min);
259        aggregated.avg_success_rate = metrics
260            .iter()
261            .map(|m| m.quality.success_probability)
262            .sum::<f64>()
263            / metrics.len() as f64;
264
265        // Memory aggregation
266        aggregated.peak_memory_overall = metrics
267            .iter()
268            .map(|m| m.memory.peak_memory)
269            .max()
270            .unwrap_or(0);
271
272        aggregated
273    }
274
275    /// Aggregated metrics across multiple runs
276    #[derive(Debug, Clone, Default, Serialize, Deserialize)]
277    pub struct AggregatedMetrics {
278        pub num_runs: usize,
279        pub problem_sizes: Vec<usize>,
280        pub avg_total_time: Duration,
281        pub min_total_time: Duration,
282        pub max_total_time: Duration,
283        pub best_energy_overall: f64,
284        pub avg_success_rate: f64,
285        pub peak_memory_overall: usize,
286    }
287}
288
289/// Statistical analysis utilities
290pub mod statistics {
291    use super::*;
292
293    /// Calculate statistical measures for a set of values
294    pub fn calculate_statistics(values: &[f64]) -> Statistics {
295        if values.is_empty() {
296            return Statistics::default();
297        }
298
299        let n = values.len() as f64;
300        let mean = values.iter().sum::<f64>() / n;
301        let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1.0).max(1.0);
302        let std_dev = variance.sqrt();
303
304        let mut sorted = values.to_vec();
305        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
306
307        let median = if sorted.len() % 2 == 0 {
308            f64::midpoint(sorted[sorted.len() / 2 - 1], sorted[sorted.len() / 2])
309        } else {
310            sorted[sorted.len() / 2]
311        };
312
313        Statistics {
314            mean,
315            median,
316            std_dev,
317            min: sorted[0],
318            max: sorted[sorted.len() - 1],
319            percentile_25: percentile(&sorted, 0.25),
320            percentile_75: percentile(&sorted, 0.75),
321            percentile_95: percentile(&sorted, 0.95),
322        }
323    }
324
325    fn percentile(sorted: &[f64], p: f64) -> f64 {
326        let k = (sorted.len() as f64 - 1.0) * p;
327        let f = k.floor() as usize;
328        let c = f + 1;
329
330        if c >= sorted.len() {
331            sorted[sorted.len() - 1]
332        } else {
333            (k - f as f64).mul_add(sorted[c] - sorted[f], sorted[f])
334        }
335    }
336
337    #[derive(Debug, Clone, Default, Serialize, Deserialize)]
338    pub struct Statistics {
339        pub mean: f64,
340        pub median: f64,
341        pub std_dev: f64,
342        pub min: f64,
343        pub max: f64,
344        pub percentile_25: f64,
345        pub percentile_75: f64,
346        pub percentile_95: f64,
347    }
348}