quantrs2_tytan/benchmark/metrics.rs

//! Benchmark metric types, per-run measurements, aggregation helpers, and
//! summary statistics for the tytan benchmark suite.

use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;

/// Categories of metrics collected during a benchmark run.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum MetricType {
    ExecutionTime,
    MemoryUsage,
    SolutionQuality,
    EnergyEfficiency,
    Throughput,
    Scalability,
    CacheEfficiency,
    ConvergenceRate,
}

/// Full set of metrics recorded for a single benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkMetrics {
    pub problem_size: usize,
    pub problem_density: f64,
    pub timings: TimingMetrics,
    pub memory: MemoryMetrics,
    pub quality: QualityMetrics,
    pub utilization: UtilizationMetrics,
    pub custom: HashMap<String, f64>,
}

/// Wall-clock timing breakdown of a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingMetrics {
    pub total_time: Duration,
    pub setup_time: Duration,
    pub compute_time: Duration,
    pub postprocess_time: Duration,
    pub time_per_sample: Duration,
    pub time_to_solution: Option<Duration>,
}

/// Memory usage counters for a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct MemoryMetrics {
    pub peak_memory: usize,
    pub avg_memory: usize,
    pub allocated: usize,
    pub deallocated: usize,
    pub cache_misses: Option<u64>,
}

/// Solution-quality metrics for a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityMetrics {
    pub best_energy: f64,
    pub avg_energy: f64,
    pub energy_std: f64,
    pub success_probability: f64,
    pub time_to_target: Option<Duration>,
    pub unique_solutions: usize,
}

/// Hardware utilization observed during a benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UtilizationMetrics {
    pub cpu_usage: f64,
    pub gpu_usage: Option<f64>,
    pub memory_bandwidth: f64,
    pub cache_hit_rate: Option<f64>,
    pub power_consumption: Option<f64>,
}

impl BenchmarkMetrics {
    /// Create a metrics record for a problem of the given size and density,
    /// with all measurements default-initialized.
    pub fn new(problem_size: usize, problem_density: f64) -> Self {
        Self {
            problem_size,
            problem_density,
            timings: TimingMetrics::default(),
            memory: MemoryMetrics::default(),
            quality: QualityMetrics::default(),
            utilization: UtilizationMetrics::default(),
            custom: HashMap::new(),
        }
    }

    /// Derive throughput, energy, memory, and scaling figures from the raw metrics.
    pub fn calculate_efficiency(&self) -> EfficiencyMetrics {
        EfficiencyMetrics {
            samples_per_second: self.calculate_throughput(),
            energy_per_sample: self.calculate_energy_efficiency(),
            memory_efficiency: self.calculate_memory_efficiency(),
            scalability_factor: self.calculate_scalability(),
        }
    }

    /// Samples processed per second, based on the recorded per-sample time.
    fn calculate_throughput(&self) -> f64 {
        if self.timings.time_per_sample.as_secs_f64() > 0.0 {
            1.0 / self.timings.time_per_sample.as_secs_f64()
        } else {
            0.0
        }
    }

    /// Estimated energy per sample (power draw times per-sample time), when a
    /// power figure was recorded.
    fn calculate_energy_efficiency(&self) -> Option<f64> {
        self.utilization
            .power_consumption
            .map(|power| power * self.timings.time_per_sample.as_secs_f64())
    }

    /// Problem variables handled per unit of peak memory.
    fn calculate_memory_efficiency(&self) -> f64 {
        if self.memory.peak_memory > 0 {
            (self.problem_size as f64) / (self.memory.peak_memory as f64)
        } else {
            0.0
        }
    }

    /// Ratio of an idealized linear time budget (1 µs per variable) to the
    /// measured compute time.
    fn calculate_scalability(&self) -> f64 {
        let expected_time = self.problem_size as f64 * 1e-6;
        let actual_time = self.timings.compute_time.as_secs_f64();

        if actual_time > 0.0 {
            expected_time / actual_time
        } else {
            0.0
        }
    }
}

/// Derived efficiency figures computed from a `BenchmarkMetrics` record.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EfficiencyMetrics {
    pub samples_per_second: f64,
    pub energy_per_sample: Option<f64>,
    pub memory_efficiency: f64,
    pub scalability_factor: f64,
}

impl Default for TimingMetrics {
    fn default() -> Self {
        Self {
            total_time: Duration::ZERO,
            setup_time: Duration::ZERO,
            compute_time: Duration::ZERO,
            postprocess_time: Duration::ZERO,
            time_per_sample: Duration::ZERO,
            time_to_solution: None,
        }
    }
}

impl Default for QualityMetrics {
    fn default() -> Self {
        Self {
            best_energy: f64::INFINITY,
            avg_energy: 0.0,
            energy_std: 0.0,
            success_probability: 0.0,
            time_to_target: None,
            unique_solutions: 0,
        }
    }
}

impl Default for UtilizationMetrics {
    fn default() -> Self {
        Self {
            cpu_usage: 0.0,
            gpu_usage: None,
            memory_bandwidth: 0.0,
            cache_hit_rate: None,
            power_consumption: None,
        }
    }
}
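
// A minimal sanity check sketching how `calculate_efficiency` behaves for a
// hand-built record; the sample time, memory figure, and problem size below are
// illustrative assumptions, not reference values from the benchmark suite.
#[cfg(test)]
mod efficiency_tests {
    use super::*;

    #[test]
    fn efficiency_from_hand_built_metrics() {
        let mut metrics = BenchmarkMetrics::new(1_000, 0.5);
        metrics.timings.total_time = Duration::from_secs(2);
        metrics.timings.compute_time = Duration::from_secs(1);
        metrics.timings.time_per_sample = Duration::from_millis(10);
        metrics.memory.peak_memory = 4_000;

        let eff = metrics.calculate_efficiency();

        // 10 ms per sample corresponds to 100 samples per second.
        assert!((eff.samples_per_second - 100.0).abs() < 1e-9);
        // 1_000 problem variables over a peak memory of 4_000 units.
        assert!((eff.memory_efficiency - 0.25).abs() < 1e-12);
        // Linear budget of 1_000 * 1e-6 s = 1 ms against 1 s of compute time.
        assert!((eff.scalability_factor - 1e-3).abs() < 1e-12);
        // No power draw was recorded, so no energy-per-sample estimate.
        assert!(eff.energy_per_sample.is_none());
    }
}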

/// Helpers for aggregating metrics across multiple benchmark runs.
pub mod aggregation {
    use super::*;

    /// Summarize a slice of per-run metrics into a single aggregate record.
    pub fn aggregate_metrics(metrics: &[BenchmarkMetrics]) -> AggregatedMetrics {
        if metrics.is_empty() {
            return AggregatedMetrics::default();
        }

        let mut aggregated = AggregatedMetrics {
            num_runs: metrics.len(),
            problem_sizes: metrics.iter().map(|m| m.problem_size).collect(),
            ..Default::default()
        };

        let total_times: Vec<f64> = metrics
            .iter()
            .map(|m| m.timings.total_time.as_secs_f64())
            .collect();
        aggregated.avg_total_time =
            Duration::from_secs_f64(total_times.iter().sum::<f64>() / total_times.len() as f64);
        aggregated.min_total_time =
            Duration::from_secs_f64(total_times.iter().copied().fold(f64::INFINITY, f64::min));
        aggregated.max_total_time =
            Duration::from_secs_f64(total_times.iter().copied().fold(0.0, f64::max));

        aggregated.best_energy_overall = metrics
            .iter()
            .map(|m| m.quality.best_energy)
            .fold(f64::INFINITY, f64::min);
        aggregated.avg_success_rate = metrics
            .iter()
            .map(|m| m.quality.success_probability)
            .sum::<f64>()
            / metrics.len() as f64;

        aggregated.peak_memory_overall = metrics
            .iter()
            .map(|m| m.memory.peak_memory)
            .max()
            .unwrap_or(0);

        aggregated
    }

    /// Aggregate view over several benchmark runs.
    #[derive(Debug, Clone, Default, Serialize, Deserialize)]
    pub struct AggregatedMetrics {
        pub num_runs: usize,
        pub problem_sizes: Vec<usize>,
        pub avg_total_time: Duration,
        pub min_total_time: Duration,
        pub max_total_time: Duration,
        pub best_energy_overall: f64,
        pub avg_success_rate: f64,
        pub peak_memory_overall: usize,
    }
}
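
// A small sketch of how `aggregate_metrics` folds several runs together; the two
// runs below are made-up inputs chosen so the expected aggregates can be read off
// by hand.
#[cfg(test)]
mod aggregation_tests {
    use super::aggregation::aggregate_metrics;
    use super::*;

    #[test]
    fn aggregates_two_runs() {
        let mut run_a = BenchmarkMetrics::new(100, 0.1);
        run_a.timings.total_time = Duration::from_secs(1);
        run_a.quality.best_energy = -5.0;
        run_a.quality.success_probability = 0.4;
        run_a.memory.peak_memory = 1_024;

        let mut run_b = BenchmarkMetrics::new(200, 0.1);
        run_b.timings.total_time = Duration::from_secs(3);
        run_b.quality.best_energy = -7.5;
        run_b.quality.success_probability = 0.8;
        run_b.memory.peak_memory = 2_048;

        let agg = aggregate_metrics(&[run_a, run_b]);

        assert_eq!(agg.num_runs, 2);
        assert_eq!(agg.problem_sizes, vec![100, 200]);
        assert_eq!(agg.avg_total_time, Duration::from_secs(2));
        assert_eq!(agg.min_total_time, Duration::from_secs(1));
        assert_eq!(agg.max_total_time, Duration::from_secs(3));
        assert!((agg.best_energy_overall - (-7.5)).abs() < 1e-12);
        assert!((agg.avg_success_rate - 0.6).abs() < 1e-12);
        assert_eq!(agg.peak_memory_overall, 2_048);
    }
}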

/// Descriptive statistics over a sample of benchmark values.
pub mod statistics {
    use super::*;

    /// Compute summary statistics (mean, median, spread, percentiles) for a set of values.
    pub fn calculate_statistics(values: &[f64]) -> Statistics {
        if values.is_empty() {
            return Statistics::default();
        }

        let n = values.len() as f64;
        let mean = values.iter().sum::<f64>() / n;
        // Sample variance (Bessel's correction), with a divide-by-one fallback for a single value.
        let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1.0).max(1.0);
        let std_dev = variance.sqrt();

        let mut sorted = values.to_vec();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

        let median = if sorted.len() % 2 == 0 {
            f64::midpoint(sorted[sorted.len() / 2 - 1], sorted[sorted.len() / 2])
        } else {
            sorted[sorted.len() / 2]
        };

        Statistics {
            mean,
            median,
            std_dev,
            min: sorted[0],
            max: sorted[sorted.len() - 1],
            percentile_25: percentile(&sorted, 0.25),
            percentile_75: percentile(&sorted, 0.75),
            percentile_95: percentile(&sorted, 0.95),
        }
    }

    /// Linearly interpolated percentile of an already-sorted slice, with `p` in `[0, 1]`.
    fn percentile(sorted: &[f64], p: f64) -> f64 {
        let k = (sorted.len() as f64 - 1.0) * p;
        let f = k.floor() as usize;
        let c = f + 1;

        if c >= sorted.len() {
            sorted[sorted.len() - 1]
        } else {
            (k - f as f64).mul_add(sorted[c] - sorted[f], sorted[f])
        }
    }

    /// Summary statistics for a sample of values.
    #[derive(Debug, Clone, Default, Serialize, Deserialize)]
    pub struct Statistics {
        pub mean: f64,
        pub median: f64,
        pub std_dev: f64,
        pub min: f64,
        pub max: f64,
        pub percentile_25: f64,
        pub percentile_75: f64,
        pub percentile_95: f64,
    }
}
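
// A brief check of the statistics helpers on a tiny, hand-computed sample: for
// [1, 2, 3, 4, 5] the mean and median are 3, the sample variance is 2.5, and the
// interpolated 25th/75th percentiles land on 2 and 4. The values are illustrative
// only.
#[cfg(test)]
mod statistics_tests {
    use super::statistics::calculate_statistics;

    #[test]
    fn statistics_of_small_sample() {
        let stats = calculate_statistics(&[1.0, 2.0, 3.0, 4.0, 5.0]);

        assert!((stats.mean - 3.0).abs() < 1e-12);
        assert!((stats.median - 3.0).abs() < 1e-12);
        assert!((stats.std_dev - 2.5_f64.sqrt()).abs() < 1e-12);
        assert!((stats.min - 1.0).abs() < 1e-12);
        assert!((stats.max - 5.0).abs() < 1e-12);
        assert!((stats.percentile_25 - 2.0).abs() < 1e-12);
        assert!((stats.percentile_75 - 4.0).abs() < 1e-12);
        assert!((stats.percentile_95 - 4.8).abs() < 1e-12);
    }

    #[test]
    fn empty_input_yields_defaults() {
        let stats = calculate_statistics(&[]);
        assert!(stats.mean.abs() < 1e-12);
        assert!(stats.std_dev.abs() < 1e-12);
    }
}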