// quantrs2_tytan/benchmark/analysis.rs

1//! Benchmark analysis and reporting
2
3use crate::benchmark::{
4    metrics::{aggregation, BenchmarkMetrics},
5    runner::BenchmarkResult,
6};
7use serde::{Deserialize, Serialize};
8use std::collections::HashMap;
9
/// Performance analysis report
///
/// Aggregated view over a set of benchmark results, built by
/// `PerformanceReport::from_results` and serializable to JSON via
/// `save_to_file`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceReport {
    /// Report metadata
    pub metadata: ReportMetadata,
    /// Summary statistics
    pub summary: SummaryStatistics,
    /// Detailed analysis by backend
    pub backend_analysis: HashMap<String, BackendAnalysis>,
    /// Detailed analysis by sampler
    pub sampler_analysis: HashMap<String, SamplerAnalysis>,
    /// Scaling analysis
    pub scaling_analysis: ScalingAnalysis,
    /// Comparative analysis
    pub comparison: ComparativeAnalysis,
    /// Recommendations
    pub recommendations: Vec<Recommendation>,
}
28
/// Report metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportMetadata {
    /// Wall-clock time at which the report was generated
    pub generated_at: std::time::SystemTime,
    /// Number of benchmark results the report was built from
    pub total_benchmarks: usize,
    /// Span between the first and last result timestamps
    pub total_duration: std::time::Duration,
    /// Host platform details captured at report time
    pub platform_info: PlatformInfo,
}
37
/// Platform information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlatformInfo {
    /// Operating system name (from `std::env::consts::OS`)
    pub os: String,
    /// Logical CPU count
    pub cpu_cores: usize,
    /// CPU model string ("Unknown" until platform-specific probing is added)
    pub cpu_model: String,
    /// Total memory in GB (0.0 until platform-specific probing is added)
    pub memory_gb: f64,
    /// Toolchain version, read from the `RUSTC_VERSION` env var
    pub rust_version: String,
}
47
/// Summary statistics across all benchmarks
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryStatistics {
    /// Sum of unique solutions found across all results
    pub total_samples: usize,
    /// Minimum per-sample time observed in any run
    pub best_time_per_sample: std::time::Duration,
    /// Lowest (best) energy found across all runs
    pub best_energy_found: f64,
    /// Backend with the highest cumulative throughput
    pub most_efficient_backend: String,
    /// Sampler with the highest cumulative throughput
    pub most_efficient_sampler: String,
    /// Aggregate metrics over the whole run set
    pub overall_metrics: BenchmarkMetrics,
}
58
/// Analysis for a specific backend
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackendAnalysis {
    /// Backend name (map key repeated for convenience)
    pub name: String,
    /// Number of benchmark runs for this backend
    pub num_benchmarks: usize,
    /// Fraction of runs that completed (currently always 1.0)
    pub success_rate: f64,
    /// Mean time per sample over all runs
    pub avg_time_per_sample: std::time::Duration,
    /// Mean of `peak_memory` over runs (integer-averaged)
    pub avg_memory_usage: usize,
    /// Problem size with the highest mean throughput
    pub best_problem_size: usize,
    /// Mean samples/s keyed by problem size
    pub efficiency_by_size: HashMap<usize, f64>,
    /// Mean samples/s keyed by one-decimal density bucket (e.g. "0.3")
    pub efficiency_by_density: HashMap<String, f64>,
}
71
/// Analysis for a specific sampler
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SamplerAnalysis {
    /// Sampler name (map key repeated for convenience)
    pub name: String,
    /// Number of benchmark runs for this sampler
    pub num_benchmarks: usize,
    /// Mean best energy across runs (lower is better)
    pub avg_solution_quality: f64,
    /// Mean of 1/time-to-target over all runs; runs that never reached the
    /// target add nothing to the sum but still count in the denominator
    pub convergence_rate: f64,
    /// Best-performing parameter set (currently unpopulated)
    pub best_parameters: HashMap<String, f64>,
    /// Mean best energy keyed by problem type ("sparse"/"medium"/"dense")
    pub performance_by_problem_type: HashMap<String, f64>,
}
82
/// Scaling analysis results
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalingAnalysis {
    /// Fitted growth model for compute time vs problem size
    pub time_complexity: ComplexityEstimate,
    /// Fitted growth model for peak memory vs problem size
    pub memory_complexity: ComplexityEstimate,
    /// Weak-scaling efficiency (currently a fixed placeholder)
    pub weak_scaling_efficiency: f64,
    /// Strong-scaling efficiency (currently a fixed placeholder)
    pub strong_scaling_efficiency: f64,
    /// Top problem sizes ranked by accumulated scalability factor
    pub optimal_problem_sizes: Vec<usize>,
}
92
/// Complexity estimate
///
/// Result of fitting a growth model to (problem size, cost) data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComplexityEstimate {
    pub order: String, // e.g., "O(n)", "O(n²)", "O(n log n)"
    pub coefficient: f64,
    pub r_squared: f64, // Goodness of fit
}
100
/// Comparative analysis between backends/samplers
///
/// A "configuration" key is the `"{backend}-{sampler}"` pair string.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComparativeAnalysis {
    /// Pairwise speedup: entry (a, b) = throughput(a) / throughput(b).
    // NOTE(review): serde_json cannot serialize maps with non-string
    // (tuple) keys, so JSON export fails at runtime once this map is
    // non-empty — confirm and consider an "a-vs-b" string key instead.
    pub speedup_matrix: HashMap<(String, String), f64>,
    /// Best energy per configuration
    pub quality_comparison: HashMap<String, f64>,
    /// Configurations ranked by cumulative throughput, best first
    pub efficiency_ranking: Vec<(String, f64)>,
    /// Non-dominated quality/performance trade-off points
    pub pareto_frontier: Vec<ParetoPoint>,
}
109
/// Point on Pareto frontier (quality vs performance trade-off)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParetoPoint {
    /// Configuration key ("{backend}-{sampler}")
    pub configuration: String,
    /// Negated best energy, so that higher is better
    pub quality_score: f64,
    /// Accumulated samples/s, higher is better
    pub performance_score: f64,
}
117
/// Recommendation based on analysis
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Recommendation {
    /// Which aspect of the system the recommendation targets
    pub category: RecommendationCategory,
    /// Human-readable recommendation text
    pub message: String,
    /// Estimated impact of acting on the recommendation
    pub impact: ImpactLevel,
    /// Free-form supporting key/value details (may be empty)
    pub details: HashMap<String, String>,
}
126
/// Aspect of the system a `Recommendation` targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RecommendationCategory {
    /// Run configuration (e.g. problem sizing)
    Configuration,
    /// Hardware choice (e.g. GPU acceleration)
    Hardware,
    /// Algorithm selection
    Algorithm,
    /// Code/communication optimization
    Optimization,
}
134
/// Estimated impact of acting on a `Recommendation`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ImpactLevel {
    High,
    Medium,
    Low,
}
141
142impl PerformanceReport {
143    /// Generate report from benchmark results
144    pub fn from_results(results: &[BenchmarkResult]) -> Result<Self, Box<dyn std::error::Error>> {
145        if results.is_empty() {
146            return Err("No benchmark results to analyze".into());
147        }
148
149        // Safe to use expect() here since we verified results.is_empty() == false above
150        let start_time = results
151            .first()
152            .expect("results guaranteed non-empty after is_empty check")
153            .timestamp;
154        let end_time = results
155            .last()
156            .expect("results guaranteed non-empty after is_empty check")
157            .timestamp;
158        let total_duration = end_time
159            .duration_since(start_time)
160            .unwrap_or(std::time::Duration::ZERO);
161
162        let metadata = ReportMetadata {
163            generated_at: std::time::SystemTime::now(),
164            total_benchmarks: results.len(),
165            total_duration,
166            platform_info: Self::get_platform_info(),
167        };
168
169        let summary = Self::calculate_summary(results);
170        let backend_analysis = Self::analyze_backends(results);
171        let sampler_analysis = Self::analyze_samplers(results);
172        let scaling_analysis = Self::analyze_scaling(results);
173        let comparison = Self::comparative_analysis(results);
174        let recommendations = Self::generate_recommendations(&summary, &scaling_analysis);
175
176        Ok(Self {
177            metadata,
178            summary,
179            backend_analysis,
180            sampler_analysis,
181            scaling_analysis,
182            comparison,
183            recommendations,
184        })
185    }
186
187    /// Get platform information
188    fn get_platform_info() -> PlatformInfo {
189        PlatformInfo {
190            os: std::env::consts::OS.to_string(),
191            cpu_cores: num_cpus::get(),
192            cpu_model: "Unknown".to_string(), // Would need system-specific code
193            memory_gb: 0.0,                   // Would need system-specific code
194            rust_version: std::env::var("RUSTC_VERSION").unwrap_or_else(|_| "unknown".to_string()),
195        }
196    }
197
198    /// Calculate summary statistics
199    fn calculate_summary(results: &[BenchmarkResult]) -> SummaryStatistics {
200        let total_samples: usize = results
201            .iter()
202            .map(|r| r.metrics.quality.unique_solutions)
203            .sum();
204
205        let best_time_per_sample = results
206            .iter()
207            .map(|r| r.metrics.timings.time_per_sample)
208            .min()
209            .unwrap_or(std::time::Duration::ZERO);
210
211        let best_energy_found = results
212            .iter()
213            .map(|r| r.metrics.quality.best_energy)
214            .fold(f64::INFINITY, f64::min);
215
216        // Find most efficient configurations
217        let mut backend_efficiency: HashMap<String, f64> = HashMap::new();
218        let mut sampler_efficiency: HashMap<String, f64> = HashMap::new();
219
220        for result in results {
221            let efficiency = result.metrics.calculate_efficiency();
222
223            backend_efficiency
224                .entry(result.backend_name.clone())
225                .and_modify(|e| *e += efficiency.samples_per_second)
226                .or_insert(efficiency.samples_per_second);
227
228            sampler_efficiency
229                .entry(result.sampler_name.clone())
230                .and_modify(|e| *e += efficiency.samples_per_second)
231                .or_insert(efficiency.samples_per_second);
232        }
233
234        let most_efficient_backend = backend_efficiency
235            .iter()
236            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
237            .map(|(k, _)| k.clone())
238            .unwrap_or_default();
239
240        let most_efficient_sampler = sampler_efficiency
241            .iter()
242            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
243            .map(|(k, _)| k.clone())
244            .unwrap_or_default();
245
246        // Create aggregate metrics
247        let metrics_vec: Vec<_> = results.iter().map(|r| r.metrics.clone()).collect();
248        let aggregated = aggregation::aggregate_metrics(&metrics_vec);
249
250        SummaryStatistics {
251            total_samples,
252            best_time_per_sample,
253            best_energy_found,
254            most_efficient_backend,
255            most_efficient_sampler,
256            overall_metrics: BenchmarkMetrics::new(
257                aggregated.problem_sizes.iter().sum::<usize>() / aggregated.problem_sizes.len(),
258                0.5, // Average density
259            ),
260        }
261    }
262
263    /// Analyze performance by backend
264    fn analyze_backends(results: &[BenchmarkResult]) -> HashMap<String, BackendAnalysis> {
265        let mut analysis = HashMap::new();
266
267        // Group results by backend
268        let mut by_backend: HashMap<String, Vec<&BenchmarkResult>> = HashMap::new();
269        for result in results {
270            by_backend
271                .entry(result.backend_name.clone())
272                .or_default()
273                .push(result);
274        }
275
276        for (backend_name, backend_results) in by_backend {
277            let num_benchmarks = backend_results.len();
278            let success_rate = 1.0; // All completed successfully
279
280            let avg_time_per_sample = backend_results
281                .iter()
282                .map(|r| r.metrics.timings.time_per_sample.as_secs_f64())
283                .sum::<f64>()
284                / backend_results.len() as f64;
285
286            let avg_memory_usage = backend_results
287                .iter()
288                .map(|r| r.metrics.memory.peak_memory)
289                .sum::<usize>()
290                / backend_results.len();
291
292            // Find best problem size
293            let mut size_performance: HashMap<usize, Vec<f64>> = HashMap::new();
294            for result in &backend_results {
295                let efficiency = result.metrics.calculate_efficiency();
296                size_performance
297                    .entry(result.problem_size)
298                    .or_default()
299                    .push(efficiency.samples_per_second);
300            }
301
302            let best_problem_size = size_performance
303                .iter()
304                .map(|(size, perfs)| {
305                    let avg_perf = perfs.iter().sum::<f64>() / perfs.len() as f64;
306                    (*size, avg_perf)
307                })
308                .max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
309                .map_or(0, |(size, _)| size);
310
311            // Calculate efficiency by size
312            let efficiency_by_size: HashMap<usize, f64> = size_performance
313                .iter()
314                .map(|(size, perfs)| {
315                    let avg = perfs.iter().sum::<f64>() / perfs.len() as f64;
316                    (*size, avg)
317                })
318                .collect();
319
320            // Calculate efficiency by density
321            let mut density_performance: HashMap<String, Vec<f64>> = HashMap::new();
322            for result in &backend_results {
323                let density_str = format!("{:.1}", result.problem_density);
324                let efficiency = result.metrics.calculate_efficiency();
325                density_performance
326                    .entry(density_str)
327                    .or_default()
328                    .push(efficiency.samples_per_second);
329            }
330
331            let efficiency_by_density: HashMap<String, f64> = density_performance
332                .iter()
333                .map(|(density, perfs)| {
334                    let avg = perfs.iter().sum::<f64>() / perfs.len() as f64;
335                    (density.clone(), avg)
336                })
337                .collect();
338
339            analysis.insert(
340                backend_name.clone(),
341                BackendAnalysis {
342                    name: backend_name,
343                    num_benchmarks,
344                    success_rate,
345                    avg_time_per_sample: std::time::Duration::from_secs_f64(avg_time_per_sample),
346                    avg_memory_usage,
347                    best_problem_size,
348                    efficiency_by_size,
349                    efficiency_by_density,
350                },
351            );
352        }
353
354        analysis
355    }
356
357    /// Analyze performance by sampler
358    fn analyze_samplers(results: &[BenchmarkResult]) -> HashMap<String, SamplerAnalysis> {
359        let mut analysis = HashMap::new();
360
361        // Group results by sampler
362        let mut by_sampler: HashMap<String, Vec<&BenchmarkResult>> = HashMap::new();
363        for result in results {
364            by_sampler
365                .entry(result.sampler_name.clone())
366                .or_default()
367                .push(result);
368        }
369
370        for (sampler_name, sampler_results) in by_sampler {
371            let num_benchmarks = sampler_results.len();
372
373            let avg_solution_quality = sampler_results
374                .iter()
375                .map(|r| r.metrics.quality.best_energy)
376                .sum::<f64>()
377                / sampler_results.len() as f64;
378
379            // Simple convergence rate estimate
380            let convergence_rate = sampler_results
381                .iter()
382                .filter_map(|r| r.metrics.quality.time_to_target)
383                .map(|t| 1.0 / t.as_secs_f64())
384                .sum::<f64>()
385                / sampler_results.len() as f64;
386
387            // Placeholder for best parameters
388            let best_parameters = HashMap::new();
389
390            // Performance by problem type (density)
391            let mut problem_type_performance: HashMap<String, Vec<f64>> = HashMap::new();
392            for result in &sampler_results {
393                let problem_type = if result.problem_density < 0.3 {
394                    "sparse"
395                } else if result.problem_density < 0.7 {
396                    "medium"
397                } else {
398                    "dense"
399                };
400
401                problem_type_performance
402                    .entry(problem_type.to_string())
403                    .or_default()
404                    .push(result.metrics.quality.best_energy);
405            }
406
407            let performance_by_problem_type: HashMap<String, f64> = problem_type_performance
408                .iter()
409                .map(|(ptype, energies)| {
410                    let avg = energies.iter().sum::<f64>() / energies.len() as f64;
411                    (ptype.clone(), avg)
412                })
413                .collect();
414
415            analysis.insert(
416                sampler_name.clone(),
417                SamplerAnalysis {
418                    name: sampler_name,
419                    num_benchmarks,
420                    avg_solution_quality,
421                    convergence_rate,
422                    best_parameters,
423                    performance_by_problem_type,
424                },
425            );
426        }
427
428        analysis
429    }
430
431    /// Analyze scaling behavior
432    fn analyze_scaling(results: &[BenchmarkResult]) -> ScalingAnalysis {
433        // Extract time vs problem size data
434        let mut time_data: Vec<(f64, f64)> = Vec::new();
435        let mut memory_data: Vec<(f64, f64)> = Vec::new();
436
437        for result in results {
438            let size = result.problem_size as f64;
439            let time = result.metrics.timings.compute_time.as_secs_f64();
440            let memory = result.metrics.memory.peak_memory as f64;
441
442            time_data.push((size, time));
443            memory_data.push((size, memory));
444        }
445
446        // Fit complexity models
447        let time_complexity = Self::fit_complexity_model(&time_data);
448        let memory_complexity = Self::fit_complexity_model(&memory_data);
449
450        // Calculate scaling efficiencies (simplified)
451        let weak_scaling_efficiency = 0.8; // Placeholder
452        let strong_scaling_efficiency = 0.7; // Placeholder
453
454        // Find optimal problem sizes based on efficiency
455        let mut size_efficiencies: HashMap<usize, f64> = HashMap::new();
456        for result in results {
457            let efficiency = result.metrics.calculate_efficiency();
458            size_efficiencies
459                .entry(result.problem_size)
460                .and_modify(|e| *e += efficiency.scalability_factor)
461                .or_insert(efficiency.scalability_factor);
462        }
463
464        let mut optimal_sizes: Vec<(usize, f64)> = size_efficiencies.into_iter().collect();
465        optimal_sizes.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
466        let optimal_problem_sizes: Vec<usize> = optimal_sizes
467            .into_iter()
468            .take(3)
469            .map(|(size, _)| size)
470            .collect();
471
472        ScalingAnalysis {
473            time_complexity,
474            memory_complexity,
475            weak_scaling_efficiency,
476            strong_scaling_efficiency,
477            optimal_problem_sizes,
478        }
479    }
480
481    /// Fit complexity model to data
482    fn fit_complexity_model(data: &[(f64, f64)]) -> ComplexityEstimate {
483        // Simple linear regression (in practice would fit various models)
484        if data.is_empty() {
485            return ComplexityEstimate {
486                order: "O(1)".to_string(),
487                coefficient: 0.0,
488                r_squared: 0.0,
489            };
490        }
491
492        let n = data.len() as f64;
493        let sum_x: f64 = data.iter().map(|(x, _)| x).sum();
494        let sum_y: f64 = data.iter().map(|(_, y)| y).sum();
495        let sum_xy: f64 = data.iter().map(|(x, y)| x * y).sum();
496        let sum_x2: f64 = data.iter().map(|(x, _)| x * x).sum();
497
498        let slope = n.mul_add(sum_xy, -(sum_x * sum_y)) / n.mul_add(sum_x2, -(sum_x * sum_x));
499        let intercept = slope.mul_add(-sum_x, sum_y) / n;
500
501        // Calculate R²
502        let mean_y = sum_y / n;
503        let ss_tot: f64 = data.iter().map(|(_, y)| (y - mean_y).powi(2)).sum();
504        let ss_res: f64 = data
505            .iter()
506            .map(|(x, y)| (y - (slope * x + intercept)).powi(2))
507            .sum();
508        let r_squared = 1.0 - (ss_res / ss_tot);
509
510        // Determine complexity order based on slope
511        let order = if slope < 0.1 {
512            "O(1)"
513        } else if slope < 1.5 {
514            "O(n)"
515        } else if slope < 2.5 {
516            "O(n²)"
517        } else {
518            "O(n³)"
519        }
520        .to_string();
521
522        ComplexityEstimate {
523            order,
524            coefficient: slope,
525            r_squared,
526        }
527    }
528
    /// Comparative analysis
    ///
    /// Compares every `"{backend}-{sampler}"` configuration: pairwise
    /// speedups, best-energy table, throughput ranking, and the
    /// quality/performance Pareto frontier.
    fn comparative_analysis(results: &[BenchmarkResult]) -> ComparativeAnalysis {
        let mut speedup_matrix = HashMap::new();
        let mut quality_comparison = HashMap::new();
        let mut efficiency_scores: HashMap<String, f64> = HashMap::new();

        // Calculate average performance for each configuration:
        // `perf` accumulates samples/s over all runs of the config,
        // `qual` keeps the minimum (i.e. best) energy observed.
        let mut config_performance: HashMap<String, (f64, f64)> = HashMap::new();
        for result in results {
            let config = format!("{}-{}", result.backend_name, result.sampler_name);
            let efficiency = result.metrics.calculate_efficiency();
            let quality = result.metrics.quality.best_energy;

            config_performance
                .entry(config.clone())
                .and_modify(|(perf, qual)| {
                    *perf += efficiency.samples_per_second;
                    *qual = qual.min(quality);
                })
                .or_insert((efficiency.samples_per_second, quality));

            efficiency_scores
                .entry(config)
                .and_modify(|e| *e += efficiency.samples_per_second)
                .or_insert(efficiency.samples_per_second);
        }

        // Calculate speedup matrix: entry (a, b) = perf(a) / perf(b).
        // NOTE(review): a zero perf(b) yields +inf here; and serde_json
        // cannot serialize maps with tuple keys, so `save_to_file` will
        // fail at runtime once this matrix is non-empty — confirm.
        let configs: Vec<String> = config_performance.keys().cloned().collect();
        for config1 in &configs {
            for config2 in &configs {
                if let (Some((perf1, _)), Some((perf2, _))) = (
                    config_performance.get(config1),
                    config_performance.get(config2),
                ) {
                    let speedup = perf1 / perf2;
                    speedup_matrix.insert((config1.clone(), config2.clone()), speedup);
                }
            }
        }

        // Quality comparison: best energy per configuration.
        for (config, (_, quality)) in &config_performance {
            quality_comparison.insert(config.clone(), *quality);
        }

        // Efficiency ranking, highest cumulative samples/s first.
        let mut efficiency_ranking: Vec<(String, f64)> = efficiency_scores.into_iter().collect();
        efficiency_ranking
            .sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));

        // Calculate Pareto frontier over (quality, performance). Both
        // scores are maximized, so quality is negated (lower energy is
        // better).
        let mut pareto_points: Vec<ParetoPoint> = Vec::new();
        for (config, (performance, quality)) in config_performance {
            let quality_score = -quality; // Negative because lower is better
            let performance_score = performance;

            // Dominated = some existing point is >= in both scores and
            // strictly > in at least one.
            let is_dominated = pareto_points.iter().any(|p| {
                p.quality_score >= quality_score
                    && p.performance_score >= performance_score
                    && (p.quality_score > quality_score || p.performance_score > performance_score)
            });

            if !is_dominated {
                // Remove points the new candidate itself dominates.
                pareto_points.retain(|p| {
                    !(quality_score >= p.quality_score
                        && performance_score >= p.performance_score
                        && (quality_score > p.quality_score
                            || performance_score > p.performance_score))
                });

                pareto_points.push(ParetoPoint {
                    configuration: config,
                    quality_score,
                    performance_score,
                });
            }
        }

        ComparativeAnalysis {
            speedup_matrix,
            quality_comparison,
            efficiency_ranking,
            pareto_frontier: pareto_points,
        }
    }
617
618    /// Generate recommendations
619    fn generate_recommendations(
620        summary: &SummaryStatistics,
621        scaling: &ScalingAnalysis,
622    ) -> Vec<Recommendation> {
623        let mut recommendations = Vec::new();
624
625        // Configuration recommendations
626        if !scaling.optimal_problem_sizes.is_empty() {
627            recommendations.push(Recommendation {
628                category: RecommendationCategory::Configuration,
629                message: format!(
630                    "Optimal problem sizes for this system: {:?}",
631                    scaling.optimal_problem_sizes
632                ),
633                impact: ImpactLevel::High,
634                details: HashMap::new(),
635            });
636        }
637
638        // Hardware recommendations
639        if scaling.time_complexity.order.contains("³")
640            || scaling.time_complexity.order.contains("⁴")
641        {
642            recommendations.push(Recommendation {
643                category: RecommendationCategory::Hardware,
644                message: "Consider GPU acceleration for large problem instances".to_string(),
645                impact: ImpactLevel::High,
646                details: HashMap::from([(
647                    "reason".to_string(),
648                    format!("Time complexity is {}", scaling.time_complexity.order),
649                )]),
650            });
651        }
652
653        // Algorithm recommendations
654        if summary.best_energy_found > -100.0 {
655            // Arbitrary threshold
656            recommendations.push(Recommendation {
657                category: RecommendationCategory::Algorithm,
658                message: "Consider hybrid algorithms for better solution quality".to_string(),
659                impact: ImpactLevel::Medium,
660                details: HashMap::new(),
661            });
662        }
663
664        // Optimization recommendations
665        if scaling.weak_scaling_efficiency < 0.7 {
666            recommendations.push(Recommendation {
667                category: RecommendationCategory::Optimization,
668                message: "Parallel efficiency is low - consider optimizing communication patterns"
669                    .to_string(),
670                impact: ImpactLevel::High,
671                details: HashMap::from([(
672                    "efficiency".to_string(),
673                    scaling.weak_scaling_efficiency.to_string(),
674                )]),
675            });
676        }
677
678        recommendations
679    }
680
681    /// Save report to file
682    pub fn save_to_file(&self, path: &str) -> Result<(), Box<dyn std::error::Error>> {
683        let json = serde_json::to_string_pretty(self)?;
684        std::fs::write(path, json)?;
685        Ok(())
686    }
687}
688
689/// Generate human-readable summary
690impl PerformanceReport {
691    pub fn generate_summary(&self) -> String {
692        let mut summary = String::new();
693
694        summary.push_str("# Performance Benchmark Report\n\n");
695        summary.push_str(&format!("Generated: {:?}\n", self.metadata.generated_at));
696        summary.push_str(&format!(
697            "Total benchmarks: {}\n",
698            self.metadata.total_benchmarks
699        ));
700        summary.push_str(&format!("Duration: {:?}\n\n", self.metadata.total_duration));
701
702        summary.push_str("## Summary\n");
703        summary.push_str(&format!(
704            "- Best time per sample: {:?}\n",
705            self.summary.best_time_per_sample
706        ));
707        summary.push_str(&format!(
708            "- Best energy found: {:.4}\n",
709            self.summary.best_energy_found
710        ));
711        summary.push_str(&format!(
712            "- Most efficient backend: {}\n",
713            self.summary.most_efficient_backend
714        ));
715        summary.push_str(&format!(
716            "- Most efficient sampler: {}\n\n",
717            self.summary.most_efficient_sampler
718        ));
719
720        summary.push_str("## Recommendations\n");
721        for rec in &self.recommendations {
722            summary.push_str(&format!(
723                "- [{}] {}\n",
724                match rec.impact {
725                    ImpactLevel::High => "HIGH",
726                    ImpactLevel::Medium => "MEDIUM",
727                    ImpactLevel::Low => "LOW",
728                },
729                rec.message
730            ));
731        }
732
733        summary
734    }
735}