scirs2_optimize/benchmarking.rs

//! Comprehensive benchmarking system for optimization algorithms
//!
//! This module provides a complete benchmarking suite for comparing different
//! optimization algorithms across various test problems, metrics, and scenarios.
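//!
//! # Example
//!
//! A minimal usage sketch, assuming the module is exported as
//! `scirs2_optimize::benchmarking`; the closure below is an illustrative
//! placeholder rather than a real optimizer:
//!
//! ```ignore
//! use scirs2_optimize::benchmarking::{benchmark_suites, BenchmarkSystem};
//! use scirs2_optimize::result::OptimizeResults;
//!
//! let system = BenchmarkSystem::new(benchmark_suites::quick_benchmark());
//! let results = system.benchmark_algorithm("baseline", |problem, x0| {
//!     // Placeholder "optimizer": evaluate the starting point once.
//!     let mut res = OptimizeResults::<f64>::default();
//!     res.x = x0.clone();
//!     res.fun = problem.evaluate(&x0.view());
//!     res.nfev = 1;
//!     Ok(res)
//! })?;
//! println!("{}", results.generate_report()?);
//! ```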

use crate::error::ScirsResult;
use scirs2_core::ndarray::{Array1, ArrayView1};
use std::collections::HashMap;
use std::path::Path;
use std::time::{Duration, Instant};

use crate::result::OptimizeResults;
use crate::visualization::{OptimizationTrajectory, OptimizationVisualizer};

/// Standard test functions for optimization benchmarking
pub mod test_functions {
    use super::*;

    /// Rosenbrock function (classic unconstrained optimization test)
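    ///
    /// f(x) = sum_{i=1}^{n-1} [100 (x_{i+1} - x_i^2)^2 + (1 - x_i)^2]; the global minimum is 0 at x = (1, ..., 1).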
    pub fn rosenbrock(x: &ArrayView1<f64>) -> f64 {
        let n = x.len();
        let mut sum = 0.0;
        for i in 0..(n - 1) {
            let term1 = x[i + 1] - x[i].powi(2);
            let term2 = 1.0 - x[i];
            sum += 100.0 * term1.powi(2) + term2.powi(2);
        }
        sum
    }

    /// Sphere function (simple convex test)
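    ///
    /// f(x) = sum_i x_i^2; the global minimum is 0 at the origin.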
    pub fn sphere(x: &ArrayView1<f64>) -> f64 {
        x.iter().map(|&xi| xi.powi(2)).sum()
    }

    /// Rastrigin function (highly multimodal)
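    ///
    /// f(x) = A n + sum_i [x_i^2 - A cos(2 pi x_i)] with A = 10; the global minimum is 0 at the origin.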
    pub fn rastrigin(x: &ArrayView1<f64>) -> f64 {
        let a = 10.0;
        let n = x.len() as f64;
        a * n
            + x.iter()
                .map(|&xi| xi.powi(2) - a * (2.0 * std::f64::consts::PI * xi).cos())
                .sum::<f64>()
    }

    /// Ackley function (multimodal with global structure)
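    ///
    /// f(x) = -20 exp(-0.2 sqrt(mean(x_i^2))) - exp(mean(cos(2 pi x_i))) + 20 + e; the global minimum is 0 at the origin.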
    pub fn ackley(x: &ArrayView1<f64>) -> f64 {
        let n = x.len() as f64;
        let sum_sq = x.iter().map(|&xi| xi.powi(2)).sum::<f64>();
        let sum_cos = x
            .iter()
            .map(|&xi| (2.0 * std::f64::consts::PI * xi).cos())
            .sum::<f64>();

        -20.0 * (-0.2 * (sum_sq / n).sqrt()).exp() - (sum_cos / n).exp()
            + 20.0
            + std::f64::consts::E
    }

    /// Griewank function (multimodal with product term)
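    ///
    /// f(x) = sum_i x_i^2 / 4000 - prod_i cos(x_i / sqrt(i)) + 1; the global minimum is 0 at the origin.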
    pub fn griewank(x: &ArrayView1<f64>) -> f64 {
        let sum_term = x.iter().map(|&xi| xi.powi(2)).sum::<f64>() / 4000.0;
        let prod_term = x
            .iter()
            .enumerate()
            .map(|(i, &xi)| (xi / ((i + 1) as f64).sqrt()).cos())
            .product::<f64>();

        sum_term - prod_term + 1.0
    }

    /// Levy function (multimodal with variable transformation)
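    ///
    /// Applies the shift w_i = 1 + (x_i - 1) / 4 before the sine terms; the global minimum is 0 at x = (1, ..., 1).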
    pub fn levy(x: &ArrayView1<f64>) -> f64 {
        let w: Vec<f64> = x.iter().map(|&xi| 1.0 + (xi - 1.0) / 4.0).collect();
        let n = w.len();

        let term1 = (std::f64::consts::PI * w[0]).sin().powi(2);
        let term2 = (0..(n - 1))
            .map(|i| {
                (w[i] - 1.0).powi(2)
                    * (1.0 + 10.0 * (std::f64::consts::PI * w[i + 1]).sin().powi(2))
            })
            .sum::<f64>();
        let term3 = (w[n - 1] - 1.0).powi(2)
            * (1.0 + (2.0 * std::f64::consts::PI * w[n - 1]).sin().powi(2));

        term1 + term2 + term3
    }

    /// Schwefel function (multimodal with shifted optimum)
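    ///
    /// f(x) = 418.9829 n - sum_i x_i sin(sqrt(|x_i|)); the global minimum is approximately 0 at x_i = 420.9687.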
    pub fn schwefel(x: &ArrayView1<f64>) -> f64 {
        let n = x.len() as f64;
        418.9829 * n
            - x.iter()
                .map(|&xi| xi * (xi.abs().sqrt()).sin())
                .sum::<f64>()
    }

    /// Get bounds for a test function
    pub fn get_bounds(function_name: &str, dimensions: usize) -> Vec<(f64, f64)> {
        match function_name {
            "rosenbrock" => vec![(-5.0, 5.0); dimensions],
            "sphere" => vec![(-5.12, 5.12); dimensions],
            "rastrigin" => vec![(-5.12, 5.12); dimensions],
            "ackley" => vec![(-32.768, 32.768); dimensions],
            "griewank" => vec![(-600.0, 600.0); dimensions],
            "levy" => vec![(-10.0, 10.0); dimensions],
            "schwefel" => vec![(-500.0, 500.0); dimensions],
            _ => vec![(-10.0, 10.0); dimensions],
        }
    }

    /// Get global optimum for a test function
    pub fn get_global_optimum(function_name: &str, dimensions: usize) -> (Array1<f64>, f64) {
        match function_name {
            "rosenbrock" => (Array1::ones(dimensions), 0.0),
            "sphere" => (Array1::zeros(dimensions), 0.0),
            "rastrigin" => (Array1::zeros(dimensions), 0.0),
            "ackley" => (Array1::zeros(dimensions), 0.0),
            "griewank" => (Array1::zeros(dimensions), 0.0),
            "levy" => (Array1::ones(dimensions), 0.0),
            "schwefel" => (Array1::from_elem(dimensions, 420.9687), 0.0),
            _ => (Array1::zeros(dimensions), 0.0),
        }
    }
}

/// Test problem definition
#[derive(Debug, Clone)]
pub struct TestProblem {
    /// Name of the test function
    pub name: String,
    /// Function to optimize
    pub function: fn(&ArrayView1<f64>) -> f64,
    /// Problem dimensions
    pub dimensions: usize,
    /// Variable bounds
    pub bounds: Vec<(f64, f64)>,
    /// Known global optimum location
    pub global_optimum: Array1<f64>,
    /// Known global optimum value
    pub global_minimum: f64,
    /// Problem characteristics
    pub characteristics: ProblemCharacteristics,
}

impl TestProblem {
    /// Create a new test problem
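    ///
    /// Unknown function names fall back to the sphere function with default bounds of (-10.0, 10.0).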
    pub fn new(name: &str, dimensions: usize) -> Self {
        let function = match name {
            "rosenbrock" => test_functions::rosenbrock,
            "sphere" => test_functions::sphere,
            "rastrigin" => test_functions::rastrigin,
            "ackley" => test_functions::ackley,
            "griewank" => test_functions::griewank,
            "levy" => test_functions::levy,
            "schwefel" => test_functions::schwefel,
            _ => test_functions::sphere,
        };

        let bounds = test_functions::get_bounds(name, dimensions);
        let (global_optimum, global_minimum) = test_functions::get_global_optimum(name, dimensions);
        let characteristics = ProblemCharacteristics::from_function_name(name);

        Self {
            name: name.to_string(),
            function,
            dimensions,
            bounds,
            global_optimum,
            global_minimum,
            characteristics,
        }
    }

    /// Evaluate the function at a point
    pub fn evaluate(&self, x: &ArrayView1<f64>) -> f64 {
        (self.function)(x)
    }

    /// Generate random starting points for the problem
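    ///
    /// Each point is sampled uniformly at random within the problem bounds.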
    pub fn generate_starting_points(&self, count: usize) -> ScirsResult<Vec<Array1<f64>>> {
        use scirs2_core::random::{thread_rng, Rng};
        let mut rng = thread_rng();
        let mut points = Vec::with_capacity(count);

        for _ in 0..count {
            let mut point = Array1::zeros(self.dimensions);
            for (i, &(low, high)) in self.bounds.iter().enumerate() {
                point[i] = rng.gen_range(low..=high);
            }
            points.push(point);
        }

        Ok(points)
    }
}

/// Problem characteristics for categorization
#[derive(Debug, Clone)]
pub struct ProblemCharacteristics {
    /// Whether the function is multimodal
    pub multimodal: bool,
    /// Whether the function is separable
    pub separable: bool,
    /// Whether the function is convex
    pub convex: bool,
    /// Estimated difficulty level (1-5)
    pub difficulty: u8,
}

impl ProblemCharacteristics {
    fn from_function_name(name: &str) -> Self {
        match name {
            "sphere" => Self {
                multimodal: false,
                separable: true,
                convex: true,
                difficulty: 1,
            },
            "rosenbrock" => Self {
                multimodal: false,
                separable: false,
                convex: false,
                difficulty: 3,
            },
            "rastrigin" => Self {
                multimodal: true,
                separable: true,
                convex: false,
                difficulty: 4,
            },
            "ackley" => Self {
                multimodal: true,
                separable: false,
                convex: false,
                difficulty: 4,
            },
            "griewank" => Self {
                multimodal: true,
                separable: false,
                convex: false,
                difficulty: 4,
            },
            "levy" => Self {
                multimodal: true,
                separable: false,
                convex: false,
                difficulty: 4,
            },
            "schwefel" => Self {
                multimodal: true,
                separable: true,
                convex: false,
                difficulty: 5,
            },
            _ => Self {
                multimodal: false,
                separable: true,
                convex: true,
                difficulty: 1,
            },
        }
    }
}

/// Benchmark configuration
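///
/// All fields are public and a `Default` implementation is provided, so a
/// custom configuration can be built with struct-update syntax, e.g.
/// `BenchmarkConfig { runs_per_problem: 10, ..Default::default() }`.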
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Test problems to include
    pub test_problems: Vec<String>,
    /// Problem dimensions to test
    pub dimensions: Vec<usize>,
    /// Number of independent runs per problem
    pub runs_per_problem: usize,
    /// Maximum number of function evaluations
    pub max_function_evaluations: usize,
    /// Maximum optimization time per run
    pub max_time: Duration,
    /// Target accuracy for success criteria
    pub target_accuracy: f64,
    /// Whether to enable detailed logging
    pub detailed_logging: bool,
    /// Whether to save optimization trajectories
    pub save_trajectories: bool,
    /// Output directory for results
    pub output_directory: String,
}

impl Default for BenchmarkConfig {
    fn default() -> Self {
        Self {
            test_problems: vec![
                "sphere".to_string(),
                "rosenbrock".to_string(),
                "rastrigin".to_string(),
                "ackley".to_string(),
            ],
            dimensions: vec![2, 5, 10, 20],
            runs_per_problem: 30,
            max_function_evaluations: 10000,
            max_time: Duration::from_secs(300), // 5 minutes
            target_accuracy: 1e-6,
            detailed_logging: true,
            save_trajectories: false,
            output_directory: "benchmark_results".to_string(),
        }
    }
}

/// Benchmark results for a single run
#[derive(Debug, Clone)]
pub struct BenchmarkRun {
    /// Problem that was solved
    pub problem_name: String,
    /// Problem dimensions
    pub dimensions: usize,
    /// Run number
    pub run_id: usize,
    /// Optimization algorithm used
    pub algorithm: String,
    /// Optimization results
    pub results: OptimizeResults<f64>,
    /// Runtime statistics
    pub runtime_stats: RuntimeStats,
    /// Distance to global optimum
    pub distance_to_optimum: f64,
    /// Whether the run was successful (reached target accuracy)
    pub success: bool,
    /// Optimization trajectory (if saved)
    pub trajectory: Option<OptimizationTrajectory>,
}

/// Runtime statistics for a benchmark run
#[derive(Debug, Clone)]
pub struct RuntimeStats {
    /// Total wall clock time
    pub total_time: Duration,
    /// Time per function evaluation
    pub time_per_evaluation: Duration,
    /// Peak memory usage (in bytes)
    pub peak_memory: usize,
    /// Number of convergence checks
    pub convergence_checks: usize,
}

/// Aggregated benchmark results
#[derive(Debug, Clone)]
pub struct BenchmarkResults {
    /// Configuration used for benchmarking
    pub config: BenchmarkConfig,
    /// Individual run results
    pub runs: Vec<BenchmarkRun>,
    /// Statistical summary
    pub summary: BenchmarkSummary,
    /// Performance rankings
    pub rankings: HashMap<String, AlgorithmRanking>,
}

impl BenchmarkResults {
    /// Generate comprehensive benchmark report
    pub fn generate_report(&self) -> ScirsResult<String> {
        let mut report = String::from("Optimization Algorithm Benchmark Report\n");
        report.push_str("======================================\n\n");

        // Configuration summary
        report.push_str("Benchmark Configuration:\n");
        report.push_str(&format!(
            "  Test Problems: {:?}\n",
            self.config.test_problems
        ));
        report.push_str(&format!("  Dimensions: {:?}\n", self.config.dimensions));
        report.push_str(&format!(
            "  Runs per Problem: {}\n",
            self.config.runs_per_problem
        ));
        report.push_str(&format!(
            "  Max Function Evaluations: {}\n",
            self.config.max_function_evaluations
        ));
        report.push_str(&format!(
            "  Target Accuracy: {:.2e}\n",
            self.config.target_accuracy
        ));
        report.push_str("\n");

        // Overall summary
        report.push_str("Overall Summary:\n");
        report.push_str(&format!("  Total Runs: {}\n", self.runs.len()));
        report.push_str(&format!(
            "  Successful Runs: {}\n",
            self.summary.successful_runs
        ));
        report.push_str(&format!(
            "  Success Rate: {:.1}%\n",
            self.summary.overall_success_rate * 100.0
        ));
        report.push_str(&format!(
            "  Average Runtime: {:.3}s\n",
            self.summary.average_runtime.as_secs_f64()
        ));
        report.push_str("\n");

        // Algorithm rankings
        report.push_str("Algorithm Rankings:\n");
        let mut ranked_algorithms: Vec<_> = self.rankings.iter().collect();
        ranked_algorithms.sort_by(|a, b| {
            a.1.overall_score
                .partial_cmp(&b.1.overall_score)
                .unwrap()
                .reverse()
        });

        for (i, (algorithm, ranking)) in ranked_algorithms.iter().enumerate() {
            report.push_str(&format!(
                "  {}. {} (Score: {:.3})\n",
                i + 1,
                algorithm,
                ranking.overall_score
            ));
            report.push_str(&format!(
                "     Success Rate: {:.1}%, Avg Runtime: {:.3}s\n",
                ranking.success_rate * 100.0,
                ranking.average_runtime.as_secs_f64()
            ));
        }
        report.push_str("\n");

        // Problem-specific results
        report.push_str("Problem-Specific Results:\n");
        for problem in &self.config.test_problems {
            report.push_str(&format!("  {}:\n", problem));

            let problem_runs: Vec<_> = self
                .runs
                .iter()
                .filter(|run| run.problem_name == *problem)
                .collect();

            if !problem_runs.is_empty() {
                let success_count = problem_runs.iter().filter(|run| run.success).count();
                let success_rate = success_count as f64 / problem_runs.len() as f64;
                let avg_distance = problem_runs
                    .iter()
                    .map(|run| run.distance_to_optimum)
                    .sum::<f64>()
                    / problem_runs.len() as f64;

                report.push_str(&format!("    Success Rate: {:.1}%\n", success_rate * 100.0));
                report.push_str(&format!(
                    "    Avg Distance to Optimum: {:.6e}\n",
                    avg_distance
                ));
            }
        }

        Ok(report)
    }

    /// Save results to files
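    ///
    /// Writes `benchmark_report.txt` and `benchmark_results.csv` into `output_dir`,
    /// plus SVG convergence plots when trajectory saving is enabled.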
    pub fn save_results(&self, output_dir: &Path) -> ScirsResult<()> {
        std::fs::create_dir_all(output_dir)?;

        // Save summary report
        let report = self.generate_report()?;
        let report_path = output_dir.join("benchmark_report.txt");
        std::fs::write(report_path, report)?;

        // Save detailed results as CSV
        self.save_csv_results(output_dir)?;

        // Generate visualizations
        if self.config.save_trajectories {
            self.generate_visualizations(output_dir)?;
        }

        Ok(())
    }

    /// Save results in CSV format
    fn save_csv_results(&self, output_dir: &Path) -> ScirsResult<()> {
        let csv_path = output_dir.join("benchmark_results.csv");
        let mut csv_content = String::from("problem,dimensions,run_id,algorithm,success,final_value,function_evaluations,runtime_ms,distance_to_optimum\n");

        for run in &self.runs {
            csv_content.push_str(&format!(
                "{},{},{},{},{},{:.6e},{},{},{:.6e}\n",
                run.problem_name,
                run.dimensions,
                run.run_id,
                run.algorithm,
                run.success,
                run.results.fun,
                run.results.nfev,
                run.runtime_stats.total_time.as_millis(),
                run.distance_to_optimum
            ));
        }

        std::fs::write(csv_path, csv_content)?;
        Ok(())
    }

    /// Generate visualization plots
    fn generate_visualizations(&self, output_dir: &Path) -> ScirsResult<()> {
        let viz_dir = output_dir.join("visualizations");
        std::fs::create_dir_all(&viz_dir)?;

        let visualizer = OptimizationVisualizer::new();

        // Generate convergence plots for each run with trajectory
        for run in &self.runs {
            if let Some(ref trajectory) = run.trajectory {
                let plot_path = viz_dir.join(format!(
                    "{}_{}_{}_{}.svg",
                    run.problem_name, run.dimensions, run.algorithm, run.run_id
                ));
                visualizer.plot_convergence(trajectory, &plot_path)?;
            }
        }

        Ok(())
    }
}

/// Statistical summary of benchmark results
#[derive(Debug, Clone)]
pub struct BenchmarkSummary {
    /// Total number of successful runs
    pub successful_runs: usize,
    /// Overall success rate across all problems
    pub overall_success_rate: f64,
    /// Average runtime across all runs
    pub average_runtime: Duration,
    /// Standard deviation of runtime
    pub runtime_std: Duration,
    /// Average function evaluations
    pub average_function_evaluations: f64,
    /// Best achieved distance to optimum
    pub best_distance_to_optimum: f64,
    /// Worst achieved distance to optimum
    pub worst_distance_to_optimum: f64,
}

/// Algorithm ranking information
#[derive(Debug, Clone)]
pub struct AlgorithmRanking {
    /// Algorithm name
    pub algorithm: String,
    /// Overall performance score (higher is better)
    pub overall_score: f64,
    /// Success rate
    pub success_rate: f64,
    /// Average runtime
    pub average_runtime: Duration,
    /// Average distance to optimum
    pub average_distance: f64,
    /// Ranking on each problem type
    pub problem_rankings: HashMap<String, f64>,
}

/// Main benchmarking system
pub struct BenchmarkSystem {
    config: BenchmarkConfig,
    test_problems: Vec<TestProblem>,
}

impl BenchmarkSystem {
    /// Create a new benchmark system
    pub fn new(config: BenchmarkConfig) -> Self {
        let mut test_problems = Vec::new();

        for problem_name in &config.test_problems {
            for &dim in &config.dimensions {
                test_problems.push(TestProblem::new(problem_name, dim));
            }
        }

        Self {
            config,
            test_problems,
        }
    }

    /// Run benchmark for a specific algorithm
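    ///
    /// The `optimize_fn` closure receives each test problem and a starting point
    /// and returns an `OptimizeResults<f64>`. A minimal sketch (illustrative
    /// placeholder, not a real solver):
    ///
    /// ```ignore
    /// let results = system.benchmark_algorithm("baseline", |problem, x0| {
    ///     let mut res = OptimizeResults::<f64>::default();
    ///     res.x = x0.clone();
    ///     res.fun = problem.evaluate(&x0.view());
    ///     res.nfev = 1;
    ///     Ok(res)
    /// })?;
    /// ```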
    pub fn benchmark_algorithm<F>(
        &self,
        algorithm_name: &str,
        optimize_fn: F,
    ) -> ScirsResult<BenchmarkResults>
    where
        F: Fn(&TestProblem, &Array1<f64>) -> ScirsResult<OptimizeResults<f64>> + Clone,
    {
        let mut runs = Vec::new();

        for problem in &self.test_problems {
            println!(
                "Benchmarking {} on {} ({}D)",
                algorithm_name, problem.name, problem.dimensions
            );

            let starting_points = problem.generate_starting_points(self.config.runs_per_problem)?;

            for (run_id, start_point) in starting_points.iter().enumerate() {
                let start_time = Instant::now();

                // Run optimization
                let result = optimize_fn(problem, start_point);

                let runtime = start_time.elapsed();

                match result {
                    Ok(opt_result) => {
                        // Calculate distance to global optimum
                        let distance = (&opt_result.x - &problem.global_optimum)
                            .iter()
                            .map(|&x| x * x)
                            .sum::<f64>()
                            .sqrt();

                        // Check if run was successful
                        let success = distance < self.config.target_accuracy;

                        let runtime_stats = RuntimeStats {
                            total_time: runtime,
                            time_per_evaluation: runtime / opt_result.nfev.max(1) as u32,
                            peak_memory: 0, // Would need system monitoring
                            convergence_checks: opt_result.nit,
                        };

                        runs.push(BenchmarkRun {
                            problem_name: problem.name.clone(),
                            dimensions: problem.dimensions,
                            run_id,
                            algorithm: algorithm_name.to_string(),
                            results: opt_result,
                            runtime_stats,
                            distance_to_optimum: distance,
                            success,
                            trajectory: None, // Would need to be provided by algorithm
                        });
                    }
                    Err(e) => {
                        // Record failed run
                        let runtime_stats = RuntimeStats {
                            total_time: runtime,
                            time_per_evaluation: Duration::from_secs(0),
                            peak_memory: 0,
                            convergence_checks: 0,
                        };

                        runs.push(BenchmarkRun {
                            problem_name: problem.name.clone(),
                            dimensions: problem.dimensions,
                            run_id,
                            algorithm: algorithm_name.to_string(),
                            results: OptimizeResults::<f64> {
                                x: start_point.clone(),
                                fun: f64::INFINITY,
                                success: false,
                                message: format!("Error: {}", e),
                                nit: 0,
                                nfev: 0,
                                ..OptimizeResults::default()
                            },
                            runtime_stats,
                            distance_to_optimum: f64::INFINITY,
                            success: false,
                            trajectory: None,
                        });
                    }
                }
            }
        }

        // Compute summary statistics
        let summary = self.compute_summary(&runs);
        let mut rankings = HashMap::new();
        rankings.insert(
            algorithm_name.to_string(),
            self.compute_ranking(algorithm_name, &runs),
        );

        Ok(BenchmarkResults {
            config: self.config.clone(),
            runs,
            summary,
            rankings,
        })
    }

    /// Compute summary statistics
    fn compute_summary(&self, runs: &[BenchmarkRun]) -> BenchmarkSummary {
        let successful_runs = runs.iter().filter(|run| run.success).count();
        let overall_success_rate = successful_runs as f64 / runs.len() as f64;

        let total_runtime: Duration = runs.iter().map(|run| run.runtime_stats.total_time).sum();
        let average_runtime = total_runtime / runs.len() as u32;

        let average_function_evaluations =
            runs.iter().map(|run| run.results.nfev as f64).sum::<f64>() / runs.len() as f64;

        let distances: Vec<f64> = runs
            .iter()
            .filter(|run| run.distance_to_optimum.is_finite())
            .map(|run| run.distance_to_optimum)
            .collect();

        let best_distance_to_optimum = distances.iter().cloned().fold(f64::INFINITY, f64::min);
        let worst_distance_to_optimum = distances.iter().cloned().fold(f64::NEG_INFINITY, f64::max);

        // Compute runtime standard deviation
        let mean_runtime_ms = average_runtime.as_millis() as f64;
        let variance = runs
            .iter()
            .map(|run| {
                let diff = run.runtime_stats.total_time.as_millis() as f64 - mean_runtime_ms;
                diff * diff
            })
            .sum::<f64>()
            / runs.len() as f64;
        let runtime_std = Duration::from_millis(variance.sqrt() as u64);

        BenchmarkSummary {
            successful_runs,
            overall_success_rate,
            average_runtime,
            runtime_std,
            average_function_evaluations,
            best_distance_to_optimum,
            worst_distance_to_optimum,
        }
    }

    /// Compute algorithm ranking
    fn compute_ranking(&self, algorithm: &str, runs: &[BenchmarkRun]) -> AlgorithmRanking {
        let successful_runs = runs.iter().filter(|run| run.success).count();
        let success_rate = successful_runs as f64 / runs.len() as f64;

        let total_runtime: Duration = runs.iter().map(|run| run.runtime_stats.total_time).sum();
        let average_runtime = total_runtime / runs.len() as u32;

        let finite_distances: Vec<f64> = runs
            .iter()
            .filter(|run| run.distance_to_optimum.is_finite())
            .map(|run| run.distance_to_optimum)
            .collect();

        let average_distance = if finite_distances.is_empty() {
            f64::INFINITY
        } else {
            finite_distances.iter().sum::<f64>() / finite_distances.len() as f64
        };

        // Compute overall score (higher is better)
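        // success_rate scales the mean of a reciprocal-runtime term and a
        // reciprocal-distance term, so an algorithm that rarely reaches the
        // target accuracy scores low even if it is fast.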
        let runtime_score = 1.0 / (average_runtime.as_secs_f64() + 1e-6);
        let accuracy_score = 1.0 / (average_distance + 1e-6);
        let overall_score = success_rate * (runtime_score + accuracy_score) / 2.0;

        // Compute problem-specific rankings
        let mut problem_rankings = HashMap::new();
        for problem_name in &self.config.test_problems {
            let problem_runs: Vec<_> = runs
                .iter()
                .filter(|run| run.problem_name == *problem_name)
                .collect();

            if !problem_runs.is_empty() {
                let problem_success = problem_runs.iter().filter(|run| run.success).count() as f64
                    / problem_runs.len() as f64;
                problem_rankings.insert(problem_name.clone(), problem_success);
            }
        }

        AlgorithmRanking {
            algorithm: algorithm.to_string(),
            overall_score,
            success_rate,
            average_runtime,
            average_distance,
            problem_rankings,
        }
    }
}

/// Predefined benchmark suites
pub mod benchmark_suites {
    use super::*;

    /// Create a quick benchmark for algorithm development
    pub fn quick_benchmark() -> BenchmarkConfig {
        BenchmarkConfig {
            test_problems: vec!["sphere".to_string(), "rosenbrock".to_string()],
            dimensions: vec![2, 5],
            runs_per_problem: 5,
            max_function_evaluations: 1000,
            max_time: Duration::from_secs(30),
            target_accuracy: 1e-3,
            detailed_logging: false,
            save_trajectories: false,
            output_directory: "quick_benchmark".to_string(),
        }
    }

    /// Create a comprehensive benchmark for publication
    pub fn comprehensive_benchmark() -> BenchmarkConfig {
        BenchmarkConfig {
            test_problems: vec![
                "sphere".to_string(),
                "rosenbrock".to_string(),
                "rastrigin".to_string(),
                "ackley".to_string(),
                "griewank".to_string(),
                "levy".to_string(),
                "schwefel".to_string(),
            ],
            dimensions: vec![2, 5, 10, 20, 50],
            runs_per_problem: 50,
            max_function_evaluations: 100000,
            max_time: Duration::from_secs(3600), // 1 hour
            target_accuracy: 1e-8,
            detailed_logging: true,
            save_trajectories: true,
            output_directory: "comprehensive_benchmark".to_string(),
        }
    }

    /// Create a scalability benchmark for large dimensions
    pub fn scalability_benchmark() -> BenchmarkConfig {
        BenchmarkConfig {
            test_problems: vec!["sphere".to_string(), "rastrigin".to_string()],
            dimensions: vec![10, 50, 100, 500, 1000],
            runs_per_problem: 20,
            max_function_evaluations: 1000000,
            max_time: Duration::from_secs(1800), // 30 minutes
            target_accuracy: 1e-6,
            detailed_logging: true,
            save_trajectories: false,
            output_directory: "scalability_benchmark".to_string(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_test_functions() {
        let x = array![0.0, 0.0];

        // Test that global optima are correct
        assert!((test_functions::sphere(&x.view()) - 0.0).abs() < 1e-10);
        assert!((test_functions::rastrigin(&x.view()) - 0.0).abs() < 1e-10);
        assert!((test_functions::ackley(&x.view()) - 0.0).abs() < 1e-10);
        assert!((test_functions::griewank(&x.view()) - 0.0).abs() < 1e-10);

        let x_ones = array![1.0, 1.0];
        assert!((test_functions::rosenbrock(&x_ones.view()) - 0.0).abs() < 1e-10);
        assert!((test_functions::levy(&x_ones.view()) - 0.0).abs() < 1e-10);
    }

    #[test]
    fn test_problem_creation() {
        let problem = TestProblem::new("rosenbrock", 2);
        assert_eq!(problem.name, "rosenbrock");
        assert_eq!(problem.dimensions, 2);
        assert_eq!(problem.bounds.len(), 2);
        assert_eq!(problem.global_optimum.len(), 2);
        assert!(!problem.characteristics.separable);
    }

    #[test]
    #[ignore = "timeout"]
    fn test_benchmark_config() {
        let config = BenchmarkConfig::default();
        assert!(config.test_problems.contains(&"sphere".to_string()));
        assert!(config.dimensions.contains(&2));
        assert_eq!(config.runs_per_problem, 30);
    }

    #[test]
    #[ignore = "timeout"]
    fn test_benchmark_suites() {
        let quick = benchmark_suites::quick_benchmark();
        assert_eq!(quick.runs_per_problem, 5);
        assert_eq!(quick.max_function_evaluations, 1000);

        let comprehensive = benchmark_suites::comprehensive_benchmark();
        assert_eq!(comprehensive.runs_per_problem, 50);
        assert!(comprehensive.test_problems.len() >= 5);

        let scalability = benchmark_suites::scalability_benchmark();
        assert!(scalability.dimensions.contains(&1000));
    }
897}