// sklears_model_selection/multi_fidelity_optimization.rs
1//! Multi-Fidelity Bayesian Optimization
2//!
3//! This module provides multi-fidelity Bayesian optimization for efficient hyperparameter tuning
4//! by leveraging multiple approximation levels (fidelities) of the objective function.
5//! Lower fidelity evaluations are cheaper but less accurate, while higher fidelity evaluations
6//! are more expensive but more accurate.
7
8use scirs2_core::ndarray::Array1;
9use scirs2_core::random::rngs::StdRng;
10use scirs2_core::random::SeedableRng;
11use scirs2_core::RngExt;
12use sklears_core::types::Float;
13use std::collections::HashMap;
14
/// Fidelity levels for multi-fidelity optimization
///
/// A fidelity describes how cheap/approximate an objective evaluation is.
/// The preset variants carry fractions interpreted by the caller-supplied
/// evaluation function (see `MultiFidelityOptimizer::optimize`).
#[derive(Debug, Clone)]
pub enum FidelityLevel {
    /// Low fidelity (fast, less accurate)
    Low {
        /// Fraction of the dataset to use — presumably in (0, 1]; the
        /// interpretation is up to the evaluation callback (TODO confirm).
        sample_fraction: Float,
        /// Fraction of the full training epochs/iterations to run.
        epochs_fraction: Float,
        /// Number of cross-validation folds to evaluate with.
        cv_folds: usize,
    },
    /// Medium fidelity (moderate speed and accuracy)
    Medium {
        /// Fraction of the dataset to use.
        sample_fraction: Float,
        /// Fraction of the full training epochs/iterations to run.
        epochs_fraction: Float,
        /// Number of cross-validation folds to evaluate with.
        cv_folds: usize,
    },
    /// High fidelity (slow, most accurate)
    High {
        /// Fraction of the dataset to use.
        sample_fraction: Float,
        /// Fraction of the full training epochs/iterations to run.
        epochs_fraction: Float,
        /// Number of cross-validation folds to evaluate with.
        cv_folds: usize,
    },
    /// Custom fidelity with user-defined parameters
    Custom {
        /// Free-form fidelity knobs (e.g. "dataset_fraction" as used by
        /// the Fabolas strategy) passed through to the evaluator.
        parameters: HashMap<String, Float>,
        /// Cost of one evaluation relative to full fidelity.
        relative_cost: Float,
        /// Rough estimate of how accurate this fidelity's scores are.
        accuracy_estimate: Float,
    },
}
46
/// Multi-fidelity optimization strategies
///
/// Each variant selects a different search driver in
/// `MultiFidelityOptimizer::optimize`.
#[derive(Debug, Clone)]
pub enum MultiFidelityStrategy {
    /// Successive Halving with multiple fidelities
    SuccessiveHalving {
        /// Halving rate: the surviving fraction per round is 1/eta.
        eta: Float,
        /// Fidelity used for the first (cheapest) round.
        min_fidelity: FidelityLevel,
        /// Fidelity ceiling reached in later rounds.
        max_fidelity: FidelityLevel,
    },
    /// Multi-fidelity Bayesian Optimization (MFBO)
    BayesianOptimization {
        /// Acquisition criterion used to propose the next configuration.
        acquisition_function: AcquisitionFunction,
        /// Policy for picking which fidelity to evaluate at.
        fidelity_selection: FidelitySelectionMethod,
        /// Model of how scores correlate across fidelities.
        correlation_model: CorrelationModel,
    },
    /// Hyperband with multi-fidelity
    Hyperband {
        /// Maximum budget assignable to a single configuration.
        max_budget: Float,
        /// Downsampling rate between successive-halving rounds.
        eta: Float,
        /// Fidelity ladder that budgets are mapped onto.
        fidelities: Vec<FidelityLevel>,
    },
    /// BOHB (Bayesian Optimization and Hyperband)
    BOHB {
        /// Smallest per-configuration budget.
        min_budget: Float,
        /// Largest per-configuration budget.
        max_budget: Float,
        /// Downsampling rate, as in Hyperband.
        eta: Float,
        /// KDE bandwidth multiplier (BOHB's model component).
        bandwidth_factor: Float,
    },
    /// Fabolas (Fast Bayesian Optimization on Large Datasets)
    Fabolas {
        /// Smallest dataset fraction to evaluate on.
        min_dataset_fraction: Float,
        /// Largest dataset fraction to evaluate on.
        max_dataset_fraction: Float,
        /// Model of evaluation cost as a function of fidelity.
        cost_model: CostModel,
    },
    /// Multi-Task Gaussian Process
    MultiTaskGP {
        /// Assumed similarity between fidelity "tasks".
        task_similarity: Float,
        /// Hyperparameter names shared across tasks.
        shared_hyperparameters: Vec<String>,
    },
}
90
/// Acquisition functions for multi-fidelity optimization
///
/// Scored by `MultiFidelityOptimizer::evaluate_acquisition`; higher is
/// more promising.
#[derive(Debug, Clone)]
pub enum AcquisitionFunction {
    /// Expected Improvement with fidelity consideration
    ExpectedImprovement,
    /// Upper Confidence Bound with fidelity adjustment
    UpperConfidenceBound {
        /// Exploration weight on the predictive standard deviation.
        beta: Float,
    },
    /// Probability of Improvement
    ProbabilityOfImprovement,
    /// Knowledge Gradient
    KnowledgeGradient,
    /// Entropy Search
    EntropySearch,
    /// Multi-fidelity Expected Improvement
    MultiFidelityEI {
        /// Weight trading off fidelity cost against expected improvement.
        fidelity_weight: Float,
    },
}
107
/// Methods for selecting fidelity levels
///
/// Consumed by `MultiFidelityOptimizer::select_fidelity`. Note that only
/// `LowestFirst`, `UncertaintyBased` and `CostAware` have dedicated
/// handling there; the remaining variants fall back to a Medium preset.
#[derive(Debug, Clone)]
pub enum FidelitySelectionMethod {
    /// Always start with lowest fidelity
    LowestFirst,
    /// Dynamic selection based on uncertainty
    UncertaintyBased {
        /// Mean-uncertainty level above which high fidelity is requested.
        threshold: Float,
    },
    /// Cost-aware selection
    CostAware {
        /// Fraction of the total budget to spend at low fidelity first.
        budget_fraction: Float,
    },
    /// Performance-based selection
    PerformanceBased {
        /// Minimum score improvement that justifies raising fidelity.
        improvement_threshold: Float,
    },
    /// Information-theoretic selection
    InformationTheoretic,
}
122
/// Models for correlation between fidelities
///
/// NOTE(review): within the visible code the correlation model is captured
/// but not yet consumed (`_correlation_model` in `bayesian_optimize`);
/// confirm downstream usage before relying on variant semantics.
#[derive(Debug, Clone)]
pub enum CorrelationModel {
    /// Linear correlation between fidelities
    Linear {
        /// Strength of the assumed linear relationship.
        correlation_strength: Float,
    },
    /// Exponential correlation
    Exponential {
        /// Rate at which correlation decays with fidelity distance.
        decay_rate: Float,
    },
    /// Learned correlation using Gaussian Process
    GaussianProcess {
        /// Name of the GP kernel to use.
        kernel_type: String,
    },
    /// Rank correlation
    RankCorrelation,
}
135
/// Cost models for different fidelity levels
///
/// Describes how evaluation cost scales with fidelity (used by the
/// Fabolas strategy configuration).
#[derive(Debug, Clone)]
pub enum CostModel {
    /// Polynomial cost model
    Polynomial {
        /// Degree of the polynomial.
        degree: usize,
        /// Polynomial coefficients — ordering convention not shown here;
        /// TODO confirm (lowest-degree-first vs highest-first).
        coefficients: Vec<Float>,
    },
    /// Exponential cost model
    Exponential { base: Float, scale: Float },
    /// Linear cost model
    Linear { slope: Float, intercept: Float },
    /// Custom cost function
    Custom {
        /// Identifier of a user-registered cost function.
        cost_function: String,
    },
}
152
/// Multi-fidelity optimization configuration
///
/// See `Default` for the out-of-the-box settings (Bayesian optimization
/// with uncertainty-based fidelity selection).
#[derive(Debug, Clone)]
pub struct MultiFidelityConfig {
    /// Which multi-fidelity search algorithm to run.
    pub strategy: MultiFidelityStrategy,
    /// Hard cap on the number of objective evaluations.
    pub max_evaluations: usize,
    /// Total cost budget across all evaluations.
    pub max_budget: Float,
    /// Evaluations without improvement tolerated before stopping.
    pub early_stopping_patience: usize,
    /// How aggressively fidelity is raised over the run.
    pub fidelity_progression: FidelityProgression,
    /// RNG seed; `None` seeds from the thread-local RNG (non-reproducible).
    pub random_state: Option<u64>,
    /// Number of evaluations to run in parallel.
    /// NOTE(review): not consumed by any visible code path — confirm.
    pub parallel_evaluations: usize,
}
164
/// Fidelity progression strategies
///
/// Policy for how quickly the optimizer moves from cheap to expensive
/// fidelities over the course of a run.
#[derive(Debug, Clone)]
pub enum FidelityProgression {
    /// Linear progression from low to high fidelity
    Linear,
    /// Exponential progression
    Exponential {
        /// Multiplicative growth factor per step.
        growth_rate: Float,
    },
    /// Adaptive progression based on performance
    Adaptive {
        /// How strongly observed performance shifts the progression.
        adaptation_rate: Float,
    },
    /// Conservative progression (slow increase)
    Conservative,
    /// Aggressive progression (fast increase)
    Aggressive,
}
179
/// Evaluation result at a specific fidelity
///
/// Produced by the caller's evaluation function; one is appended to the
/// optimizer's history for every objective call.
#[derive(Debug, Clone)]
pub struct FidelityEvaluation {
    /// The configuration that was evaluated (keys like `param_0`, `param_1`, …).
    pub hyperparameters: HashMap<String, Float>,
    /// Fidelity the evaluation was run at.
    pub fidelity: FidelityLevel,
    /// Objective score (higher is better — ranking in the optimizer sorts descending).
    pub score: Float,
    /// Cost charged against the optimizer's budget for this evaluation.
    pub cost: Float,
    /// Wall-clock time of the evaluation, in seconds (units assumed — TODO confirm).
    pub evaluation_time: Float,
    /// Optional uncertainty estimate of `score`; feeds uncertainty-based
    /// fidelity selection when present.
    pub uncertainty: Option<Float>,
    /// Any extra metrics the evaluator wants to report.
    pub additional_metrics: HashMap<String, Float>,
}
191
/// Multi-fidelity optimization result
///
/// Aggregated summary returned by `MultiFidelityOptimizer::optimize`.
#[derive(Debug, Clone)]
pub struct MultiFidelityResult {
    /// Best configuration found (empty map if nothing was evaluated).
    pub best_hyperparameters: HashMap<String, Float>,
    /// Score of the best evaluation (0.0 if nothing was evaluated).
    pub best_score: Float,
    /// Fidelity at which the best score was observed.
    pub best_fidelity: FidelityLevel,
    /// Every evaluation performed, in order.
    pub optimization_history: Vec<FidelityEvaluation>,
    /// Sum of all evaluation costs.
    pub total_cost: Float,
    /// Wall-clock duration of the whole run, in seconds.
    pub total_time: Float,
    /// Best-so-far score recorded after each evaluation.
    pub convergence_curve: Vec<Float>,
    /// Evaluation counts keyed by fidelity name (via `fidelity_to_string`).
    pub fidelity_usage: HashMap<String, usize>,
    /// `best_score / total_cost`, or 0.0 when no cost was incurred.
    pub cost_efficiency: Float,
}
205
/// Multi-fidelity Bayesian optimizer
///
/// Owns the run configuration, the surrogate model, and the evaluation
/// history. Construct with `MultiFidelityOptimizer::new`, then call
/// `optimize` with an objective callback.
#[derive(Debug)]
pub struct MultiFidelityOptimizer {
    // Immutable run settings (strategy, budgets, seed).
    config: MultiFidelityConfig,
    // Surrogate model over (hyperparameters, fidelity) pairs.
    gaussian_process: MultiFidelityGP,
    // Every evaluation performed so far, in order.
    evaluation_history: Vec<FidelityEvaluation>,
    // Best evaluation seen so far, if any (maintained by `update_best`).
    current_best: Option<FidelityEvaluation>,
    // Seeded RNG used for configuration sampling.
    rng: StdRng,
}
215
/// Multi-fidelity Gaussian Process
///
/// Surrogate model fitted over past evaluations via `update` (definition
/// not visible in this chunk).
#[derive(Debug, Clone)]
pub struct MultiFidelityGP {
    // NOTE(review): fidelity is stored as a scalar Float here — the
    // encoding from `FidelityLevel` to that scalar happens in `update`;
    // confirm the mapping before interpreting these values.
    observations: Vec<(Array1<Float>, Float, Float)>, // (hyperparams, fidelity, score)
    // Kernel/noise parameters of the GP.
    hyperparameters: GPHyperparameters,
    // Whether the model has been fitted at least once.
    trained: bool,
}
223
/// Gaussian Process hyperparameters
#[derive(Debug, Clone)]
pub struct GPHyperparameters {
    /// Per-dimension kernel length scales.
    pub length_scales: Array1<Float>,
    /// Kernel signal (output) variance.
    pub signal_variance: Float,
    /// Observation noise variance.
    pub noise_variance: Float,
    /// Assumed correlation of scores across fidelity levels.
    pub fidelity_correlation: Float,
}
232
233impl Default for MultiFidelityConfig {
234    fn default() -> Self {
235        Self {
236            strategy: MultiFidelityStrategy::BayesianOptimization {
237                acquisition_function: AcquisitionFunction::ExpectedImprovement,
238                fidelity_selection: FidelitySelectionMethod::UncertaintyBased { threshold: 0.1 },
239                correlation_model: CorrelationModel::Linear {
240                    correlation_strength: 0.8,
241                },
242            },
243            max_evaluations: 100,
244            max_budget: 1000.0,
245            early_stopping_patience: 10,
246            fidelity_progression: FidelityProgression::Adaptive {
247                adaptation_rate: 0.1,
248            },
249            random_state: None,
250            parallel_evaluations: 1,
251        }
252    }
253}
254
255impl MultiFidelityOptimizer {
256    /// Create a new multi-fidelity optimizer
257    pub fn new(config: MultiFidelityConfig) -> Self {
258        let rng = match config.random_state {
259            Some(seed) => StdRng::seed_from_u64(seed),
260            None => {
261                use scirs2_core::random::thread_rng;
262                StdRng::from_rng(&mut thread_rng())
263            }
264        };
265
266        let gaussian_process = MultiFidelityGP::new();
267
268        Self {
269            config,
270            gaussian_process,
271            evaluation_history: Vec::new(),
272            current_best: None,
273            rng,
274        }
275    }
276
    /// Optimize hyperparameters using multi-fidelity approach
    ///
    /// Dispatches to the strategy configured in `self.config.strategy`,
    /// then assembles a [`MultiFidelityResult`] from the accumulated
    /// history.
    ///
    /// * `evaluation_fn` — caller-supplied objective: evaluates one
    ///   hyperparameter configuration at the requested fidelity.
    /// * `parameter_bounds` — `(low, high)` per parameter; parameters are
    ///   addressed positionally as `param_{i}`.
    ///
    /// Returns an error if any objective evaluation (or GP update) fails.
    pub fn optimize<F>(
        &mut self,
        evaluation_fn: F,
        parameter_bounds: &[(Float, Float)],
    ) -> Result<MultiFidelityResult, Box<dyn std::error::Error>>
    where
        F: Fn(
            &HashMap<String, Float>,
            &FidelityLevel,
        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
    {
        let start_time = std::time::Instant::now();
        // Shared accumulators, mutated in place by every strategy driver.
        let mut total_cost = 0.0;
        let mut convergence_curve = Vec::new();
        let mut fidelity_usage = HashMap::new();

        match &self.config.strategy {
            MultiFidelityStrategy::SuccessiveHalving { .. } => {
                self.successive_halving_optimize(
                    &evaluation_fn,
                    parameter_bounds,
                    &mut total_cost,
                    &mut convergence_curve,
                    &mut fidelity_usage,
                )?;
            }
            MultiFidelityStrategy::BayesianOptimization { .. } => {
                self.bayesian_optimize(
                    &evaluation_fn,
                    parameter_bounds,
                    &mut total_cost,
                    &mut convergence_curve,
                    &mut fidelity_usage,
                )?;
            }
            MultiFidelityStrategy::Hyperband { .. } => {
                self.hyperband_optimize(
                    &evaluation_fn,
                    parameter_bounds,
                    &mut total_cost,
                    &mut convergence_curve,
                    &mut fidelity_usage,
                )?;
            }
            MultiFidelityStrategy::BOHB { .. } => {
                self.bohb_optimize(
                    &evaluation_fn,
                    parameter_bounds,
                    &mut total_cost,
                    &mut convergence_curve,
                    &mut fidelity_usage,
                )?;
            }
            MultiFidelityStrategy::Fabolas { .. } => {
                self.fabolas_optimize(
                    &evaluation_fn,
                    parameter_bounds,
                    &mut total_cost,
                    &mut convergence_curve,
                    &mut fidelity_usage,
                )?;
            }
            MultiFidelityStrategy::MultiTaskGP { .. } => {
                self.multi_task_gp_optimize(
                    &evaluation_fn,
                    parameter_bounds,
                    &mut total_cost,
                    &mut convergence_curve,
                    &mut fidelity_usage,
                )?;
            }
        }

        let total_time = start_time.elapsed().as_secs_f64() as Float;
        // Cost efficiency = best score per unit of cost; 0 when nothing was spent.
        let cost_efficiency = if total_cost > 0.0 {
            self.current_best.as_ref().map_or(0.0, |best| best.score) / total_cost
        } else {
            0.0
        };

        // When no evaluation succeeded, fall back to empty/default values
        // rather than failing the whole run.
        Ok(MultiFidelityResult {
            best_hyperparameters: self
                .current_best
                .as_ref()
                .map(|best| best.hyperparameters.clone())
                .unwrap_or_default(),
            best_score: self.current_best.as_ref().map_or(0.0, |best| best.score),
            best_fidelity: self
                .current_best
                .as_ref()
                .map(|best| best.fidelity.clone())
                .unwrap_or(self.get_default_fidelity()),
            optimization_history: self.evaluation_history.clone(),
            total_cost,
            total_time,
            convergence_curve,
            fidelity_usage,
            cost_efficiency,
        })
    }
378
    /// Successive halving with multi-fidelity
    ///
    /// Starts 50 random configurations at `min_fidelity`, and each round
    /// keeps the top `1/eta` fraction by score while raising the fidelity,
    /// until one configuration remains or a stop condition triggers.
    /// Accumulators (`total_cost`, `convergence_curve`, `fidelity_usage`)
    /// are updated in place for the caller.
    fn successive_halving_optimize<F>(
        &mut self,
        evaluation_fn: &F,
        parameter_bounds: &[(Float, Float)],
        total_cost: &mut Float,
        convergence_curve: &mut Vec<Float>,
        fidelity_usage: &mut HashMap<String, usize>,
    ) -> Result<(), Box<dyn std::error::Error>>
    where
        F: Fn(
            &HashMap<String, Float>,
            &FidelityLevel,
        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
    {
        // `optimize` only dispatches here for this variant, hence unreachable!().
        let (eta, min_fidelity, max_fidelity) = match &self.config.strategy {
            MultiFidelityStrategy::SuccessiveHalving {
                eta,
                min_fidelity,
                max_fidelity,
            } => (*eta, min_fidelity.clone(), max_fidelity.clone()),
            _ => unreachable!(),
        };

        let mut configurations = self.generate_initial_configurations(parameter_bounds, 50)?;
        let mut current_fidelity = min_fidelity;

        while configurations.len() > 1 && !self.should_stop() {
            let mut evaluations = Vec::new();

            // Evaluate all configurations at current fidelity
            for config in &configurations {
                let evaluation = evaluation_fn(config, &current_fidelity)?;
                *total_cost += evaluation.cost;
                *fidelity_usage
                    .entry(self.fidelity_to_string(&current_fidelity))
                    .or_insert(0) += 1;

                self.evaluation_history.push(evaluation.clone());
                evaluations.push(evaluation.clone());

                // Record the best-so-far score after every evaluation so the
                // convergence curve has one point per objective call.
                if self.update_best(&evaluation) {
                    convergence_curve.push(
                        self.current_best
                            .as_ref()
                            .expect("operation should succeed")
                            .score,
                    );
                } else if let Some(best) = &self.current_best {
                    convergence_curve.push(best.score);
                }
            }

            // Keep top 1/eta configurations (descending by score; panics on
            // NaN scores via partial_cmp's expect).
            evaluations.sort_by(|a, b| {
                b.score
                    .partial_cmp(&a.score)
                    .expect("operation should succeed")
            });
            // Always retain at least one configuration.
            let keep_count = (configurations.len() as Float / eta).max(1.0) as usize;

            configurations = evaluations
                .iter()
                .take(keep_count)
                .map(|eval| eval.hyperparameters.clone())
                .collect();

            // Increase fidelity (clamped at `max_fidelity` by the helper).
            current_fidelity = self.increase_fidelity(&current_fidelity, &max_fidelity);
        }

        Ok(())
    }
452
    /// Bayesian optimization with multi-fidelity
    ///
    /// Warm-starts with 5 random evaluations, fits the multi-fidelity GP,
    /// then alternates acquisition-driven proposals with periodic GP
    /// refits until `max_evaluations` or a stop condition is reached.
    fn bayesian_optimize<F>(
        &mut self,
        evaluation_fn: &F,
        parameter_bounds: &[(Float, Float)],
        total_cost: &mut Float,
        convergence_curve: &mut Vec<Float>,
        fidelity_usage: &mut HashMap<String, usize>,
    ) -> Result<(), Box<dyn std::error::Error>>
    where
        F: Fn(
            &HashMap<String, Float>,
            &FidelityLevel,
        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
    {
        // The correlation model is captured but not yet consumed here.
        let (acquisition_function, fidelity_selection, _correlation_model) =
            match &self.config.strategy {
                MultiFidelityStrategy::BayesianOptimization {
                    acquisition_function,
                    fidelity_selection,
                    correlation_model,
                } => (
                    acquisition_function.clone(),
                    fidelity_selection.clone(),
                    correlation_model.clone(),
                ),
                _ => unreachable!(),
            };

        // Initialize with random evaluations
        let init_evaluations = 5;
        for _ in 0..init_evaluations {
            let config = self.sample_random_configuration(parameter_bounds)?;
            // No candidate configuration yet, so pass None to the selector.
            let fidelity = self.select_fidelity(&fidelity_selection, None)?;

            let evaluation = evaluation_fn(&config, &fidelity)?;
            *total_cost += evaluation.cost;
            *fidelity_usage
                .entry(self.fidelity_to_string(&fidelity))
                .or_insert(0) += 1;

            self.evaluation_history.push(evaluation.clone());
            // One convergence-curve point per evaluation (best-so-far score).
            if self.update_best(&evaluation) {
                convergence_curve.push(
                    self.current_best
                        .as_ref()
                        .expect("operation should succeed")
                        .score,
                );
            } else if let Some(best) = &self.current_best {
                convergence_curve.push(best.score);
            }
        }

        // Update Gaussian Process
        self.gaussian_process.update(&self.evaluation_history)?;

        // Bayesian optimization loop
        while self.evaluation_history.len() < self.config.max_evaluations && !self.should_stop() {
            // Select next configuration and fidelity
            let next_config = self.optimize_acquisition(&acquisition_function, parameter_bounds)?;
            let next_fidelity = self.select_fidelity(&fidelity_selection, Some(&next_config))?;

            let evaluation = evaluation_fn(&next_config, &next_fidelity)?;
            *total_cost += evaluation.cost;
            *fidelity_usage
                .entry(self.fidelity_to_string(&next_fidelity))
                .or_insert(0) += 1;

            self.evaluation_history.push(evaluation.clone());
            if self.update_best(&evaluation) {
                convergence_curve.push(
                    self.current_best
                        .as_ref()
                        .expect("operation should succeed")
                        .score,
                );
            } else if let Some(best) = &self.current_best {
                convergence_curve.push(best.score);
            }

            // Update Gaussian Process periodically (every 5 evaluations) to
            // amortize the refit cost.
            if self.evaluation_history.len() % 5 == 0 {
                self.gaussian_process.update(&self.evaluation_history)?;
            }
        }

        Ok(())
    }
542
    /// Hyperband optimization
    ///
    /// Runs `s_max + 1` successive-halving brackets. Bracket `s` starts
    /// with `n` configurations at budget `r = max_budget * eta^-s`, then
    /// each inner round keeps the top `n_i` configurations and multiplies
    /// the budget by `eta`. Budgets are mapped onto the configured
    /// fidelity ladder via `budget_to_fidelity`.
    fn hyperband_optimize<F>(
        &mut self,
        evaluation_fn: &F,
        parameter_bounds: &[(Float, Float)],
        total_cost: &mut Float,
        convergence_curve: &mut Vec<Float>,
        fidelity_usage: &mut HashMap<String, usize>,
    ) -> Result<(), Box<dyn std::error::Error>>
    where
        F: Fn(
            &HashMap<String, Float>,
            &FidelityLevel,
        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
    {
        let (max_budget, eta, fidelities) = match &self.config.strategy {
            MultiFidelityStrategy::Hyperband {
                max_budget,
                eta,
                fidelities,
            } => (*max_budget, *eta, fidelities.clone()),
            _ => unreachable!(),
        };

        // s_max = floor(log_eta(max_budget)): number of halving brackets - 1.
        let log_eta = eta.ln();
        let s_max = (max_budget.ln() / log_eta).floor() as usize;

        for s in 0..=s_max {
            // Initial configuration count and per-configuration budget for
            // this bracket (mirrors the Hyperband schedule).
            let n = ((s_max + 1) as Float * eta.powi(s as i32) / (s + 1) as Float).ceil() as usize;
            let r = max_budget * eta.powi(-(s as i32));

            let mut configurations = self.generate_initial_configurations(parameter_bounds, n)?;
            let current_budget = r;

            for i in 0..=s {
                // Survivors and budget for round i of this bracket.
                let n_i = (n as Float * eta.powi(-(i as i32))).floor() as usize;
                let r_i = current_budget * eta.powi(i as i32);

                if configurations.len() > n_i {
                    configurations.truncate(n_i);
                }

                // Translate the abstract budget into a concrete fidelity level.
                let fidelity = self.budget_to_fidelity(r_i, &fidelities);
                let mut evaluations = Vec::new();

                for config in &configurations {
                    let evaluation = evaluation_fn(config, &fidelity)?;
                    *total_cost += evaluation.cost;
                    *fidelity_usage
                        .entry(self.fidelity_to_string(&fidelity))
                        .or_insert(0) += 1;

                    self.evaluation_history.push(evaluation.clone());
                    evaluations.push(evaluation.clone());

                    // One convergence-curve point per evaluation.
                    if self.update_best(&evaluation) {
                        convergence_curve.push(
                            self.current_best
                                .as_ref()
                                .expect("operation should succeed")
                                .score,
                        );
                    } else if let Some(best) = &self.current_best {
                        convergence_curve.push(best.score);
                    }
                }

                // Keep top configurations (descending score; NaN panics).
                evaluations.sort_by(|a, b| {
                    b.score
                        .partial_cmp(&a.score)
                        .expect("operation should succeed")
                });
                configurations = evaluations
                    .iter()
                    .take(n_i)
                    .map(|eval| eval.hyperparameters.clone())
                    .collect();
            }
        }

        Ok(())
    }
626
627    /// BOHB optimization (Bayesian Optimization and Hyperband)
628    fn bohb_optimize<F>(
629        &mut self,
630        evaluation_fn: &F,
631        parameter_bounds: &[(Float, Float)],
632        total_cost: &mut Float,
633        convergence_curve: &mut Vec<Float>,
634        fidelity_usage: &mut HashMap<String, usize>,
635    ) -> Result<(), Box<dyn std::error::Error>>
636    where
637        F: Fn(
638            &HashMap<String, Float>,
639            &FidelityLevel,
640        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
641    {
642        // Simplified BOHB implementation combining Hyperband with Bayesian optimization
643        // Start with Hyperband for exploration
644        self.hyperband_optimize(
645            evaluation_fn,
646            parameter_bounds,
647            total_cost,
648            convergence_curve,
649            fidelity_usage,
650        )?;
651
652        // Continue with Bayesian optimization for exploitation
653        let remaining_budget = self.config.max_budget - *total_cost;
654        if remaining_budget > 0.0 {
655            self.bayesian_optimize(
656                evaluation_fn,
657                parameter_bounds,
658                total_cost,
659                convergence_curve,
660                fidelity_usage,
661            )?;
662        }
663
664        Ok(())
665    }
666
667    /// Fabolas optimization
668    fn fabolas_optimize<F>(
669        &mut self,
670        evaluation_fn: &F,
671        parameter_bounds: &[(Float, Float)],
672        total_cost: &mut Float,
673        convergence_curve: &mut Vec<Float>,
674        fidelity_usage: &mut HashMap<String, usize>,
675    ) -> Result<(), Box<dyn std::error::Error>>
676    where
677        F: Fn(
678            &HashMap<String, Float>,
679            &FidelityLevel,
680        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
681    {
682        // Simplified Fabolas implementation focusing on dataset size as fidelity
683        let (min_fraction, max_fraction, _cost_model) = match &self.config.strategy {
684            MultiFidelityStrategy::Fabolas {
685                min_dataset_fraction,
686                max_dataset_fraction,
687                cost_model,
688            } => (*min_dataset_fraction, *max_dataset_fraction, cost_model),
689            _ => unreachable!(),
690        };
691
692        let mut current_fraction = min_fraction;
693        let fraction_step = (max_fraction - min_fraction) / 10.0;
694
695        while current_fraction <= max_fraction && !self.should_stop() {
696            let fidelity = FidelityLevel::Custom {
697                parameters: {
698                    let mut params = HashMap::new();
699                    params.insert("dataset_fraction".to_string(), current_fraction);
700                    params
701                },
702                relative_cost: current_fraction,
703                accuracy_estimate: current_fraction.sqrt(),
704            };
705
706            let config = self.sample_random_configuration(parameter_bounds)?;
707            let evaluation = evaluation_fn(&config, &fidelity)?;
708
709            *total_cost += evaluation.cost;
710            *fidelity_usage
711                .entry(self.fidelity_to_string(&fidelity))
712                .or_insert(0) += 1;
713
714            self.evaluation_history.push(evaluation.clone());
715            if self.update_best(&evaluation) {
716                convergence_curve.push(
717                    self.current_best
718                        .as_ref()
719                        .expect("operation should succeed")
720                        .score,
721                );
722            } else if let Some(best) = &self.current_best {
723                convergence_curve.push(best.score);
724            }
725
726            current_fraction += fraction_step;
727        }
728
729        Ok(())
730    }
731
732    /// Multi-task Gaussian Process optimization
733    fn multi_task_gp_optimize<F>(
734        &mut self,
735        evaluation_fn: &F,
736        parameter_bounds: &[(Float, Float)],
737        total_cost: &mut Float,
738        convergence_curve: &mut Vec<Float>,
739        fidelity_usage: &mut HashMap<String, usize>,
740    ) -> Result<(), Box<dyn std::error::Error>>
741    where
742        F: Fn(
743            &HashMap<String, Float>,
744            &FidelityLevel,
745        ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
746    {
747        // Simplified multi-task GP implementation
748        // Treat each fidelity as a separate task
749        let fidelities = vec![
750            FidelityLevel::Low {
751                sample_fraction: 0.1,
752                epochs_fraction: 0.1,
753                cv_folds: 3,
754            },
755            FidelityLevel::Medium {
756                sample_fraction: 0.5,
757                epochs_fraction: 0.5,
758                cv_folds: 5,
759            },
760            FidelityLevel::High {
761                sample_fraction: 1.0,
762                epochs_fraction: 1.0,
763                cv_folds: 10,
764            },
765        ];
766
767        while self.evaluation_history.len() < self.config.max_evaluations && !self.should_stop() {
768            for fidelity in &fidelities {
769                let config = self.sample_random_configuration(parameter_bounds)?;
770                let evaluation = evaluation_fn(&config, fidelity)?;
771
772                *total_cost += evaluation.cost;
773                *fidelity_usage
774                    .entry(self.fidelity_to_string(fidelity))
775                    .or_insert(0) += 1;
776
777                self.evaluation_history.push(evaluation.clone());
778                if self.update_best(&evaluation) {
779                    convergence_curve.push(
780                        self.current_best
781                            .as_ref()
782                            .expect("operation should succeed")
783                            .score,
784                    );
785                } else if let Some(best) = &self.current_best {
786                    convergence_curve.push(best.score);
787                }
788
789                if self.evaluation_history.len() >= self.config.max_evaluations {
790                    break;
791                }
792            }
793        }
794
795        Ok(())
796    }
797
798    /// Generate initial random configurations
799    fn generate_initial_configurations(
800        &mut self,
801        parameter_bounds: &[(Float, Float)],
802        n: usize,
803    ) -> Result<Vec<HashMap<String, Float>>, Box<dyn std::error::Error>> {
804        let mut configurations = Vec::new();
805
806        for _ in 0..n {
807            configurations.push(self.sample_random_configuration(parameter_bounds)?);
808        }
809
810        Ok(configurations)
811    }
812
813    /// Sample a random configuration
814    fn sample_random_configuration(
815        &mut self,
816        parameter_bounds: &[(Float, Float)],
817    ) -> Result<HashMap<String, Float>, Box<dyn std::error::Error>> {
818        let mut config = HashMap::new();
819
820        for (i, &(low, high)) in parameter_bounds.iter().enumerate() {
821            let value = self.rng.random_range(low..high + 1.0);
822            config.insert(format!("param_{}", i), value);
823        }
824
825        Ok(config)
826    }
827
828    /// Select fidelity level based on strategy
829    fn select_fidelity(
830        &mut self,
831        method: &FidelitySelectionMethod,
832        _config: Option<&HashMap<String, Float>>,
833    ) -> Result<FidelityLevel, Box<dyn std::error::Error>> {
834        match method {
835            FidelitySelectionMethod::LowestFirst => Ok(FidelityLevel::Low {
836                sample_fraction: 0.1,
837                epochs_fraction: 0.1,
838                cv_folds: 3,
839            }),
840            FidelitySelectionMethod::UncertaintyBased { threshold } => {
841                // Use uncertainty to determine fidelity
842                if self.evaluation_history.len() < 5 {
843                    Ok(FidelityLevel::Low {
844                        sample_fraction: 0.1,
845                        epochs_fraction: 0.1,
846                        cv_folds: 3,
847                    })
848                } else {
849                    let avg_uncertainty = self
850                        .evaluation_history
851                        .iter()
852                        .filter_map(|eval| eval.uncertainty)
853                        .sum::<Float>()
854                        / self.evaluation_history.len() as Float;
855
856                    if avg_uncertainty > *threshold {
857                        Ok(FidelityLevel::High {
858                            sample_fraction: 1.0,
859                            epochs_fraction: 1.0,
860                            cv_folds: 10,
861                        })
862                    } else {
863                        Ok(FidelityLevel::Medium {
864                            sample_fraction: 0.5,
865                            epochs_fraction: 0.5,
866                            cv_folds: 5,
867                        })
868                    }
869                }
870            }
871            FidelitySelectionMethod::CostAware { budget_fraction } => {
872                let used_budget_fraction = self
873                    .evaluation_history
874                    .iter()
875                    .map(|e| e.cost)
876                    .sum::<Float>()
877                    / self.config.max_budget;
878
879                if used_budget_fraction < *budget_fraction {
880                    Ok(FidelityLevel::Low {
881                        sample_fraction: 0.1,
882                        epochs_fraction: 0.1,
883                        cv_folds: 3,
884                    })
885                } else {
886                    Ok(FidelityLevel::High {
887                        sample_fraction: 1.0,
888                        epochs_fraction: 1.0,
889                        cv_folds: 10,
890                    })
891                }
892            }
893            _ => Ok(FidelityLevel::Medium {
894                sample_fraction: 0.5,
895                epochs_fraction: 0.5,
896                cv_folds: 5,
897            }),
898        }
899    }
900
901    /// Optimize acquisition function
902    fn optimize_acquisition(
903        &mut self,
904        acquisition_function: &AcquisitionFunction,
905        parameter_bounds: &[(Float, Float)],
906    ) -> Result<HashMap<String, Float>, Box<dyn std::error::Error>> {
907        // Simplified acquisition optimization - random search
908        let n_candidates = 100;
909        let mut best_config = self.sample_random_configuration(parameter_bounds)?;
910        let mut best_acquisition_value = Float::NEG_INFINITY;
911
912        for _ in 0..n_candidates {
913            let candidate = self.sample_random_configuration(parameter_bounds)?;
914            let acquisition_value = self.evaluate_acquisition(&candidate, acquisition_function)?;
915
916            if acquisition_value > best_acquisition_value {
917                best_acquisition_value = acquisition_value;
918                best_config = candidate;
919            }
920        }
921
922        Ok(best_config)
923    }
924
925    /// Evaluate acquisition function
926    fn evaluate_acquisition(
927        &mut self,
928        config: &HashMap<String, Float>,
929        acquisition_function: &AcquisitionFunction,
930    ) -> Result<Float, Box<dyn std::error::Error>> {
931        // Simplified acquisition function evaluation
932        match acquisition_function {
933            AcquisitionFunction::ExpectedImprovement => {
934                // Mock EI calculation
935                let config_vec: Vec<Float> = config.values().cloned().collect();
936                let config_sum = config_vec.iter().sum::<Float>();
937                Ok(config_sum + self.rng.random::<Float>() * 0.1)
938            }
939            AcquisitionFunction::UpperConfidenceBound { beta } => {
940                // Mock UCB calculation
941                let config_vec: Vec<Float> = config.values().cloned().collect();
942                let config_sum = config_vec.iter().sum::<Float>();
943                Ok(config_sum + beta * self.rng.random::<Float>())
944            }
945            _ => {
946                // Default to random value
947                Ok(self.rng.random::<Float>())
948            }
949        }
950    }
951
952    /// Increase fidelity level
953    fn increase_fidelity(&self, current: &FidelityLevel, max: &FidelityLevel) -> FidelityLevel {
954        match (current, max) {
955            (FidelityLevel::Low { .. }, _) => FidelityLevel::Medium {
956                sample_fraction: 0.5,
957                epochs_fraction: 0.5,
958                cv_folds: 5,
959            },
960            (FidelityLevel::Medium { .. }, _) => FidelityLevel::High {
961                sample_fraction: 1.0,
962                epochs_fraction: 1.0,
963                cv_folds: 10,
964            },
965            _ => current.clone(),
966        }
967    }
968
969    /// Convert budget to fidelity level
970    fn budget_to_fidelity(&self, budget: Float, fidelities: &[FidelityLevel]) -> FidelityLevel {
971        if budget < 0.3 {
972            fidelities
973                .first()
974                .unwrap_or(&FidelityLevel::Low {
975                    sample_fraction: 0.1,
976                    epochs_fraction: 0.1,
977                    cv_folds: 3,
978                })
979                .clone()
980        } else if budget < 0.7 {
981            fidelities
982                .get(1)
983                .unwrap_or(&FidelityLevel::Medium {
984                    sample_fraction: 0.5,
985                    epochs_fraction: 0.5,
986                    cv_folds: 5,
987                })
988                .clone()
989        } else {
990            fidelities
991                .get(2)
992                .unwrap_or(&FidelityLevel::High {
993                    sample_fraction: 1.0,
994                    epochs_fraction: 1.0,
995                    cv_folds: 10,
996                })
997                .clone()
998        }
999    }
1000
1001    /// Convert fidelity to string for tracking
1002    fn fidelity_to_string(&self, fidelity: &FidelityLevel) -> String {
1003        match fidelity {
1004            FidelityLevel::Low { .. } => "Low".to_string(),
1005            FidelityLevel::Medium { .. } => "Medium".to_string(),
1006            FidelityLevel::High { .. } => "High".to_string(),
1007            FidelityLevel::Custom { .. } => "Custom".to_string(),
1008        }
1009    }
1010
1011    /// Update best configuration
1012    fn update_best(&mut self, evaluation: &FidelityEvaluation) -> bool {
1013        match &self.current_best {
1014            Some(current) => {
1015                if evaluation.score > current.score {
1016                    self.current_best = Some(evaluation.clone());
1017                    true
1018                } else {
1019                    false
1020                }
1021            }
1022            None => {
1023                self.current_best = Some(evaluation.clone());
1024                true
1025            }
1026        }
1027    }
1028
1029    /// Check if optimization should stop
1030    fn should_stop(&self) -> bool {
1031        self.evaluation_history.len() >= self.config.max_evaluations
1032    }
1033
1034    /// Get default fidelity level
1035    fn get_default_fidelity(&self) -> FidelityLevel {
1036        FidelityLevel::Medium {
1037            sample_fraction: 0.5,
1038            epochs_fraction: 0.5,
1039            cv_folds: 5,
1040        }
1041    }
1042}
1043
1044impl MultiFidelityGP {
1045    /// Create a new multi-fidelity Gaussian Process
1046    fn new() -> Self {
1047        Self {
1048            observations: Vec::new(),
1049            hyperparameters: GPHyperparameters {
1050                length_scales: Array1::from_elem(1, 1.0),
1051                signal_variance: 1.0,
1052                noise_variance: 0.1,
1053                fidelity_correlation: 0.8,
1054            },
1055            trained: false,
1056        }
1057    }
1058
1059    /// Update the GP with new observations
1060    fn update(
1061        &mut self,
1062        evaluations: &[FidelityEvaluation],
1063    ) -> Result<(), Box<dyn std::error::Error>> {
1064        self.observations.clear();
1065
1066        for eval in evaluations {
1067            let params: Vec<Float> = eval.hyperparameters.values().cloned().collect();
1068            let fidelity_value = self.fidelity_to_value(&eval.fidelity);
1069            self.observations
1070                .push((Array1::from_vec(params), fidelity_value, eval.score));
1071        }
1072
1073        // Simplified GP update
1074        self.trained = true;
1075        Ok(())
1076    }
1077
1078    /// Convert fidelity to numerical value
1079    fn fidelity_to_value(&self, fidelity: &FidelityLevel) -> Float {
1080        match fidelity {
1081            FidelityLevel::Low { .. } => 0.1,
1082            FidelityLevel::Medium { .. } => 0.5,
1083            FidelityLevel::High { .. } => 1.0,
1084            FidelityLevel::Custom { relative_cost, .. } => *relative_cost,
1085        }
1086    }
1087}
1088
1089/// Convenience function for multi-fidelity optimization
1090pub fn multi_fidelity_optimize<F>(
1091    evaluation_fn: F,
1092    parameter_bounds: &[(Float, Float)],
1093    config: Option<MultiFidelityConfig>,
1094) -> Result<MultiFidelityResult, Box<dyn std::error::Error>>
1095where
1096    F: Fn(
1097        &HashMap<String, Float>,
1098        &FidelityLevel,
1099    ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>>,
1100{
1101    let config = config.unwrap_or_default();
1102    let mut optimizer = MultiFidelityOptimizer::new(config);
1103    optimizer.optimize(evaluation_fn, parameter_bounds)
1104}
1105
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;

    /// Deterministic stand-in for a real training run: the score is a scaled
    /// sum of the hyperparameter values and the cost depends only on the
    /// fidelity tier.
    fn mock_evaluation_function(
        hyperparameters: &HashMap<String, Float>,
        fidelity: &FidelityLevel,
    ) -> Result<FidelityEvaluation, Box<dyn std::error::Error>> {
        let cost = match fidelity {
            FidelityLevel::Low { .. } => 1.0,
            FidelityLevel::Medium { .. } => 5.0,
            FidelityLevel::High { .. } => 10.0,
            FidelityLevel::Custom { relative_cost, .. } => *relative_cost * 10.0,
        };
        let score = 0.1 * hyperparameters.values().sum::<Float>();

        Ok(FidelityEvaluation {
            hyperparameters: hyperparameters.clone(),
            fidelity: fidelity.clone(),
            score,
            cost,
            evaluation_time: cost,
            uncertainty: Some(0.1),
            additional_metrics: HashMap::new(),
        })
    }

    #[test]
    fn test_multi_fidelity_optimizer_creation() {
        // A fresh optimizer starts with an empty evaluation history.
        let optimizer = MultiFidelityOptimizer::new(MultiFidelityConfig::default());
        assert_eq!(optimizer.evaluation_history.len(), 0);
    }

    #[test]
    fn test_multi_fidelity_optimization() {
        let config = MultiFidelityConfig {
            max_evaluations: 10,
            max_budget: 100.0,
            ..Default::default()
        };
        let bounds = [(0.0, 1.0), (0.0, 1.0)];

        let result = multi_fidelity_optimize(mock_evaluation_function, &bounds, Some(config))
            .expect("operation should succeed");

        // Non-negative parameters imply a non-negative score; every
        // evaluation has positive cost and must appear in the history.
        assert!(result.best_score >= 0.0);
        assert!(result.total_cost > 0.0);
        assert!(!result.optimization_history.is_empty());
    }

    #[test]
    fn test_fidelity_levels() {
        let fidelity = FidelityLevel::Low {
            sample_fraction: 0.1,
            epochs_fraction: 0.1,
            cv_folds: 3,
        };
        let mut params = HashMap::new();
        params.insert("param_0".to_string(), 0.5);

        let evaluation =
            mock_evaluation_function(&params, &fidelity).expect("operation should succeed");

        // The mock charges exactly 1.0 for the Low tier.
        assert_eq!(evaluation.cost, 1.0);
    }

    #[test]
    fn test_successive_halving_strategy() {
        let config = MultiFidelityConfig {
            strategy: MultiFidelityStrategy::SuccessiveHalving {
                eta: 2.0,
                min_fidelity: FidelityLevel::Low {
                    sample_fraction: 0.1,
                    epochs_fraction: 0.1,
                    cv_folds: 3,
                },
                max_fidelity: FidelityLevel::High {
                    sample_fraction: 1.0,
                    epochs_fraction: 1.0,
                    cv_folds: 10,
                },
            },
            max_evaluations: 20,
            max_budget: 200.0,
            ..Default::default()
        };
        let bounds = [(0.0, 1.0), (0.0, 1.0)];

        let result = multi_fidelity_optimize(mock_evaluation_function, &bounds, Some(config))
            .expect("operation should succeed");

        assert!(result.best_score >= 0.0);
        assert!(!result.fidelity_usage.is_empty());
    }
}