//! Advanced Multi-Fidelity Optimization Techniques
//!
//! This module extends multi-fidelity optimization with:
//! - Progressive Resource Allocation
//! - Coarse-to-Fine Optimization Strategies
//! - Adaptive Fidelity Selection
//! - Budget Allocation Algorithms
//!
//! These techniques enable more efficient hyperparameter optimization by intelligently
//! allocating computational resources across different fidelity levels.

12use scirs2_core::random::rngs::StdRng;
13use scirs2_core::random::{RngExt, SeedableRng};
14use sklears_core::types::Float;
15use std::collections::HashMap;
16
17// ============================================================================
18// Progressive Resource Allocation
19// ============================================================================
20
/// Progressive resource allocation strategy
///
/// Controls how per-configuration resources grow from one allocation level
/// to the next in the progressive allocator.
#[derive(Debug, Clone)]
pub enum ProgressiveAllocationStrategy {
    /// Geometric progression: level `i` receives `base_resources * ratio^i`
    Geometric {
        /// Resources per configuration at level 0
        base_resources: Float,
        /// Multiplicative growth factor between levels (also used as the promotion ratio)
        ratio: Float,
        /// Number of allocation levels
        n_levels: usize,
    },
    /// Exponential progression: level `i` receives
    /// `initial_resources * exp(growth_rate * i)`
    Exponential {
        /// Resources per configuration at level 0
        initial_resources: Float,
        /// Exponential growth rate per level
        growth_rate: Float,
        /// Number of allocation levels
        n_levels: usize,
    },
    /// Fibonacci progression: level `i` receives `base_resources * fib(i)`
    /// with `fib = 1, 1, 2, 3, 5, ...`
    Fibonacci {
        /// Base multiplier applied to each Fibonacci number
        base_resources: Float,
        /// Number of allocation levels
        n_levels: usize,
    },
    /// Adaptive progression: resources grow or shrink per round based on the
    /// spread of observed performances
    Adaptive {
        /// Resources per configuration in the first round
        initial_resources: Float,
        /// Fractional step used to increase/decrease resources between rounds
        adaptation_rate: Float,
        /// Performance spread (max - mean) above which resources are increased
        performance_threshold: Float,
    },
    /// Custom progression with a user-defined per-level resource schedule
    Custom {
        /// Resources per configuration for each level, in order
        resource_schedule: Vec<Float>,
    },
}
50
/// Configuration for progressive resource allocation
#[derive(Debug, Clone)]
pub struct ProgressiveAllocationConfig {
    /// Strategy controlling how resources grow across levels
    pub strategy: ProgressiveAllocationStrategy,
    /// Total resource budget shared by all allocations from one allocator
    pub total_budget: Float,
    /// Lower clamp applied to any per-configuration allocation
    pub min_resource_per_config: Float,
    /// Upper clamp applied to any per-configuration allocation
    pub max_resource_per_config: Float,
    /// Optional early-stopping threshold
    /// (NOTE(review): not consulted by `ProgressiveAllocator` in this file — confirm intended use)
    pub early_stopping_threshold: Option<Float>,
    /// Optional RNG seed
    /// (NOTE(review): not read by `ProgressiveAllocator` in this file)
    pub random_state: Option<u64>,
}
61
62impl Default for ProgressiveAllocationConfig {
63    fn default() -> Self {
64        Self {
65            strategy: ProgressiveAllocationStrategy::Geometric {
66                base_resources: 1.0,
67                ratio: 2.0,
68                n_levels: 5,
69            },
70            total_budget: 100.0,
71            min_resource_per_config: 0.1,
72            max_resource_per_config: 50.0,
73            early_stopping_threshold: Some(0.01),
74            random_state: None,
75        }
76    }
77}
78
/// Progressive resource allocator
///
/// Tracks the remaining budget across calls to `allocate`, so repeated calls
/// draw from the same total budget.
pub struct ProgressiveAllocator {
    /// Allocation settings, including the total budget and per-config clamps
    config: ProgressiveAllocationConfig,
    /// History buffer for per-level allocation records
    /// (NOTE(review): never written in the visible code — confirm intended use)
    allocation_history: Vec<AllocationRecord>,
    /// Current allocation level
    /// (NOTE(review): never advanced in the visible code)
    current_level: usize,
    /// Budget still available; decremented as allocations are made
    remaining_budget: Float,
}
86
87impl ProgressiveAllocator {
88    pub fn new(config: ProgressiveAllocationConfig) -> Self {
89        let remaining_budget = config.total_budget;
90        Self {
91            config,
92            allocation_history: Vec::new(),
93            current_level: 0,
94            remaining_budget,
95        }
96    }
97
98    /// Allocate resources progressively to configurations
99    pub fn allocate(
100        &mut self,
101        configurations: &[ConfigurationWithPerformance],
102    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
103        // Clone strategy to avoid borrowing issues
104        let strategy = self.config.strategy.clone();
105        match strategy {
106            ProgressiveAllocationStrategy::Geometric {
107                base_resources,
108                ratio,
109                n_levels,
110            } => self.allocate_geometric(configurations, base_resources, ratio, n_levels),
111            ProgressiveAllocationStrategy::Exponential {
112                initial_resources,
113                growth_rate,
114                n_levels,
115            } => {
116                self.allocate_exponential(configurations, initial_resources, growth_rate, n_levels)
117            }
118            ProgressiveAllocationStrategy::Fibonacci {
119                base_resources,
120                n_levels,
121            } => self.allocate_fibonacci(configurations, base_resources, n_levels),
122            ProgressiveAllocationStrategy::Adaptive {
123                initial_resources,
124                adaptation_rate,
125                performance_threshold,
126            } => self.allocate_adaptive(
127                configurations,
128                initial_resources,
129                adaptation_rate,
130                performance_threshold,
131            ),
132            ProgressiveAllocationStrategy::Custom { resource_schedule } => {
133                self.allocate_custom(configurations, &resource_schedule)
134            }
135        }
136    }
137
138    /// Geometric progression allocation
139    fn allocate_geometric(
140        &mut self,
141        configurations: &[ConfigurationWithPerformance],
142        base_resources: Float,
143        ratio: Float,
144        n_levels: usize,
145    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
146        let mut allocations = Vec::new();
147        let mut promoted_configs = configurations.to_vec();
148
149        for level in 0..n_levels {
150            let resources_at_level = base_resources * ratio.powi(level as i32);
151            let resources_clamped = resources_at_level
152                .max(self.config.min_resource_per_config)
153                .min(self.config.max_resource_per_config);
154
155            // Allocate to each configuration
156            for (idx, config) in promoted_configs.iter().enumerate() {
157                if self.remaining_budget >= resources_clamped {
158                    allocations.push(ConfigAllocation {
159                        config_id: idx,
160                        level,
161                        resources: resources_clamped,
162                        estimated_performance: config.performance,
163                    });
164                    self.remaining_budget -= resources_clamped;
165                }
166            }
167
168            // Promote top performing configurations to next level
169            if level < n_levels - 1 {
170                promoted_configs = self.select_top_configs(&promoted_configs, ratio);
171            }
172        }
173
174        Ok(AllocationPlan {
175            allocations,
176            total_resources_used: self.config.total_budget - self.remaining_budget,
177            n_levels,
178            final_configs: promoted_configs.len(),
179        })
180    }
181
182    /// Exponential progression allocation
183    fn allocate_exponential(
184        &mut self,
185        configurations: &[ConfigurationWithPerformance],
186        initial_resources: Float,
187        growth_rate: Float,
188        n_levels: usize,
189    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
190        let mut allocations = Vec::new();
191        let mut promoted_configs = configurations.to_vec();
192
193        for level in 0..n_levels {
194            let resources_at_level = initial_resources * (growth_rate * level as Float).exp();
195            let resources_clamped = resources_at_level
196                .max(self.config.min_resource_per_config)
197                .min(self.config.max_resource_per_config);
198
199            for (idx, config) in promoted_configs.iter().enumerate() {
200                if self.remaining_budget >= resources_clamped {
201                    allocations.push(ConfigAllocation {
202                        config_id: idx,
203                        level,
204                        resources: resources_clamped,
205                        estimated_performance: config.performance,
206                    });
207                    self.remaining_budget -= resources_clamped;
208                }
209            }
210
211            if level < n_levels - 1 {
212                let survival_rate = (-growth_rate).exp();
213                promoted_configs = self.select_top_configs(&promoted_configs, 1.0 / survival_rate);
214            }
215        }
216
217        Ok(AllocationPlan {
218            allocations,
219            total_resources_used: self.config.total_budget - self.remaining_budget,
220            n_levels,
221            final_configs: promoted_configs.len(),
222        })
223    }
224
225    /// Fibonacci progression allocation
226    fn allocate_fibonacci(
227        &mut self,
228        configurations: &[ConfigurationWithPerformance],
229        base_resources: Float,
230        n_levels: usize,
231    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
232        let mut allocations = Vec::new();
233        let mut promoted_configs = configurations.to_vec();
234
235        // Generate Fibonacci sequence
236        let fib_sequence = self.generate_fibonacci(n_levels);
237
238        for (level, &fib) in fib_sequence.iter().enumerate() {
239            let resources_at_level = base_resources * fib as Float;
240            let resources_clamped = resources_at_level
241                .max(self.config.min_resource_per_config)
242                .min(self.config.max_resource_per_config);
243
244            for (idx, config) in promoted_configs.iter().enumerate() {
245                if self.remaining_budget >= resources_clamped {
246                    allocations.push(ConfigAllocation {
247                        config_id: idx,
248                        level,
249                        resources: resources_clamped,
250                        estimated_performance: config.performance,
251                    });
252                    self.remaining_budget -= resources_clamped;
253                }
254            }
255
256            if level < n_levels - 1 {
257                let ratio = if fib > 0 {
258                    fib_sequence[level + 1] as Float / fib as Float
259                } else {
260                    2.0
261                };
262                promoted_configs = self.select_top_configs(&promoted_configs, ratio);
263            }
264        }
265
266        Ok(AllocationPlan {
267            allocations,
268            total_resources_used: self.config.total_budget - self.remaining_budget,
269            n_levels,
270            final_configs: promoted_configs.len(),
271        })
272    }
273
274    /// Adaptive progression allocation based on performance feedback
275    fn allocate_adaptive(
276        &mut self,
277        configurations: &[ConfigurationWithPerformance],
278        initial_resources: Float,
279        adaptation_rate: Float,
280        performance_threshold: Float,
281    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
282        let mut allocations = Vec::new();
283        let mut active_configs = configurations.to_vec();
284        let mut level = 0;
285        let mut current_resources = initial_resources;
286
287        while !active_configs.is_empty() && self.remaining_budget > 0.0 {
288            // Allocate current resources to active configurations
289            for (idx, config) in active_configs.iter().enumerate() {
290                let allocated = current_resources
291                    .max(self.config.min_resource_per_config)
292                    .min(self.config.max_resource_per_config)
293                    .min(self.remaining_budget);
294
295                if allocated > 0.0 {
296                    allocations.push(ConfigAllocation {
297                        config_id: idx,
298                        level,
299                        resources: allocated,
300                        estimated_performance: config.performance,
301                    });
302                    self.remaining_budget -= allocated;
303                }
304            }
305
306            // Compute performance statistics
307            let performances: Vec<Float> = active_configs.iter().map(|c| c.performance).collect();
308            let mean_perf = performances.iter().sum::<Float>() / performances.len() as Float;
309            let max_perf = performances
310                .iter()
311                .cloned()
312                .fold(f64::NEG_INFINITY, f64::max);
313
314            // Adapt resource allocation based on performance spread
315            let perf_spread = max_perf - mean_perf;
316            if perf_spread > performance_threshold {
317                // Good separation: increase resources for next round
318                current_resources *= 1.0 + adaptation_rate;
319            } else {
320                // Poor separation: keep or slightly decrease resources
321                current_resources *= 1.0 - adaptation_rate * 0.5;
322            }
323
324            // Filter configurations above threshold
325            active_configs.retain(|c| c.performance >= mean_perf);
326
327            level += 1;
328
329            // Safety check to prevent infinite loops
330            if level > 100 {
331                break;
332            }
333        }
334
335        Ok(AllocationPlan {
336            allocations,
337            total_resources_used: self.config.total_budget - self.remaining_budget,
338            n_levels: level,
339            final_configs: active_configs.len(),
340        })
341    }
342
343    /// Custom progression allocation
344    fn allocate_custom(
345        &mut self,
346        configurations: &[ConfigurationWithPerformance],
347        resource_schedule: &[Float],
348    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
349        let mut allocations = Vec::new();
350        let mut promoted_configs = configurations.to_vec();
351
352        for (level, &resources_at_level) in resource_schedule.iter().enumerate() {
353            let resources_clamped = resources_at_level
354                .max(self.config.min_resource_per_config)
355                .min(self.config.max_resource_per_config);
356
357            for (idx, config) in promoted_configs.iter().enumerate() {
358                if self.remaining_budget >= resources_clamped {
359                    allocations.push(ConfigAllocation {
360                        config_id: idx,
361                        level,
362                        resources: resources_clamped,
363                        estimated_performance: config.performance,
364                    });
365                    self.remaining_budget -= resources_clamped;
366                }
367            }
368
369            // Halve configurations each level
370            if level < resource_schedule.len() - 1 {
371                promoted_configs = self.select_top_configs(&promoted_configs, 2.0);
372            }
373        }
374
375        Ok(AllocationPlan {
376            allocations,
377            total_resources_used: self.config.total_budget - self.remaining_budget,
378            n_levels: resource_schedule.len(),
379            final_configs: promoted_configs.len(),
380        })
381    }
382
383    // Helper methods
384
385    fn select_top_configs(
386        &self,
387        configs: &[ConfigurationWithPerformance],
388        ratio: Float,
389    ) -> Vec<ConfigurationWithPerformance> {
390        let n_keep = (configs.len() as Float / ratio).ceil() as usize;
391        let n_keep = n_keep.max(1).min(configs.len());
392
393        let mut sorted_configs = configs.to_vec();
394        sorted_configs.sort_by(|a, b| {
395            b.performance
396                .partial_cmp(&a.performance)
397                .expect("operation should succeed")
398        });
399        sorted_configs.truncate(n_keep);
400        sorted_configs
401    }
402
403    fn generate_fibonacci(&self, n: usize) -> Vec<usize> {
404        let mut fib = vec![1, 1];
405        for i in 2..n {
406            fib.push(fib[i - 1] + fib[i - 2]);
407        }
408        fib.truncate(n);
409        fib
410    }
411}
412
/// Configuration with performance information
#[derive(Debug, Clone)]
pub struct ConfigurationWithPerformance {
    /// Stable identifier of this configuration
    pub config_id: usize,
    /// Hyperparameter name -> value assignment
    pub parameters: HashMap<String, Float>,
    /// Observed (or estimated) performance; higher is better in this module
    pub performance: Float,
    /// Resources already spent evaluating this configuration
    pub resources_used: Float,
}
421
/// Allocation for a specific configuration
#[derive(Debug, Clone)]
pub struct ConfigAllocation {
    /// Identifier of the configuration receiving the resources
    pub config_id: usize,
    /// Allocation level (0-based) at which the resources were granted
    pub level: usize,
    /// Amount of resources granted at this level
    pub resources: Float,
    /// Performance estimate for the configuration at allocation time
    pub estimated_performance: Float,
}
430
/// Plan for resource allocation
#[derive(Debug, Clone)]
pub struct AllocationPlan {
    /// All individual per-configuration, per-level allocations
    pub allocations: Vec<ConfigAllocation>,
    /// Total budget consumed (`total_budget - remaining_budget`)
    pub total_resources_used: Float,
    /// Number of allocation levels executed
    pub n_levels: usize,
    /// Number of configurations surviving to the final level
    pub final_configs: usize,
}
439
/// Allocation record for history tracking
///
/// Summarizes one allocation level: the per-configuration resource amount and
/// how many configurations were active.
#[derive(Debug, Clone)]
struct AllocationRecord {
    /// Allocation level (0-based)
    level: usize,
    /// Resources granted per configuration at this level
    resources: Float,
    /// Number of configurations active at this level
    n_configs: usize,
}
447
448// ============================================================================
449// Coarse-to-Fine Optimization Strategies
450// ============================================================================
451
/// Coarse-to-fine optimization strategy
#[derive(Debug, Clone)]
pub enum CoarseToFineStrategy {
    /// Grid refinement: evaluate a coarse grid, then shrink the bounds around
    /// the best point and re-grid at each level
    GridRefinement {
        /// Number of grid points per parameter at the first level
        initial_grid_size: usize,
        /// Factor applied to the bounds range per refinement (< 1 shrinks)
        refinement_factor: Float,
        /// Number of refinement levels
        n_refinement_levels: usize,
    },
    /// Hierarchical sampling: random exploration followed by progressively
    /// tighter sampling around the incumbent best point
    HierarchicalSampling {
        /// Number of initial uniform random samples
        initial_samples: usize,
        /// Samples drawn around the incumbent best at each level
        samples_per_level: usize,
        /// Perturbation radius around the incumbent best
        focus_radius: Float,
    },
    /// Zoom-in strategy: iteratively zoom into the best-scoring regions
    ZoomIn {
        /// Factor applied to each region's range when zooming (< 1 shrinks)
        zoom_factor: Float,
        /// Number of zoom iterations
        n_zoom_levels: usize,
        /// Number of best regions kept per iteration
        top_k_regions: usize,
    },
    /// Multi-scale optimization: sample with perturbations at several scales
    MultiScale {
        /// Perturbation scales, applied in order
        scales: Vec<Float>,
        /// Samples drawn at each scale
        samples_per_scale: usize,
    },
}
479
/// Configuration for coarse-to-fine optimization
#[derive(Debug, Clone)]
pub struct CoarseToFineConfig {
    /// Search strategy to use
    pub strategy: CoarseToFineStrategy,
    /// Parameter name -> (lower, upper) search bounds
    pub parameter_bounds: HashMap<String, (Float, Float)>,
    /// Radius below which refinement is considered converged
    /// (NOTE(review): checked only by the hierarchical strategy in this file)
    pub convergence_tolerance: Float,
    /// Maximum number of objective evaluations
    /// (NOTE(review): enforced only by the hierarchical strategy in this file)
    pub max_evaluations: usize,
    /// RNG seed; the optimizer falls back to 42 when `None`
    pub random_state: Option<u64>,
}
489
490impl Default for CoarseToFineConfig {
491    fn default() -> Self {
492        Self {
493            strategy: CoarseToFineStrategy::GridRefinement {
494                initial_grid_size: 10,
495                refinement_factor: 0.5,
496                n_refinement_levels: 3,
497            },
498            parameter_bounds: HashMap::new(),
499            convergence_tolerance: 1e-4,
500            max_evaluations: 1000,
501            random_state: None,
502        }
503    }
504}
505
/// Coarse-to-fine optimizer
///
/// Holds the (progressively shrinking) search bounds, a seeded RNG, and the
/// full evaluation history used to build convergence curves.
pub struct CoarseToFineOptimizer {
    /// Strategy, initial bounds, and termination settings
    config: CoarseToFineConfig,
    /// Every evaluated point, in evaluation order
    evaluation_history: Vec<EvaluationPoint>,
    /// Current (possibly refined) search bounds per parameter
    current_bounds: HashMap<String, (Float, Float)>,
    /// Seeded RNG used for all sampling
    rng: StdRng,
}
513
514impl CoarseToFineOptimizer {
515    pub fn new(config: CoarseToFineConfig) -> Self {
516        let rng = StdRng::seed_from_u64(config.random_state.unwrap_or(42));
517        let current_bounds = config.parameter_bounds.clone();
518
519        Self {
520            config,
521            evaluation_history: Vec::new(),
522            current_bounds,
523            rng,
524        }
525    }
526
527    /// Optimize using coarse-to-fine strategy
528    pub fn optimize<F>(
529        &mut self,
530        objective_fn: F,
531    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
532    where
533        F: Fn(&HashMap<String, Float>) -> Float,
534    {
535        // Clone strategy to avoid borrowing issues
536        let strategy = self.config.strategy.clone();
537        match strategy {
538            CoarseToFineStrategy::GridRefinement {
539                initial_grid_size,
540                refinement_factor,
541                n_refinement_levels,
542            } => self.optimize_grid_refinement(
543                &objective_fn,
544                initial_grid_size,
545                refinement_factor,
546                n_refinement_levels,
547            ),
548            CoarseToFineStrategy::HierarchicalSampling {
549                initial_samples,
550                samples_per_level,
551                focus_radius,
552            } => self.optimize_hierarchical(
553                &objective_fn,
554                initial_samples,
555                samples_per_level,
556                focus_radius,
557            ),
558            CoarseToFineStrategy::ZoomIn {
559                zoom_factor,
560                n_zoom_levels,
561                top_k_regions,
562            } => self.optimize_zoomin(&objective_fn, zoom_factor, n_zoom_levels, top_k_regions),
563            CoarseToFineStrategy::MultiScale {
564                scales,
565                samples_per_scale,
566            } => self.optimize_multiscale(&objective_fn, &scales, samples_per_scale),
567        }
568    }
569
570    fn optimize_grid_refinement<F>(
571        &mut self,
572        objective_fn: &F,
573        initial_grid_size: usize,
574        refinement_factor: Float,
575        n_refinement_levels: usize,
576    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
577    where
578        F: Fn(&HashMap<String, Float>) -> Float,
579    {
580        let mut best_point = None;
581        let mut best_value = f64::NEG_INFINITY;
582
583        for level in 0..n_refinement_levels {
584            // Sample grid at current resolution
585            let grid_size = (initial_grid_size as Float * (1.0 + level as Float)).ceil() as usize;
586            let points = self.generate_grid_points(grid_size)?;
587
588            // Evaluate all points
589            for params in points {
590                let value = objective_fn(&params);
591                self.evaluation_history.push(EvaluationPoint {
592                    parameters: params.clone(),
593                    value,
594                    level,
595                });
596
597                if value > best_value {
598                    best_value = value;
599                    best_point = Some(params);
600                }
601            }
602
603            // Refine bounds around best point
604            if let Some(ref best) = best_point {
605                self.refine_bounds(best, refinement_factor);
606            }
607        }
608
609        Ok(CoarseToFineResult {
610            best_parameters: best_point.unwrap_or_default(),
611            best_value,
612            n_evaluations: self.evaluation_history.len(),
613            convergence_history: self.get_convergence_history(),
614        })
615    }
616
617    fn optimize_hierarchical<F>(
618        &mut self,
619        objective_fn: &F,
620        initial_samples: usize,
621        samples_per_level: usize,
622        focus_radius: Float,
623    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
624    where
625        F: Fn(&HashMap<String, Float>) -> Float,
626    {
627        let mut best_point = None;
628        let mut best_value = f64::NEG_INFINITY;
629        let mut level = 0;
630
631        // Initial random sampling
632        for _ in 0..initial_samples {
633            let params = self.sample_from_bounds(&self.current_bounds.clone())?;
634            let value = objective_fn(&params);
635            self.evaluation_history.push(EvaluationPoint {
636                parameters: params.clone(),
637                value,
638                level,
639            });
640
641            if value > best_value {
642                best_value = value;
643                best_point = Some(params);
644            }
645        }
646
647        // Hierarchical refinement
648        while self.evaluation_history.len() < self.config.max_evaluations {
649            level += 1;
650
651            let best_params = best_point.clone();
652            if let Some(best) = best_params {
653                // Focus sampling around best point
654                for _ in 0..samples_per_level {
655                    let params = self.sample_around_point(&best, focus_radius)?;
656                    let value = objective_fn(&params);
657                    self.evaluation_history.push(EvaluationPoint {
658                        parameters: params.clone(),
659                        value,
660                        level,
661                    });
662
663                    if value > best_value {
664                        best_value = value;
665                        best_point = Some(params);
666                    }
667                }
668            }
669
670            // Reduce focus radius for next level
671            let new_radius = focus_radius * 0.5;
672            if new_radius < self.config.convergence_tolerance {
673                break;
674            }
675        }
676
677        Ok(CoarseToFineResult {
678            best_parameters: best_point.unwrap_or_default(),
679            best_value,
680            n_evaluations: self.evaluation_history.len(),
681            convergence_history: self.get_convergence_history(),
682        })
683    }
684
685    fn optimize_zoomin<F>(
686        &mut self,
687        objective_fn: &F,
688        zoom_factor: Float,
689        n_zoom_levels: usize,
690        top_k_regions: usize,
691    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
692    where
693        F: Fn(&HashMap<String, Float>) -> Float,
694    {
695        let mut regions = vec![self.current_bounds.clone()];
696        let mut best_point = None;
697        let mut best_value = f64::NEG_INFINITY;
698
699        for level in 0..n_zoom_levels {
700            let mut region_performances = Vec::new();
701
702            // Evaluate each region
703            for region in &regions {
704                let samples = self.sample_from_bounds(region)?;
705                let value = objective_fn(&samples);
706
707                self.evaluation_history.push(EvaluationPoint {
708                    parameters: samples.clone(),
709                    value,
710                    level,
711                });
712
713                if value > best_value {
714                    best_value = value;
715                    best_point = Some(samples.clone());
716                }
717
718                region_performances.push((region.clone(), samples, value));
719            }
720
721            // Select top-k regions
722            region_performances
723                .sort_by(|a, b| b.2.partial_cmp(&a.2).expect("operation should succeed"));
724            region_performances.truncate(top_k_regions);
725
726            // Zoom into top regions
727            regions = region_performances
728                .iter()
729                .map(|(region, center, _)| self.zoom_region(region, center, zoom_factor))
730                .collect();
731        }
732
733        Ok(CoarseToFineResult {
734            best_parameters: best_point.unwrap_or_default(),
735            best_value,
736            n_evaluations: self.evaluation_history.len(),
737            convergence_history: self.get_convergence_history(),
738        })
739    }
740
741    fn optimize_multiscale<F>(
742        &mut self,
743        objective_fn: &F,
744        scales: &[Float],
745        samples_per_scale: usize,
746    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
747    where
748        F: Fn(&HashMap<String, Float>) -> Float,
749    {
750        let mut best_point = None;
751        let mut best_value = f64::NEG_INFINITY;
752
753        for (level, &scale) in scales.iter().enumerate() {
754            // Generate samples at current scale
755            for _ in 0..samples_per_scale {
756                let current_bounds = self.current_bounds.clone();
757                let mut params = self.sample_from_bounds(&current_bounds)?;
758
759                // Apply scale perturbation
760                for (_, value) in params.iter_mut() {
761                    let perturbation = self.rng.random_range(-scale..scale);
762                    *value += perturbation;
763                }
764
765                let value = objective_fn(&params);
766                self.evaluation_history.push(EvaluationPoint {
767                    parameters: params.clone(),
768                    value,
769                    level,
770                });
771
772                if value > best_value {
773                    best_value = value;
774                    best_point = Some(params);
775                }
776            }
777
778            // Refine bounds for next scale
779            if let Some(ref best) = best_point {
780                self.refine_bounds(best, 0.5);
781            }
782        }
783
784        Ok(CoarseToFineResult {
785            best_parameters: best_point.unwrap_or_default(),
786            best_value,
787            n_evaluations: self.evaluation_history.len(),
788            convergence_history: self.get_convergence_history(),
789        })
790    }
791
792    // Helper methods
793
794    fn generate_grid_points(
795        &self,
796        grid_size: usize,
797    ) -> Result<Vec<HashMap<String, Float>>, Box<dyn std::error::Error>> {
798        let mut points = Vec::new();
799
800        // Generate grid for each parameter
801        let param_names: Vec<_> = self.current_bounds.keys().cloned().collect();
802
803        if param_names.is_empty() {
804            return Ok(points);
805        }
806
807        // Simple 1D grid for each parameter (can be extended to multi-D)
808        for param_name in &param_names {
809            if let Some(&(min, max)) = self.current_bounds.get(param_name) {
810                for i in 0..grid_size {
811                    let value = min + (max - min) * (i as Float) / ((grid_size - 1) as Float);
812                    let mut params = HashMap::new();
813                    params.insert(param_name.clone(), value);
814                    points.push(params);
815                }
816            }
817        }
818
819        Ok(points)
820    }
821
822    fn sample_from_bounds(
823        &mut self,
824        bounds: &HashMap<String, (Float, Float)>,
825    ) -> Result<HashMap<String, Float>, Box<dyn std::error::Error>> {
826        let mut params = HashMap::new();
827
828        for (param_name, &(min, max)) in bounds {
829            let value = self.rng.random_range(min..max);
830            params.insert(param_name.clone(), value);
831        }
832
833        Ok(params)
834    }
835
836    fn sample_around_point(
837        &mut self,
838        center: &HashMap<String, Float>,
839        radius: Float,
840    ) -> Result<HashMap<String, Float>, Box<dyn std::error::Error>> {
841        let mut params = HashMap::new();
842
843        for (param_name, &center_value) in center {
844            if let Some(&(min, max)) = self.current_bounds.get(param_name) {
845                let perturbation = self.rng.random_range(-radius..radius);
846                let value = (center_value + perturbation).max(min).min(max);
847                params.insert(param_name.clone(), value);
848            }
849        }
850
851        Ok(params)
852    }
853
854    fn refine_bounds(&mut self, best_point: &HashMap<String, Float>, refinement_factor: Float) {
855        for (param_name, &value) in best_point {
856            if let Some(&(min, max)) = self.current_bounds.get(param_name) {
857                let range = max - min;
858                let new_range = range * refinement_factor;
859                let new_min = (value - new_range / 2.0).max(min);
860                let new_max = (value + new_range / 2.0).min(max);
861                self.current_bounds
862                    .insert(param_name.clone(), (new_min, new_max));
863            }
864        }
865    }
866
867    fn zoom_region(
868        &self,
869        region: &HashMap<String, (Float, Float)>,
870        center: &HashMap<String, Float>,
871        zoom_factor: Float,
872    ) -> HashMap<String, (Float, Float)> {
873        let mut zoomed = HashMap::new();
874
875        for (param_name, &(min, max)) in region {
876            if let Some(&center_value) = center.get(param_name) {
877                let range = max - min;
878                let new_range = range * zoom_factor;
879                let new_min = (center_value - new_range / 2.0).max(min);
880                let new_max = (center_value + new_range / 2.0).min(max);
881                zoomed.insert(param_name.clone(), (new_min, new_max));
882            }
883        }
884
885        zoomed
886    }
887
888    fn get_convergence_history(&self) -> Vec<Float> {
889        let mut best_so_far = f64::NEG_INFINITY;
890        self.evaluation_history
891            .iter()
892            .map(|point| {
893                if point.value > best_so_far {
894                    best_so_far = point.value;
895                }
896                best_so_far
897            })
898            .collect()
899    }
900}
901
/// Evaluation point in optimization
#[derive(Debug, Clone)]
struct EvaluationPoint {
    // Hyperparameter values that were evaluated.
    parameters: HashMap<String, Float>,
    // Objective value observed for `parameters`; higher is better
    // (the convergence history tracks a running maximum of this field).
    value: Float,
    // Coarse-to-fine level at which this evaluation was performed.
    level: usize,
}
909
/// Result of coarse-to-fine optimization
#[derive(Debug, Clone)]
pub struct CoarseToFineResult {
    // Best hyperparameter configuration found over the whole run.
    pub best_parameters: HashMap<String, Float>,
    // Objective value achieved by `best_parameters`.
    pub best_value: Float,
    // Total number of objective evaluations performed.
    pub n_evaluations: usize,
    // Best-so-far objective value after each evaluation (non-decreasing).
    pub convergence_history: Vec<Float>,
}
918
919// ============================================================================
920// Adaptive Fidelity Selection
921// ============================================================================
922
/// Adaptive fidelity selection strategy
///
/// Determines how [`AdaptiveFidelitySelector`] picks the fidelity level for
/// the next evaluation.
#[derive(Debug, Clone)]
pub enum AdaptiveFidelityStrategy {
    /// UCB-based fidelity selection: mean performance-per-cost plus an
    /// exploration bonus scaled by `exploration_weight`; unvisited
    /// fidelities are always tried first.
    UpperConfidenceBound { exploration_weight: Float },
    /// Expected Improvement with fidelity cost: scores each fidelity as
    /// mean performance minus `cost_weight` times mean cost.
    ExpectedImprovement { cost_weight: Float },
    /// Information gain maximization: prefers the fidelity with the fewest
    /// evaluations so far (`uncertainty_threshold` is currently unused).
    InformationGain { uncertainty_threshold: Float },
    /// Cost-aware Thompson sampling with a Beta(`prior_alpha`,
    /// `prior_beta`) prior over each fidelity's success rate.
    ThompsonSampling {
        prior_alpha: Float,
        prior_beta: Float,
    },
}
938
/// Adaptive fidelity selector
///
/// Tracks running performance/cost statistics per fidelity level and uses the
/// configured [`AdaptiveFidelityStrategy`] to choose which fidelity to
/// evaluate next.
pub struct AdaptiveFidelitySelector {
    // Selection policy applied on each call to `select_fidelity`.
    strategy: AdaptiveFidelityStrategy,
    // Running statistics keyed by fidelity level index.
    fidelity_stats: HashMap<usize, FidelityStatistics>,
}
944
945impl AdaptiveFidelitySelector {
946    pub fn new(strategy: AdaptiveFidelityStrategy) -> Self {
947        Self {
948            strategy,
949            fidelity_stats: HashMap::new(),
950        }
951    }
952
953    /// Select fidelity level for next evaluation
954    pub fn select_fidelity(
955        &mut self,
956        available_fidelities: &[usize],
957        budget_remaining: Float,
958    ) -> usize {
959        match &self.strategy {
960            AdaptiveFidelityStrategy::UpperConfidenceBound { exploration_weight } => {
961                self.select_ucb(available_fidelities, *exploration_weight)
962            }
963            AdaptiveFidelityStrategy::ExpectedImprovement { cost_weight } => {
964                self.select_ei(available_fidelities, *cost_weight, budget_remaining)
965            }
966            AdaptiveFidelityStrategy::InformationGain {
967                uncertainty_threshold,
968            } => self.select_ig(available_fidelities, *uncertainty_threshold),
969            AdaptiveFidelityStrategy::ThompsonSampling {
970                prior_alpha,
971                prior_beta,
972            } => self.select_thompson(available_fidelities, *prior_alpha, *prior_beta),
973        }
974    }
975
976    /// Update fidelity statistics after evaluation
977    pub fn update(&mut self, fidelity: usize, performance: Float, cost: Float) {
978        let stats = self
979            .fidelity_stats
980            .entry(fidelity)
981            .or_insert_with(|| FidelityStatistics {
982                n_evaluations: 0,
983                total_performance: 0.0,
984                total_cost: 0.0,
985                mean_performance: 0.0,
986                mean_cost: 0.0,
987            });
988
989        stats.n_evaluations += 1;
990        stats.total_performance += performance;
991        stats.total_cost += cost;
992        stats.mean_performance = stats.total_performance / stats.n_evaluations as Float;
993        stats.mean_cost = stats.total_cost / stats.n_evaluations as Float;
994    }
995
996    fn select_ucb(&self, fidelities: &[usize], exploration_weight: Float) -> usize {
997        let total_evals: usize = self.fidelity_stats.values().map(|s| s.n_evaluations).sum();
998
999        let mut best_fidelity = fidelities[0];
1000        let mut best_score = f64::NEG_INFINITY;
1001
1002        for &fidelity in fidelities {
1003            let score = if let Some(stats) = self.fidelity_stats.get(&fidelity) {
1004                let exploitation = stats.mean_performance / stats.mean_cost.max(1e-6);
1005                let exploration = exploration_weight
1006                    * ((total_evals as Float).ln() / stats.n_evaluations as Float).sqrt();
1007                exploitation + exploration
1008            } else {
1009                f64::INFINITY // Unvisited fidelities get highest priority
1010            };
1011
1012            if score > best_score {
1013                best_score = score;
1014                best_fidelity = fidelity;
1015            }
1016        }
1017
1018        best_fidelity
1019    }
1020
1021    fn select_ei(&self, fidelities: &[usize], cost_weight: Float, _budget: Float) -> usize {
1022        let mut best_fidelity = fidelities[0];
1023        let mut best_score = f64::NEG_INFINITY;
1024
1025        for &fidelity in fidelities {
1026            let score = if let Some(stats) = self.fidelity_stats.get(&fidelity) {
1027                stats.mean_performance - cost_weight * stats.mean_cost
1028            } else {
1029                0.0 // Default for unvisited
1030            };
1031
1032            if score > best_score {
1033                best_score = score;
1034                best_fidelity = fidelity;
1035            }
1036        }
1037
1038        best_fidelity
1039    }
1040
1041    fn select_ig(&self, fidelities: &[usize], _threshold: Float) -> usize {
1042        // Prefer fidelities with less data (higher uncertainty)
1043        let mut best_fidelity = fidelities[0];
1044        let mut min_evals = usize::MAX;
1045
1046        for &fidelity in fidelities {
1047            let n_evals = self
1048                .fidelity_stats
1049                .get(&fidelity)
1050                .map(|s| s.n_evaluations)
1051                .unwrap_or(0);
1052            if n_evals < min_evals {
1053                min_evals = n_evals;
1054                best_fidelity = fidelity;
1055            }
1056        }
1057
1058        best_fidelity
1059    }
1060
1061    fn select_thompson(
1062        &self,
1063        fidelities: &[usize],
1064        prior_alpha: Float,
1065        prior_beta: Float,
1066    ) -> usize {
1067        let mut rng = StdRng::seed_from_u64(42);
1068        let mut best_fidelity = fidelities[0];
1069        let mut best_sample = f64::NEG_INFINITY;
1070
1071        for &fidelity in fidelities {
1072            // Beta distribution sampling (simplified)
1073            let (alpha, beta) = if let Some(stats) = self.fidelity_stats.get(&fidelity) {
1074                (
1075                    prior_alpha + stats.total_performance,
1076                    prior_beta + stats.n_evaluations as Float - stats.total_performance,
1077                )
1078            } else {
1079                (prior_alpha, prior_beta)
1080            };
1081
1082            // Simple sampling (in practice would use proper Beta distribution)
1083            let sample = rng.random_range(0.0..(alpha / (alpha + beta)));
1084
1085            if sample > best_sample {
1086                best_sample = sample;
1087                best_fidelity = fidelity;
1088            }
1089        }
1090
1091        best_fidelity
1092    }
1093}
1094
/// Running per-fidelity aggregates maintained by `AdaptiveFidelitySelector::update`.
#[derive(Debug, Clone)]
struct FidelityStatistics {
    // Number of evaluations recorded at this fidelity level.
    n_evaluations: usize,
    // Sum of observed performance values.
    total_performance: Float,
    // Sum of observed evaluation costs.
    total_cost: Float,
    // total_performance / n_evaluations, kept in sync by `update`.
    mean_performance: Float,
    // total_cost / n_evaluations, kept in sync by `update`.
    mean_cost: Float,
}
1103
1104// ============================================================================
1105// Budget Allocation Algorithms
1106// ============================================================================
1107
/// Budget allocation strategy
///
/// Determines how [`BudgetAllocator`] splits its total budget among
/// candidate configurations.
#[derive(Debug, Clone)]
pub enum BudgetAllocationStrategy {
    /// Equal allocation to all configurations
    Equal,
    /// Proportional to estimated performance; each configuration receives a
    /// base share of `min_allocation` plus a performance-weighted share of
    /// the remainder.
    Proportional { min_allocation: Float },
    /// Rank-based allocation: configurations are sorted by performance and
    /// the i-th ranked one receives `allocation_ratios[i]` (normalized) of
    /// the budget; ranks beyond the ratio list receive nothing.
    RankBased { allocation_ratios: Vec<Float> },
    /// Adaptive allocation trading off observed performance against
    /// uncertainty, weighted by `exploration_factor`.
    UncertaintyBased { exploration_factor: Float },
    /// Racing algorithm (currently a placeholder that allocates equally;
    /// `confidence_level` is reserved for statistical elimination).
    Racing { confidence_level: Float },
}
1122
/// Budget allocator
///
/// Distributes a fixed total budget across candidate configurations
/// according to the configured [`BudgetAllocationStrategy`].
pub struct BudgetAllocator {
    // Allocation policy applied on each call to `allocate`.
    strategy: BudgetAllocationStrategy,
    // Total budget to distribute across configurations.
    total_budget: Float,
    // NOTE(review): never written by the visible `allocate` path —
    // confirm whether caching results here is still intended.
    allocations: HashMap<usize, Float>,
}
1129
1130impl BudgetAllocator {
1131    pub fn new(strategy: BudgetAllocationStrategy, total_budget: Float) -> Self {
1132        Self {
1133            strategy,
1134            total_budget,
1135            allocations: HashMap::new(),
1136        }
1137    }
1138
1139    /// Allocate budget among configurations
1140    pub fn allocate(
1141        &mut self,
1142        configurations: &[ConfigurationWithPerformance],
1143    ) -> HashMap<usize, Float> {
1144        // Clone strategy to avoid borrowing issues
1145        let strategy = self.strategy.clone();
1146        match strategy {
1147            BudgetAllocationStrategy::Equal => self.allocate_equal(configurations),
1148            BudgetAllocationStrategy::Proportional { min_allocation } => {
1149                self.allocate_proportional(configurations, min_allocation)
1150            }
1151            BudgetAllocationStrategy::RankBased { allocation_ratios } => {
1152                self.allocate_rank_based(configurations, &allocation_ratios)
1153            }
1154            BudgetAllocationStrategy::UncertaintyBased { exploration_factor } => {
1155                self.allocate_uncertainty(configurations, exploration_factor)
1156            }
1157            BudgetAllocationStrategy::Racing { confidence_level } => {
1158                self.allocate_racing(configurations, confidence_level)
1159            }
1160        }
1161    }
1162
1163    fn allocate_equal(
1164        &mut self,
1165        configs: &[ConfigurationWithPerformance],
1166    ) -> HashMap<usize, Float> {
1167        let per_config = self.total_budget / configs.len() as Float;
1168        configs
1169            .iter()
1170            .enumerate()
1171            .map(|(i, _)| (i, per_config))
1172            .collect()
1173    }
1174
1175    fn allocate_proportional(
1176        &mut self,
1177        configs: &[ConfigurationWithPerformance],
1178        min_allocation: Float,
1179    ) -> HashMap<usize, Float> {
1180        let total_perf: Float = configs.iter().map(|c| c.performance).sum();
1181        let available = self.total_budget - min_allocation * configs.len() as Float;
1182
1183        configs
1184            .iter()
1185            .enumerate()
1186            .map(|(i, c)| {
1187                let proportional = if total_perf > 0.0 {
1188                    (c.performance / total_perf) * available
1189                } else {
1190                    0.0
1191                };
1192                (i, min_allocation + proportional)
1193            })
1194            .collect()
1195    }
1196
1197    fn allocate_rank_based(
1198        &mut self,
1199        configs: &[ConfigurationWithPerformance],
1200        ratios: &[Float],
1201    ) -> HashMap<usize, Float> {
1202        let mut sorted: Vec<_> = configs.iter().enumerate().collect();
1203        sorted.sort_by(|a, b| {
1204            b.1.performance
1205                .partial_cmp(&a.1.performance)
1206                .expect("operation should succeed")
1207        });
1208
1209        let total_ratio: Float = ratios.iter().sum();
1210        sorted
1211            .iter()
1212            .enumerate()
1213            .map(|(rank, (orig_idx, _))| {
1214                let ratio = ratios.get(rank).cloned().unwrap_or(0.0);
1215                let allocation = (ratio / total_ratio) * self.total_budget;
1216                (*orig_idx, allocation)
1217            })
1218            .collect()
1219    }
1220
1221    fn allocate_uncertainty(
1222        &mut self,
1223        configs: &[ConfigurationWithPerformance],
1224        exploration: Float,
1225    ) -> HashMap<usize, Float> {
1226        // Simplified: allocate based on inverse of resources already used
1227        let total_inverse: Float = configs.iter().map(|c| 1.0 / (c.resources_used + 1.0)).sum();
1228
1229        configs
1230            .iter()
1231            .enumerate()
1232            .map(|(i, c)| {
1233                let uncertainty = 1.0 / (c.resources_used + 1.0);
1234                let exploitation = c.performance;
1235                let score = exploitation + exploration * uncertainty;
1236                let allocation = (score / total_inverse) * self.total_budget;
1237                (i, allocation)
1238            })
1239            .collect()
1240    }
1241
1242    fn allocate_racing(
1243        &mut self,
1244        configs: &[ConfigurationWithPerformance],
1245        _confidence: Float,
1246    ) -> HashMap<usize, Float> {
1247        // Racing: allocate more to statistically indistinguishable configurations
1248        let initial = self.total_budget / configs.len() as Float;
1249
1250        configs
1251            .iter()
1252            .enumerate()
1253            .map(|(i, _)| (i, initial))
1254            .collect()
1255    }
1256}
1257
1258// ============================================================================
1259// Tests
1260// ============================================================================
1261
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_progressive_allocation_config() {
        // Default config should expose the documented budget values.
        let cfg = ProgressiveAllocationConfig::default();
        assert_eq!(cfg.total_budget, 100.0);
        assert_eq!(cfg.min_resource_per_config, 0.1);
    }

    #[test]
    fn test_coarse_to_fine_config() {
        // Default evaluation cap.
        let cfg = CoarseToFineConfig::default();
        assert_eq!(cfg.max_evaluations, 1000);
    }

    #[test]
    fn test_adaptive_fidelity_selector() {
        // A fresh selector starts with no recorded statistics.
        let selector = AdaptiveFidelitySelector::new(
            AdaptiveFidelityStrategy::UpperConfidenceBound {
                exploration_weight: 1.0,
            },
        );
        assert!(selector.fidelity_stats.is_empty());
    }

    #[test]
    fn test_budget_allocator() {
        // The allocator records the full budget it was constructed with.
        let allocator = BudgetAllocator::new(BudgetAllocationStrategy::Equal, 100.0);
        assert_eq!(allocator.total_budget, 100.0);
    }

    #[test]
    fn test_progressive_allocator() {
        // A new allocator starts with its entire budget remaining.
        let allocator = ProgressiveAllocator::new(ProgressiveAllocationConfig::default());
        assert_eq!(allocator.remaining_budget, 100.0);
    }

    #[test]
    fn test_coarse_to_fine_optimizer() {
        // A new optimizer has an empty evaluation history.
        let optimizer = CoarseToFineOptimizer::new(CoarseToFineConfig::default());
        assert!(optimizer.evaluation_history.is_empty());
    }
}