sklears_model_selection/
multi_fidelity_advanced.rs

//! Advanced Multi-Fidelity Optimization Techniques
//!
//! This module extends multi-fidelity optimization with:
//! - Progressive Resource Allocation
//! - Coarse-to-Fine Optimization Strategies
//! - Adaptive Fidelity Selection
//! - Budget Allocation Algorithms
//!
//! These techniques enable more efficient hyperparameter optimization by intelligently
//! allocating computational resources across different fidelity levels.
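//!
//! # Example (illustrative sketch)
//!
//! A minimal usage sketch, assuming the module is exposed as
//! `sklears_model_selection::multi_fidelity_advanced`; the import path and the default
//! configuration values are assumptions, so the snippet is marked `ignore`.
//!
//! ```ignore
//! use sklears_model_selection::multi_fidelity_advanced::{
//!     ProgressiveAllocationConfig, ProgressiveAllocator,
//! };
//!
//! // Allocate a fixed budget across candidate configurations with the
//! // default geometric schedule (1, 2, 4, ... resource units per level).
//! let mut allocator = ProgressiveAllocator::new(ProgressiveAllocationConfig::default());
//! ```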

use scirs2_core::random::rngs::StdRng;
use scirs2_core::random::{Rng, SeedableRng};
use sklears_core::types::Float;
use std::collections::HashMap;

// ============================================================================
// Progressive Resource Allocation
// ============================================================================

/// Progressive resource allocation strategy
#[derive(Debug, Clone)]
pub enum ProgressiveAllocationStrategy {
    /// Geometric progression: each level gets r^i times more resources
    Geometric {
        base_resources: Float,
        ratio: Float,
        n_levels: usize,
    },
    /// Exponential progression: exponentially increasing resources
    Exponential {
        initial_resources: Float,
        growth_rate: Float,
        n_levels: usize,
    },
    /// Fibonacci progression: resources follow Fibonacci sequence
    Fibonacci {
        base_resources: Float,
        n_levels: usize,
    },
    /// Adaptive progression based on performance feedback
    Adaptive {
        initial_resources: Float,
        adaptation_rate: Float,
        performance_threshold: Float,
    },
    /// Custom progression with user-defined schedule
    Custom { resource_schedule: Vec<Float> },
}

/// Configuration for progressive resource allocation
#[derive(Debug, Clone)]
pub struct ProgressiveAllocationConfig {
    pub strategy: ProgressiveAllocationStrategy,
    pub total_budget: Float,
    pub min_resource_per_config: Float,
    pub max_resource_per_config: Float,
    pub early_stopping_threshold: Option<Float>,
    pub random_state: Option<u64>,
}

impl Default for ProgressiveAllocationConfig {
    fn default() -> Self {
        Self {
            strategy: ProgressiveAllocationStrategy::Geometric {
                base_resources: 1.0,
                ratio: 2.0,
                n_levels: 5,
            },
            total_budget: 100.0,
            min_resource_per_config: 0.1,
            max_resource_per_config: 50.0,
            early_stopping_threshold: Some(0.01),
            random_state: None,
        }
    }
}

/// Progressive resource allocator
pub struct ProgressiveAllocator {
    config: ProgressiveAllocationConfig,
    allocation_history: Vec<AllocationRecord>,
    current_level: usize,
    remaining_budget: Float,
}

impl ProgressiveAllocator {
    pub fn new(config: ProgressiveAllocationConfig) -> Self {
        let remaining_budget = config.total_budget;
        Self {
            config,
            allocation_history: Vec::new(),
            current_level: 0,
            remaining_budget,
        }
    }

    /// Allocate resources progressively to configurations
    pub fn allocate(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
        // Clone strategy to avoid borrowing issues
        let strategy = self.config.strategy.clone();
        match strategy {
            ProgressiveAllocationStrategy::Geometric {
                base_resources,
                ratio,
                n_levels,
            } => self.allocate_geometric(configurations, base_resources, ratio, n_levels),
            ProgressiveAllocationStrategy::Exponential {
                initial_resources,
                growth_rate,
                n_levels,
            } => {
                self.allocate_exponential(configurations, initial_resources, growth_rate, n_levels)
            }
            ProgressiveAllocationStrategy::Fibonacci {
                base_resources,
                n_levels,
            } => self.allocate_fibonacci(configurations, base_resources, n_levels),
            ProgressiveAllocationStrategy::Adaptive {
                initial_resources,
                adaptation_rate,
                performance_threshold,
            } => self.allocate_adaptive(
                configurations,
                initial_resources,
                adaptation_rate,
                performance_threshold,
            ),
            ProgressiveAllocationStrategy::Custom { resource_schedule } => {
                self.allocate_custom(configurations, &resource_schedule)
            }
        }
    }

    /// Geometric progression allocation
    fn allocate_geometric(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
        base_resources: Float,
        ratio: Float,
        n_levels: usize,
    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
        let mut allocations = Vec::new();
        let mut promoted_configs = configurations.to_vec();

        for level in 0..n_levels {
            let resources_at_level = base_resources * ratio.powi(level as i32);
            let resources_clamped = resources_at_level
                .max(self.config.min_resource_per_config)
                .min(self.config.max_resource_per_config);

            // Allocate to each surviving configuration
            for config in &promoted_configs {
                if self.remaining_budget >= resources_clamped {
                    allocations.push(ConfigAllocation {
                        config_id: config.config_id,
                        level,
                        resources: resources_clamped,
                        estimated_performance: config.performance,
                    });
                    self.remaining_budget -= resources_clamped;
                }
            }

            // Promote top performing configurations to next level
            if level < n_levels - 1 {
                promoted_configs = self.select_top_configs(&promoted_configs, ratio);
            }
        }

        Ok(AllocationPlan {
            allocations,
            total_resources_used: self.config.total_budget - self.remaining_budget,
            n_levels,
            final_configs: promoted_configs.len(),
        })
    }

    /// Exponential progression allocation
    fn allocate_exponential(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
        initial_resources: Float,
        growth_rate: Float,
        n_levels: usize,
    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
        let mut allocations = Vec::new();
        let mut promoted_configs = configurations.to_vec();

        for level in 0..n_levels {
            let resources_at_level = initial_resources * (growth_rate * level as Float).exp();
            let resources_clamped = resources_at_level
                .max(self.config.min_resource_per_config)
                .min(self.config.max_resource_per_config);

            for config in &promoted_configs {
                if self.remaining_budget >= resources_clamped {
                    allocations.push(ConfigAllocation {
                        config_id: config.config_id,
                        level,
                        resources: resources_clamped,
                        estimated_performance: config.performance,
                    });
                    self.remaining_budget -= resources_clamped;
                }
            }

            if level < n_levels - 1 {
                let survival_rate = (-growth_rate).exp();
                promoted_configs = self.select_top_configs(&promoted_configs, 1.0 / survival_rate);
            }
        }

        Ok(AllocationPlan {
            allocations,
            total_resources_used: self.config.total_budget - self.remaining_budget,
            n_levels,
            final_configs: promoted_configs.len(),
        })
    }

    /// Fibonacci progression allocation
    fn allocate_fibonacci(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
        base_resources: Float,
        n_levels: usize,
    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
        let mut allocations = Vec::new();
        let mut promoted_configs = configurations.to_vec();

        // Generate Fibonacci sequence
        let fib_sequence = self.generate_fibonacci(n_levels);

        for (level, &fib) in fib_sequence.iter().enumerate() {
            let resources_at_level = base_resources * fib as Float;
            let resources_clamped = resources_at_level
                .max(self.config.min_resource_per_config)
                .min(self.config.max_resource_per_config);

            for config in &promoted_configs {
                if self.remaining_budget >= resources_clamped {
                    allocations.push(ConfigAllocation {
                        config_id: config.config_id,
                        level,
                        resources: resources_clamped,
                        estimated_performance: config.performance,
                    });
                    self.remaining_budget -= resources_clamped;
                }
            }

            if level < n_levels - 1 {
                let ratio = if fib > 0 {
                    fib_sequence[level + 1] as Float / fib as Float
                } else {
                    2.0
                };
                promoted_configs = self.select_top_configs(&promoted_configs, ratio);
            }
        }

        Ok(AllocationPlan {
            allocations,
            total_resources_used: self.config.total_budget - self.remaining_budget,
            n_levels,
            final_configs: promoted_configs.len(),
        })
    }

    /// Adaptive progression allocation based on performance feedback
    fn allocate_adaptive(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
        initial_resources: Float,
        adaptation_rate: Float,
        performance_threshold: Float,
    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
        let mut allocations = Vec::new();
        let mut active_configs = configurations.to_vec();
        let mut level = 0;
        let mut current_resources = initial_resources;

        while !active_configs.is_empty() && self.remaining_budget > 0.0 {
            // Allocate current resources to active configurations
            for config in &active_configs {
                let allocated = current_resources
                    .max(self.config.min_resource_per_config)
                    .min(self.config.max_resource_per_config)
                    .min(self.remaining_budget);

                if allocated > 0.0 {
                    allocations.push(ConfigAllocation {
                        config_id: config.config_id,
                        level,
                        resources: allocated,
                        estimated_performance: config.performance,
                    });
                    self.remaining_budget -= allocated;
                }
            }

            // Compute performance statistics
            let performances: Vec<Float> = active_configs.iter().map(|c| c.performance).collect();
            let mean_perf = performances.iter().sum::<Float>() / performances.len() as Float;
            let max_perf = performances
                .iter()
                .cloned()
                .fold(f64::NEG_INFINITY, f64::max);

            // Adapt resource allocation based on performance spread
            let perf_spread = max_perf - mean_perf;
            if perf_spread > performance_threshold {
                // Good separation: increase resources for next round
                current_resources *= 1.0 + adaptation_rate;
            } else {
                // Poor separation: keep or slightly decrease resources
                current_resources *= 1.0 - adaptation_rate * 0.5;
            }

            // Keep only configurations performing at or above the mean
            active_configs.retain(|c| c.performance >= mean_perf);

            level += 1;

            // Safety check to prevent infinite loops
            if level > 100 {
                break;
            }
        }

        Ok(AllocationPlan {
            allocations,
            total_resources_used: self.config.total_budget - self.remaining_budget,
            n_levels: level,
            final_configs: active_configs.len(),
        })
    }

    /// Custom progression allocation
    fn allocate_custom(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
        resource_schedule: &[Float],
    ) -> Result<AllocationPlan, Box<dyn std::error::Error>> {
        let mut allocations = Vec::new();
        let mut promoted_configs = configurations.to_vec();

        for (level, &resources_at_level) in resource_schedule.iter().enumerate() {
            let resources_clamped = resources_at_level
                .max(self.config.min_resource_per_config)
                .min(self.config.max_resource_per_config);

            for config in &promoted_configs {
                if self.remaining_budget >= resources_clamped {
                    allocations.push(ConfigAllocation {
                        config_id: config.config_id,
                        level,
                        resources: resources_clamped,
                        estimated_performance: config.performance,
                    });
                    self.remaining_budget -= resources_clamped;
                }
            }

            // Halve configurations each level
            if level < resource_schedule.len() - 1 {
                promoted_configs = self.select_top_configs(&promoted_configs, 2.0);
            }
        }

        Ok(AllocationPlan {
            allocations,
            total_resources_used: self.config.total_budget - self.remaining_budget,
            n_levels: resource_schedule.len(),
            final_configs: promoted_configs.len(),
        })
    }

    // Helper methods

    fn select_top_configs(
        &self,
        configs: &[ConfigurationWithPerformance],
        ratio: Float,
    ) -> Vec<ConfigurationWithPerformance> {
        let n_keep = (configs.len() as Float / ratio).ceil() as usize;
        let n_keep = n_keep.max(1).min(configs.len());

        let mut sorted_configs = configs.to_vec();
        sorted_configs.sort_by(|a, b| b.performance.partial_cmp(&a.performance).unwrap());
        sorted_configs.truncate(n_keep);
        sorted_configs
    }

    fn generate_fibonacci(&self, n: usize) -> Vec<usize> {
        let mut fib = vec![1, 1];
        for i in 2..n {
            fib.push(fib[i - 1] + fib[i - 2]);
        }
        fib.truncate(n);
        fib
    }
}

/// Configuration with performance information
#[derive(Debug, Clone)]
pub struct ConfigurationWithPerformance {
    pub config_id: usize,
    pub parameters: HashMap<String, Float>,
    pub performance: Float,
    pub resources_used: Float,
}

/// Allocation for a specific configuration
#[derive(Debug, Clone)]
pub struct ConfigAllocation {
    pub config_id: usize,
    pub level: usize,
    pub resources: Float,
    pub estimated_performance: Float,
}

/// Plan for resource allocation
#[derive(Debug, Clone)]
pub struct AllocationPlan {
    pub allocations: Vec<ConfigAllocation>,
    pub total_resources_used: Float,
    pub n_levels: usize,
    pub final_configs: usize,
}

/// Allocation record for history tracking
#[derive(Debug, Clone)]
struct AllocationRecord {
    level: usize,
    resources: Float,
    n_configs: usize,
}

// ============================================================================
// Coarse-to-Fine Optimization Strategies
// ============================================================================

/// Coarse-to-fine optimization strategy
#[derive(Debug, Clone)]
pub enum CoarseToFineStrategy {
    /// Grid refinement: start with coarse grid, refine promising regions
    GridRefinement {
        initial_grid_size: usize,
        refinement_factor: Float,
        n_refinement_levels: usize,
    },
    /// Hierarchical sampling: progressively finer sampling
    HierarchicalSampling {
        initial_samples: usize,
        samples_per_level: usize,
        focus_radius: Float,
    },
    /// Zoom-in strategy: iteratively zoom into best regions
    ZoomIn {
        zoom_factor: Float,
        n_zoom_levels: usize,
        top_k_regions: usize,
    },
    /// Multi-scale optimization
    MultiScale {
        scales: Vec<Float>,
        samples_per_scale: usize,
    },
}

/// Configuration for coarse-to-fine optimization
#[derive(Debug, Clone)]
pub struct CoarseToFineConfig {
    pub strategy: CoarseToFineStrategy,
    pub parameter_bounds: HashMap<String, (Float, Float)>,
    pub convergence_tolerance: Float,
    pub max_evaluations: usize,
    pub random_state: Option<u64>,
}

impl Default for CoarseToFineConfig {
    fn default() -> Self {
        Self {
            strategy: CoarseToFineStrategy::GridRefinement {
                initial_grid_size: 10,
                refinement_factor: 0.5,
                n_refinement_levels: 3,
            },
            parameter_bounds: HashMap::new(),
            convergence_tolerance: 1e-4,
            max_evaluations: 1000,
            random_state: None,
        }
    }
}

/// Coarse-to-fine optimizer
pub struct CoarseToFineOptimizer {
    config: CoarseToFineConfig,
    evaluation_history: Vec<EvaluationPoint>,
    current_bounds: HashMap<String, (Float, Float)>,
    rng: StdRng,
}

impl CoarseToFineOptimizer {
    pub fn new(config: CoarseToFineConfig) -> Self {
        let rng = StdRng::seed_from_u64(config.random_state.unwrap_or(42));
        let current_bounds = config.parameter_bounds.clone();

        Self {
            config,
            evaluation_history: Vec::new(),
            current_bounds,
            rng,
        }
    }

    /// Optimize using coarse-to-fine strategy
    pub fn optimize<F>(
        &mut self,
        objective_fn: F,
    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
    where
        F: Fn(&HashMap<String, Float>) -> Float,
    {
        // Clone strategy to avoid borrowing issues
        let strategy = self.config.strategy.clone();
        match strategy {
            CoarseToFineStrategy::GridRefinement {
                initial_grid_size,
                refinement_factor,
                n_refinement_levels,
            } => self.optimize_grid_refinement(
                &objective_fn,
                initial_grid_size,
                refinement_factor,
                n_refinement_levels,
            ),
            CoarseToFineStrategy::HierarchicalSampling {
                initial_samples,
                samples_per_level,
                focus_radius,
            } => self.optimize_hierarchical(
                &objective_fn,
                initial_samples,
                samples_per_level,
                focus_radius,
            ),
            CoarseToFineStrategy::ZoomIn {
                zoom_factor,
                n_zoom_levels,
                top_k_regions,
            } => self.optimize_zoomin(&objective_fn, zoom_factor, n_zoom_levels, top_k_regions),
            CoarseToFineStrategy::MultiScale {
                scales,
                samples_per_scale,
            } => self.optimize_multiscale(&objective_fn, &scales, samples_per_scale),
        }
    }

    fn optimize_grid_refinement<F>(
        &mut self,
        objective_fn: &F,
        initial_grid_size: usize,
        refinement_factor: Float,
        n_refinement_levels: usize,
    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
    where
        F: Fn(&HashMap<String, Float>) -> Float,
    {
        let mut best_point = None;
        let mut best_value = f64::NEG_INFINITY;

        for level in 0..n_refinement_levels {
            // Sample grid at current resolution
            let grid_size = (initial_grid_size as Float * (1.0 + level as Float)).ceil() as usize;
            let points = self.generate_grid_points(grid_size)?;

            // Evaluate all points
            for params in points {
                let value = objective_fn(&params);
                self.evaluation_history.push(EvaluationPoint {
                    parameters: params.clone(),
                    value,
                    level,
                });

                if value > best_value {
                    best_value = value;
                    best_point = Some(params);
                }
            }

            // Refine bounds around best point
            if let Some(ref best) = best_point {
                self.refine_bounds(best, refinement_factor);
            }
        }

        Ok(CoarseToFineResult {
            best_parameters: best_point.unwrap_or_default(),
            best_value,
            n_evaluations: self.evaluation_history.len(),
            convergence_history: self.get_convergence_history(),
        })
    }

    fn optimize_hierarchical<F>(
        &mut self,
        objective_fn: &F,
        initial_samples: usize,
        samples_per_level: usize,
        focus_radius: Float,
    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
    where
        F: Fn(&HashMap<String, Float>) -> Float,
    {
        let mut best_point = None;
        let mut best_value = f64::NEG_INFINITY;
        let mut level = 0;

        // Initial random sampling
        for _ in 0..initial_samples {
            let params = self.sample_from_bounds(&self.current_bounds.clone())?;
            let value = objective_fn(&params);
            self.evaluation_history.push(EvaluationPoint {
                parameters: params.clone(),
                value,
                level,
            });

            if value > best_value {
                best_value = value;
                best_point = Some(params);
            }
        }

        // Hierarchical refinement with a focus radius that halves each level
        let mut current_radius = focus_radius;
        while self.evaluation_history.len() < self.config.max_evaluations {
            level += 1;

            let best_params = best_point.clone();
            if let Some(best) = best_params {
                // Focus sampling around best point
                for _ in 0..samples_per_level {
                    let params = self.sample_around_point(&best, current_radius)?;
                    let value = objective_fn(&params);
                    self.evaluation_history.push(EvaluationPoint {
                        parameters: params.clone(),
                        value,
                        level,
                    });

                    if value > best_value {
                        best_value = value;
                        best_point = Some(params);
                    }
                }
            }

            // Reduce focus radius for next level and stop once it is negligible
            current_radius *= 0.5;
            if current_radius < self.config.convergence_tolerance {
                break;
            }
        }

        Ok(CoarseToFineResult {
            best_parameters: best_point.unwrap_or_default(),
            best_value,
            n_evaluations: self.evaluation_history.len(),
            convergence_history: self.get_convergence_history(),
        })
    }

    fn optimize_zoomin<F>(
        &mut self,
        objective_fn: &F,
        zoom_factor: Float,
        n_zoom_levels: usize,
        top_k_regions: usize,
    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
    where
        F: Fn(&HashMap<String, Float>) -> Float,
    {
        let mut regions = vec![self.current_bounds.clone()];
        let mut best_point = None;
        let mut best_value = f64::NEG_INFINITY;

        for level in 0..n_zoom_levels {
            let mut region_performances = Vec::new();

            // Evaluate each region
            for region in &regions {
                let samples = self.sample_from_bounds(region)?;
                let value = objective_fn(&samples);

                self.evaluation_history.push(EvaluationPoint {
                    parameters: samples.clone(),
                    value,
                    level,
                });

                if value > best_value {
                    best_value = value;
                    best_point = Some(samples.clone());
                }

                region_performances.push((region.clone(), samples, value));
            }

            // Select top-k regions
            region_performances.sort_by(|a, b| b.2.partial_cmp(&a.2).unwrap());
            region_performances.truncate(top_k_regions);

            // Zoom into top regions
            regions = region_performances
                .iter()
                .map(|(region, center, _)| self.zoom_region(region, center, zoom_factor))
                .collect();
        }

        Ok(CoarseToFineResult {
            best_parameters: best_point.unwrap_or_default(),
            best_value,
            n_evaluations: self.evaluation_history.len(),
            convergence_history: self.get_convergence_history(),
        })
    }

    fn optimize_multiscale<F>(
        &mut self,
        objective_fn: &F,
        scales: &[Float],
        samples_per_scale: usize,
    ) -> Result<CoarseToFineResult, Box<dyn std::error::Error>>
    where
        F: Fn(&HashMap<String, Float>) -> Float,
    {
        let mut best_point = None;
        let mut best_value = f64::NEG_INFINITY;

        for (level, &scale) in scales.iter().enumerate() {
            // Generate samples at current scale
            for _ in 0..samples_per_scale {
                let current_bounds = self.current_bounds.clone();
                let mut params = self.sample_from_bounds(&current_bounds)?;

                // Apply a scale-sized perturbation, clamped to the parameter bounds
                for (name, value) in params.iter_mut() {
                    let perturbation = self.rng.gen_range(-scale..scale);
                    if let Some(&(min, max)) = current_bounds.get(name) {
                        *value = (*value + perturbation).max(min).min(max);
                    } else {
                        *value += perturbation;
                    }
                }

                let value = objective_fn(&params);
                self.evaluation_history.push(EvaluationPoint {
                    parameters: params.clone(),
                    value,
                    level,
                });

                if value > best_value {
                    best_value = value;
                    best_point = Some(params);
                }
            }

            // Refine bounds for next scale
            if let Some(ref best) = best_point {
                self.refine_bounds(best, 0.5);
            }
        }

        Ok(CoarseToFineResult {
            best_parameters: best_point.unwrap_or_default(),
            best_value,
            n_evaluations: self.evaluation_history.len(),
            convergence_history: self.get_convergence_history(),
        })
    }

    // Helper methods

    fn generate_grid_points(
        &self,
        grid_size: usize,
    ) -> Result<Vec<HashMap<String, Float>>, Box<dyn std::error::Error>> {
        let mut points = Vec::new();

        // Generate grid for each parameter
        let param_names: Vec<_> = self.current_bounds.keys().cloned().collect();

        if param_names.is_empty() {
            return Ok(points);
        }

        // Axis-aligned grid: sweep one parameter at a time while holding the other
        // parameters at the midpoint of their bounds (a full Cartesian grid would be
        // exponential in the number of parameters)
        for param_name in &param_names {
            if let Some(&(min, max)) = self.current_bounds.get(param_name) {
                for i in 0..grid_size {
                    let denom = grid_size.saturating_sub(1).max(1) as Float;
                    let value = min + (max - min) * (i as Float) / denom;
                    let mut params: HashMap<String, Float> = self
                        .current_bounds
                        .iter()
                        .map(|(name, &(lo, hi))| (name.clone(), (lo + hi) / 2.0))
                        .collect();
                    params.insert(param_name.clone(), value);
                    points.push(params);
                }
            }
        }

        Ok(points)
    }

    fn sample_from_bounds(
        &mut self,
        bounds: &HashMap<String, (Float, Float)>,
    ) -> Result<HashMap<String, Float>, Box<dyn std::error::Error>> {
        let mut params = HashMap::new();

        for (param_name, &(min, max)) in bounds {
            let value = self.rng.gen_range(min..max);
            params.insert(param_name.clone(), value);
        }

        Ok(params)
    }

    fn sample_around_point(
        &mut self,
        center: &HashMap<String, Float>,
        radius: Float,
    ) -> Result<HashMap<String, Float>, Box<dyn std::error::Error>> {
        let mut params = HashMap::new();

        for (param_name, &center_value) in center {
            if let Some(&(min, max)) = self.current_bounds.get(param_name) {
                let perturbation = self.rng.gen_range(-radius..radius);
                let value = (center_value + perturbation).max(min).min(max);
                params.insert(param_name.clone(), value);
            }
        }

        Ok(params)
    }

    fn refine_bounds(&mut self, best_point: &HashMap<String, Float>, refinement_factor: Float) {
        for (param_name, &value) in best_point {
            if let Some(&(min, max)) = self.current_bounds.get(param_name) {
                let range = max - min;
                let new_range = range * refinement_factor;
                let new_min = (value - new_range / 2.0).max(min);
                let new_max = (value + new_range / 2.0).min(max);
                self.current_bounds
                    .insert(param_name.clone(), (new_min, new_max));
            }
        }
    }

    fn zoom_region(
        &self,
        region: &HashMap<String, (Float, Float)>,
        center: &HashMap<String, Float>,
        zoom_factor: Float,
    ) -> HashMap<String, (Float, Float)> {
        let mut zoomed = HashMap::new();

        for (param_name, &(min, max)) in region {
            if let Some(&center_value) = center.get(param_name) {
                let range = max - min;
                let new_range = range * zoom_factor;
                let new_min = (center_value - new_range / 2.0).max(min);
                let new_max = (center_value + new_range / 2.0).min(max);
                zoomed.insert(param_name.clone(), (new_min, new_max));
            }
        }

        zoomed
    }

    fn get_convergence_history(&self) -> Vec<Float> {
        let mut best_so_far = f64::NEG_INFINITY;
        self.evaluation_history
            .iter()
            .map(|point| {
                if point.value > best_so_far {
                    best_so_far = point.value;
                }
                best_so_far
            })
            .collect()
    }
}

/// Evaluation point in optimization
#[derive(Debug, Clone)]
struct EvaluationPoint {
    parameters: HashMap<String, Float>,
    value: Float,
    level: usize,
}

/// Result of coarse-to-fine optimization
#[derive(Debug, Clone)]
pub struct CoarseToFineResult {
    pub best_parameters: HashMap<String, Float>,
    pub best_value: Float,
    pub n_evaluations: usize,
    pub convergence_history: Vec<Float>,
}

// ============================================================================
// Adaptive Fidelity Selection
// ============================================================================

/// Adaptive fidelity selection strategy
#[derive(Debug, Clone)]
pub enum AdaptiveFidelityStrategy {
    /// UCB-based fidelity selection
    UpperConfidenceBound { exploration_weight: Float },
    /// Expected Improvement with fidelity cost
    ExpectedImprovement { cost_weight: Float },
    /// Information gain maximization
    InformationGain { uncertainty_threshold: Float },
    /// Cost-aware Thompson sampling
    ThompsonSampling {
        prior_alpha: Float,
        prior_beta: Float,
    },
}

/// Adaptive fidelity selector
pub struct AdaptiveFidelitySelector {
    strategy: AdaptiveFidelityStrategy,
    fidelity_stats: HashMap<usize, FidelityStatistics>,
}

impl AdaptiveFidelitySelector {
    pub fn new(strategy: AdaptiveFidelityStrategy) -> Self {
        Self {
            strategy,
            fidelity_stats: HashMap::new(),
        }
    }

    /// Select fidelity level for next evaluation
    pub fn select_fidelity(
        &mut self,
        available_fidelities: &[usize],
        budget_remaining: Float,
    ) -> usize {
        match &self.strategy {
            AdaptiveFidelityStrategy::UpperConfidenceBound { exploration_weight } => {
                self.select_ucb(available_fidelities, *exploration_weight)
            }
            AdaptiveFidelityStrategy::ExpectedImprovement { cost_weight } => {
                self.select_ei(available_fidelities, *cost_weight, budget_remaining)
            }
            AdaptiveFidelityStrategy::InformationGain {
                uncertainty_threshold,
            } => self.select_ig(available_fidelities, *uncertainty_threshold),
            AdaptiveFidelityStrategy::ThompsonSampling {
                prior_alpha,
                prior_beta,
            } => self.select_thompson(available_fidelities, *prior_alpha, *prior_beta),
        }
    }

    /// Update fidelity statistics after evaluation
    pub fn update(&mut self, fidelity: usize, performance: Float, cost: Float) {
        let stats = self
            .fidelity_stats
            .entry(fidelity)
            .or_insert_with(|| FidelityStatistics {
                n_evaluations: 0,
                total_performance: 0.0,
                total_cost: 0.0,
                mean_performance: 0.0,
                mean_cost: 0.0,
            });

        stats.n_evaluations += 1;
        stats.total_performance += performance;
        stats.total_cost += cost;
        stats.mean_performance = stats.total_performance / stats.n_evaluations as Float;
        stats.mean_cost = stats.total_cost / stats.n_evaluations as Float;
    }

    fn select_ucb(&self, fidelities: &[usize], exploration_weight: Float) -> usize {
        let total_evals: usize = self.fidelity_stats.values().map(|s| s.n_evaluations).sum();

        let mut best_fidelity = fidelities[0];
        let mut best_score = f64::NEG_INFINITY;

        for &fidelity in fidelities {
            let score = if let Some(stats) = self.fidelity_stats.get(&fidelity) {
                let exploitation = stats.mean_performance / stats.mean_cost.max(1e-6);
                let exploration = exploration_weight
                    * ((total_evals as Float).ln() / stats.n_evaluations as Float).sqrt();
                exploitation + exploration
            } else {
                f64::INFINITY // Unvisited fidelities get highest priority
            };

            if score > best_score {
                best_score = score;
                best_fidelity = fidelity;
            }
        }

        best_fidelity
    }

    fn select_ei(&self, fidelities: &[usize], cost_weight: Float, _budget: Float) -> usize {
        let mut best_fidelity = fidelities[0];
        let mut best_score = f64::NEG_INFINITY;

        for &fidelity in fidelities {
            let score = if let Some(stats) = self.fidelity_stats.get(&fidelity) {
                stats.mean_performance - cost_weight * stats.mean_cost
            } else {
                0.0 // Default for unvisited
            };

            if score > best_score {
                best_score = score;
                best_fidelity = fidelity;
            }
        }

        best_fidelity
    }

    fn select_ig(&self, fidelities: &[usize], _threshold: Float) -> usize {
        // Prefer fidelities with less data (higher uncertainty)
        let mut best_fidelity = fidelities[0];
        let mut min_evals = usize::MAX;

        for &fidelity in fidelities {
            let n_evals = self
                .fidelity_stats
                .get(&fidelity)
                .map(|s| s.n_evaluations)
                .unwrap_or(0);
            if n_evals < min_evals {
                min_evals = n_evals;
                best_fidelity = fidelity;
            }
        }

        best_fidelity
    }

    fn select_thompson(
        &self,
        fidelities: &[usize],
        prior_alpha: Float,
        prior_beta: Float,
    ) -> usize {
        let mut rng = StdRng::seed_from_u64(42);
        let mut best_fidelity = fidelities[0];
        let mut best_sample = f64::NEG_INFINITY;

        for &fidelity in fidelities {
            // Beta distribution sampling (simplified)
            let (alpha, beta) = if let Some(stats) = self.fidelity_stats.get(&fidelity) {
                (
                    prior_alpha + stats.total_performance,
                    prior_beta + stats.n_evaluations as Float - stats.total_performance,
                )
            } else {
                (prior_alpha, prior_beta)
            };

            // Simple sampling (in practice would use proper Beta distribution)
            let sample = rng.gen_range(0.0..(alpha / (alpha + beta)));

            if sample > best_sample {
                best_sample = sample;
                best_fidelity = fidelity;
            }
        }

        best_fidelity
    }
}

#[derive(Debug, Clone)]
struct FidelityStatistics {
    n_evaluations: usize,
    total_performance: Float,
    total_cost: Float,
    mean_performance: Float,
    mean_cost: Float,
}

// ============================================================================
// Budget Allocation Algorithms
// ============================================================================

/// Budget allocation strategy
#[derive(Debug, Clone)]
pub enum BudgetAllocationStrategy {
    /// Equal allocation to all configurations
    Equal,
    /// Proportional to estimated performance
    Proportional { min_allocation: Float },
    /// Rank-based allocation
    RankBased { allocation_ratios: Vec<Float> },
    /// Adaptive allocation based on uncertainty
    UncertaintyBased { exploration_factor: Float },
    /// Racing algorithm
    Racing { confidence_level: Float },
}

/// Budget allocator
pub struct BudgetAllocator {
    strategy: BudgetAllocationStrategy,
    total_budget: Float,
    allocations: HashMap<usize, Float>,
}

impl BudgetAllocator {
    pub fn new(strategy: BudgetAllocationStrategy, total_budget: Float) -> Self {
        Self {
            strategy,
            total_budget,
            allocations: HashMap::new(),
        }
    }

    /// Allocate budget among configurations
    pub fn allocate(
        &mut self,
        configurations: &[ConfigurationWithPerformance],
    ) -> HashMap<usize, Float> {
        // Clone strategy to avoid borrowing issues
        let strategy = self.strategy.clone();
        match strategy {
            BudgetAllocationStrategy::Equal => self.allocate_equal(configurations),
            BudgetAllocationStrategy::Proportional { min_allocation } => {
                self.allocate_proportional(configurations, min_allocation)
            }
            BudgetAllocationStrategy::RankBased { allocation_ratios } => {
                self.allocate_rank_based(configurations, &allocation_ratios)
            }
            BudgetAllocationStrategy::UncertaintyBased { exploration_factor } => {
                self.allocate_uncertainty(configurations, exploration_factor)
            }
            BudgetAllocationStrategy::Racing { confidence_level } => {
                self.allocate_racing(configurations, confidence_level)
            }
        }
    }

    fn allocate_equal(
        &mut self,
        configs: &[ConfigurationWithPerformance],
    ) -> HashMap<usize, Float> {
        let per_config = self.total_budget / configs.len() as Float;
        configs
            .iter()
            .enumerate()
            .map(|(i, _)| (i, per_config))
            .collect()
    }

    fn allocate_proportional(
        &mut self,
        configs: &[ConfigurationWithPerformance],
        min_allocation: Float,
    ) -> HashMap<usize, Float> {
        let total_perf: Float = configs.iter().map(|c| c.performance).sum();
        let available = self.total_budget - min_allocation * configs.len() as Float;

        configs
            .iter()
            .enumerate()
            .map(|(i, c)| {
                let proportional = if total_perf > 0.0 {
                    (c.performance / total_perf) * available
                } else {
                    0.0
                };
                (i, min_allocation + proportional)
            })
            .collect()
    }

    fn allocate_rank_based(
        &mut self,
        configs: &[ConfigurationWithPerformance],
        ratios: &[Float],
    ) -> HashMap<usize, Float> {
        let mut sorted: Vec<_> = configs.iter().enumerate().collect();
        sorted.sort_by(|a, b| b.1.performance.partial_cmp(&a.1.performance).unwrap());

        let total_ratio: Float = ratios.iter().sum();
        sorted
            .iter()
            .enumerate()
            .map(|(rank, (orig_idx, _))| {
                let ratio = ratios.get(rank).cloned().unwrap_or(0.0);
                let allocation = (ratio / total_ratio) * self.total_budget;
                (*orig_idx, allocation)
            })
            .collect()
    }

    fn allocate_uncertainty(
        &mut self,
        configs: &[ConfigurationWithPerformance],
        exploration: Float,
    ) -> HashMap<usize, Float> {
        // Simplified: score each configuration by performance plus an uncertainty bonus
        // (approximated as the inverse of resources already used), then distribute the
        // budget proportionally to the scores so the allocations sum to the total budget
        let scores: Vec<Float> = configs
            .iter()
            .map(|c| {
                let uncertainty = 1.0 / (c.resources_used + 1.0);
                c.performance + exploration * uncertainty
            })
            .collect();
        let total_score: Float = scores.iter().sum();

        scores
            .iter()
            .enumerate()
            .map(|(i, &score)| {
                let allocation = if total_score > 0.0 {
                    (score / total_score) * self.total_budget
                } else {
                    self.total_budget / configs.len() as Float
                };
                (i, allocation)
            })
            .collect()
    }

    fn allocate_racing(
        &mut self,
        configs: &[ConfigurationWithPerformance],
        _confidence: Float,
    ) -> HashMap<usize, Float> {
        // Simplified racing: start every configuration with an equal share;
        // statistical elimination of clearly inferior configurations is not implemented here
        let initial = self.total_budget / configs.len() as Float;

        configs
            .iter()
            .enumerate()
            .map(|(i, _)| (i, initial))
            .collect()
    }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_progressive_allocation_config() {
        let config = ProgressiveAllocationConfig::default();
        assert_eq!(config.total_budget, 100.0);
        assert_eq!(config.min_resource_per_config, 0.1);
    }
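
    // A hedged usage sketch (not part of the original module): exercises a Custom
    // resource schedule end to end. The schedule values and configuration data below
    // are illustrative assumptions.
    #[test]
    fn test_progressive_allocation_custom_schedule() {
        let config = ProgressiveAllocationConfig {
            strategy: ProgressiveAllocationStrategy::Custom {
                resource_schedule: vec![1.0, 4.0, 16.0],
            },
            ..ProgressiveAllocationConfig::default()
        };
        let mut allocator = ProgressiveAllocator::new(config);
        let configs: Vec<ConfigurationWithPerformance> = (0..4)
            .map(|i| ConfigurationWithPerformance {
                config_id: i,
                parameters: HashMap::new(),
                performance: i as Float,
                resources_used: 0.0,
            })
            .collect();
        let plan = allocator.allocate(&configs).unwrap();
        // Three schedule entries yield three levels, with survivors halved per level.
        assert_eq!(plan.n_levels, 3);
        assert!(plan.final_configs <= configs.len());
        assert!(plan.total_resources_used <= 100.0);
    }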

    #[test]
    fn test_coarse_to_fine_config() {
        let config = CoarseToFineConfig::default();
        assert_eq!(config.max_evaluations, 1000);
    }
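
    // A hedged usage sketch (not part of the original module): runs the MultiScale
    // strategy on a toy 1-D objective. The bounds, scales, and sample counts are
    // illustrative assumptions.
    #[test]
    fn test_coarse_to_fine_multiscale_runs() {
        let config = CoarseToFineConfig {
            strategy: CoarseToFineStrategy::MultiScale {
                scales: vec![1.0, 0.1],
                samples_per_scale: 5,
            },
            parameter_bounds: [("x".to_string(), (0.0, 10.0))].into_iter().collect(),
            ..CoarseToFineConfig::default()
        };
        let mut optimizer = CoarseToFineOptimizer::new(config);
        // Maximize -|x|: the best value over [0, 10] approaches 0.
        let result = optimizer.optimize(|p| -p["x"].abs()).unwrap();
        assert_eq!(result.n_evaluations, 10);
        assert!(result.best_value <= 0.0);
    }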

    #[test]
    fn test_adaptive_fidelity_selector() {
        let strategy = AdaptiveFidelityStrategy::UpperConfidenceBound {
            exploration_weight: 1.0,
        };
        let selector = AdaptiveFidelitySelector::new(strategy);
        assert!(selector.fidelity_stats.is_empty());
    }
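
    // A hedged usage sketch (not part of the original module): shows the UCB selector
    // moving to an unseen fidelity after the first one has been evaluated. Fidelity ids,
    // performance, and cost values are illustrative assumptions.
    #[test]
    fn test_adaptive_fidelity_selector_ucb_prefers_unvisited() {
        let mut selector = AdaptiveFidelitySelector::new(
            AdaptiveFidelityStrategy::UpperConfidenceBound {
                exploration_weight: 1.0,
            },
        );
        let fidelities = [1usize, 2, 3];

        let first = selector.select_fidelity(&fidelities, 100.0);
        assert!(fidelities.contains(&first));

        // Record one evaluation at the chosen fidelity, then select again.
        selector.update(first, 0.8, 1.0);
        let second = selector.select_fidelity(&fidelities, 99.0);
        // Unvisited fidelities score infinitely high under UCB, so a new one is chosen.
        assert_ne!(second, first);
    }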

    #[test]
    fn test_budget_allocator() {
        let strategy = BudgetAllocationStrategy::Equal;
        let allocator = BudgetAllocator::new(strategy, 100.0);
        assert_eq!(allocator.total_budget, 100.0);
    }
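
    // A hedged usage sketch (not part of the original module): proportional budget
    // allocation with a guaranteed minimum per configuration. All numbers below are
    // illustrative assumptions.
    #[test]
    fn test_budget_allocator_proportional() {
        let mut allocator = BudgetAllocator::new(
            BudgetAllocationStrategy::Proportional { min_allocation: 5.0 },
            100.0,
        );
        let configs = vec![
            ConfigurationWithPerformance {
                config_id: 0,
                parameters: HashMap::new(),
                performance: 3.0,
                resources_used: 0.0,
            },
            ConfigurationWithPerformance {
                config_id: 1,
                parameters: HashMap::new(),
                performance: 1.0,
                resources_used: 0.0,
            },
        ];
        let allocations = allocator.allocate(&configs);
        // 100 - 2 * 5 = 90 remains after the minimums; it is split 3:1 by performance.
        assert!((allocations[&0] - 72.5).abs() < 1e-9);
        assert!((allocations[&1] - 27.5).abs() < 1e-9);
    }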

    #[test]
    fn test_progressive_allocator() {
        let config = ProgressiveAllocationConfig::default();
        let allocator = ProgressiveAllocator::new(config);
        assert_eq!(allocator.remaining_budget, 100.0);
    }
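
    // A hedged usage sketch (not part of the original module): runs the default
    // geometric schedule over two candidate configurations. The performance values
    // are illustrative assumptions.
    #[test]
    fn test_progressive_allocator_geometric_allocation() {
        let config = ProgressiveAllocationConfig::default();
        let total_budget = config.total_budget;
        let mut allocator = ProgressiveAllocator::new(config);
        let configs = vec![
            ConfigurationWithPerformance {
                config_id: 0,
                parameters: HashMap::new(),
                performance: 0.9,
                resources_used: 0.0,
            },
            ConfigurationWithPerformance {
                config_id: 1,
                parameters: HashMap::new(),
                performance: 0.5,
                resources_used: 0.0,
            },
        ];
        let plan = allocator.allocate(&configs).unwrap();
        assert!(!plan.allocations.is_empty());
        assert_eq!(plan.n_levels, 5);
        assert!(plan.total_resources_used <= total_budget);
    }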

    #[test]
    fn test_coarse_to_fine_optimizer() {
        let config = CoarseToFineConfig::default();
        let optimizer = CoarseToFineOptimizer::new(config);
        assert!(optimizer.evaluation_history.is_empty());
    }
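
    // A hedged usage sketch (not part of the original module): grid refinement on a
    // 1-D quadratic with its maximum at x = 1. The bound on "x" is an illustrative
    // assumption.
    #[test]
    fn test_coarse_to_fine_grid_refinement_runs() {
        let mut config = CoarseToFineConfig::default();
        config
            .parameter_bounds
            .insert("x".to_string(), (-5.0, 5.0));
        let mut optimizer = CoarseToFineOptimizer::new(config);
        let result = optimizer
            .optimize(|params| {
                let x = params.get("x").copied().unwrap_or(0.0);
                -(x - 1.0) * (x - 1.0)
            })
            .unwrap();
        assert!(result.n_evaluations > 0);
        assert!(result.best_value <= 0.0);
        assert!(result.best_parameters.contains_key("x"));
    }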
}