quantrs2_ml/automl/search/
hyperparameter_optimizer.rs

//! Quantum Hyperparameter Optimizer
//!
//! This module provides hyperparameter optimization specifically for quantum ML models.
use crate::automl::config::{HyperparameterSearchSpace, QuantumHyperparameterSpace};
use crate::automl::pipeline::QuantumMLPipeline;
use crate::error::Result;
use scirs2_core::ndarray::{Array1, Array2};
use std::collections::HashMap;
use std::time::Instant;
10
/// Quantum hyperparameter optimizer
///
/// Drives a search over classical, quantum, and architecture hyperparameters
/// for a `QuantumMLPipeline`, recording every evaluated configuration in its
/// optimization history and remembering the best one found.
#[derive(Debug, Clone)]
pub struct QuantumHyperparameterOptimizer {
    /// Optimization strategy (set to `BayesianOptimization` by `new`)
    strategy: HyperparameterOptimizationStrategy,

    /// Search space sampled when generating candidate configurations
    search_space: HyperparameterSearchSpace,

    /// Optimization history: all trials run so far, the best trial,
    /// and the per-trial convergence curve of best scores
    optimization_history: OptimizationHistory,

    /// Best configuration found (`None` until the first trial improves
    /// on the initial score)
    best_configuration: Option<HyperparameterConfiguration>,
}
26
/// Hyperparameter optimization strategies
///
/// NOTE: `QuantumHyperparameterOptimizer::optimize` currently has dedicated
/// code paths only for `RandomSearch` and `BayesianOptimization` (the latter
/// itself delegating to random search); every other variant falls back to
/// random search.
#[derive(Debug, Clone)]
pub enum HyperparameterOptimizationStrategy {
    /// Uniform random sampling of the search space
    RandomSearch,
    /// Grid search (not yet implemented; falls back to random search)
    GridSearch,
    /// Bayesian optimization (currently delegates to random search)
    BayesianOptimization,
    /// Evolutionary search (not yet implemented; falls back to random search)
    EvolutionarySearch,
    /// Quantum annealing (not yet implemented; falls back to random search)
    QuantumAnnealing,
    /// Quantum variational search (not yet implemented; falls back to random search)
    QuantumVariational,
    /// Hybrid quantum-classical search (not yet implemented; falls back to random search)
    HybridQuantumClassical,
}
38
/// Hyperparameter configuration
///
/// One candidate point in the search space. All numeric hyperparameters are
/// stored as `f64` in string-keyed maps (integer-valued quantum parameters
/// such as `num_qubits` are cast to `f64` when sampled).
#[derive(Debug, Clone)]
pub struct HyperparameterConfiguration {
    /// Classical hyperparameters (e.g. "learning_rate", "regularization",
    /// "batch_size")
    pub classical_params: HashMap<String, f64>,

    /// Quantum hyperparameters (e.g. "num_qubits", "circuit_depth",
    /// stored as f64 for a uniform parameter map)
    pub quantum_params: HashMap<String, f64>,

    /// Architecture parameters (currently left empty by the random sampler)
    pub architecture_params: HashMap<String, usize>,

    /// Performance score (initialized to 0.0 at sampling time; trial
    /// performance is recorded on `OptimizationTrial` instead)
    pub performance_score: f64,
}
54
/// Optimization history
///
/// Accumulated record of an optimization run: every trial, the best trial
/// seen so far, and the best-score-so-far curve (one entry per trial).
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
    /// Trial history, in execution order
    pub trials: Vec<OptimizationTrial>,

    /// Best trial so far (`None` until a trial improves on the initial score)
    pub best_trial: Option<OptimizationTrial>,

    /// Convergence history: best score after each trial
    pub convergence_history: Vec<f64>,
}
67
/// Optimization trial
///
/// The outcome of evaluating one hyperparameter configuration.
#[derive(Debug, Clone)]
pub struct OptimizationTrial {
    /// Trial ID (index in the run, starting at 0)
    pub trial_id: usize,

    /// Configuration tested in this trial
    pub configuration: HyperparameterConfiguration,

    /// Performance achieved (higher is better; random search records
    /// negated validation MSE here)
    pub performance: f64,

    /// Resource usage (currently recorded as all-zero defaults)
    pub resource_usage: ResourceUsage,

    /// Trial duration — presumably wall-clock seconds, TODO confirm
    /// (currently recorded as 0.0 by random search)
    pub duration: f64,
}
86
/// Resource usage tracking
///
/// Classical and quantum resources consumed by a single trial.
/// Currently only populated with `Default` (all-zero) values.
#[derive(Debug, Clone)]
pub struct ResourceUsage {
    /// Memory usage (MB)
    pub memory_mb: f64,

    /// Quantum resources used
    pub quantum_resources: QuantumResourceUsage,

    /// Training time — presumably seconds, TODO confirm
    pub training_time: f64,
}
99
/// Quantum resource usage
///
/// Quantum-hardware-specific resource accounting for a single trial.
#[derive(Debug, Clone)]
pub struct QuantumResourceUsage {
    /// Qubits used
    pub qubits_used: usize,

    /// Circuit depth
    pub circuit_depth: usize,

    /// Number of gates
    pub num_gates: usize,

    /// Coherence time used — units not established here, TODO confirm
    pub coherence_time_used: f64,
}
115
116impl QuantumHyperparameterOptimizer {
117    /// Create a new hyperparameter optimizer
118    pub fn new(search_space: &HyperparameterSearchSpace) -> Self {
119        Self {
120            strategy: HyperparameterOptimizationStrategy::BayesianOptimization,
121            search_space: search_space.clone(),
122            optimization_history: OptimizationHistory::new(),
123            best_configuration: None,
124        }
125    }
126
127    /// Optimize hyperparameters for a given pipeline
128    pub fn optimize(
129        &mut self,
130        pipeline: QuantumMLPipeline,
131        X: &Array2<f64>,
132        y: &Array1<f64>,
133    ) -> Result<QuantumMLPipeline> {
134        match self.strategy {
135            HyperparameterOptimizationStrategy::RandomSearch => self.random_search(pipeline, X, y),
136            HyperparameterOptimizationStrategy::BayesianOptimization => {
137                self.bayesian_optimization(pipeline, X, y)
138            }
139            _ => {
140                // For now, default to random search
141                self.random_search(pipeline, X, y)
142            }
143        }
144    }
145
146    /// Get the best configuration found
147    pub fn best_configuration(&self) -> Option<&HyperparameterConfiguration> {
148        self.best_configuration.as_ref()
149    }
150
151    /// Get optimization history
152    pub fn history(&self) -> &OptimizationHistory {
153        &self.optimization_history
154    }
155
156    // Private methods
157
158    fn random_search(
159        &mut self,
160        mut pipeline: QuantumMLPipeline,
161        X: &Array2<f64>,
162        y: &Array1<f64>,
163    ) -> Result<QuantumMLPipeline> {
164        let num_trials = 20; // Configurable
165        let mut best_pipeline = pipeline.clone();
166        let mut best_score = f64::NEG_INFINITY;
167
168        for trial_id in 0..num_trials {
169            // Generate random configuration
170            let config = self.generate_random_configuration();
171
172            // Apply configuration to pipeline
173            pipeline.apply_hyperparameters(&config)?;
174
175            // Evaluate pipeline
176            let score = self.evaluate_configuration(&pipeline, X, y)?;
177
178            // Record trial
179            let trial = OptimizationTrial {
180                trial_id,
181                configuration: config.clone(),
182                performance: score,
183                resource_usage: ResourceUsage::default(),
184                duration: 0.0, // TODO: measure actual time
185            };
186            self.optimization_history.trials.push(trial);
187
188            // Update best if better
189            if score > best_score {
190                best_score = score;
191                best_pipeline = pipeline.clone();
192                self.best_configuration = Some(config);
193                self.optimization_history.best_trial = Some(
194                    self.optimization_history
195                        .trials
196                        .last()
197                        .expect("trials should not be empty")
198                        .clone(),
199                );
200            }
201
202            self.optimization_history
203                .convergence_history
204                .push(best_score);
205        }
206
207        Ok(best_pipeline)
208    }
209
210    fn bayesian_optimization(
211        &mut self,
212        pipeline: QuantumMLPipeline,
213        X: &Array2<f64>,
214        y: &Array1<f64>,
215    ) -> Result<QuantumMLPipeline> {
216        // Simplified Bayesian optimization
217        // In practice, this would use a Gaussian Process
218        self.random_search(pipeline, X, y)
219    }
220
221    fn generate_random_configuration(&self) -> HyperparameterConfiguration {
222        use fastrand;
223
224        let mut classical_params = HashMap::new();
225        let mut quantum_params = HashMap::new();
226        let mut architecture_params = HashMap::new();
227
228        // Sample learning rate
229        let lr_min = self.search_space.learning_rates.0;
230        let lr_max = self.search_space.learning_rates.1;
231        let learning_rate = lr_min + fastrand::f64() * (lr_max - lr_min);
232        classical_params.insert("learning_rate".to_string(), learning_rate);
233
234        // Sample regularization
235        let reg_min = self.search_space.regularization.0;
236        let reg_max = self.search_space.regularization.1;
237        let regularization = reg_min + fastrand::f64() * (reg_max - reg_min);
238        classical_params.insert("regularization".to_string(), regularization);
239
240        // Sample batch size
241        if !self.search_space.batch_sizes.is_empty() {
242            let batch_size_idx = fastrand::usize(..self.search_space.batch_sizes.len());
243            let batch_size = self.search_space.batch_sizes[batch_size_idx] as f64;
244            classical_params.insert("batch_size".to_string(), batch_size);
245        }
246
247        // Sample quantum parameters
248        let qubit_min = self.search_space.quantum_params.num_qubits.0;
249        let qubit_max = self.search_space.quantum_params.num_qubits.1;
250        let num_qubits = qubit_min + fastrand::usize(..(qubit_max - qubit_min + 1));
251        quantum_params.insert("num_qubits".to_string(), num_qubits as f64);
252
253        let depth_min = self.search_space.quantum_params.circuit_depth.0;
254        let depth_max = self.search_space.quantum_params.circuit_depth.1;
255        let circuit_depth = depth_min + fastrand::usize(..(depth_max - depth_min + 1));
256        quantum_params.insert("circuit_depth".to_string(), circuit_depth as f64);
257
258        HyperparameterConfiguration {
259            classical_params,
260            quantum_params,
261            architecture_params,
262            performance_score: 0.0,
263        }
264    }
265
266    fn evaluate_configuration(
267        &self,
268        pipeline: &QuantumMLPipeline,
269        X: &Array2<f64>,
270        y: &Array1<f64>,
271    ) -> Result<f64> {
272        // Simple holdout evaluation
273        let split_point = (X.nrows() as f64 * 0.8) as usize;
274
275        let X_train = X
276            .slice(scirs2_core::ndarray::s![0..split_point, ..])
277            .to_owned();
278        let y_train = y.slice(scirs2_core::ndarray::s![0..split_point]).to_owned();
279        let X_val = X
280            .slice(scirs2_core::ndarray::s![split_point.., ..])
281            .to_owned();
282        let y_val = y.slice(scirs2_core::ndarray::s![split_point..]).to_owned();
283
284        let mut pipeline_copy = pipeline.clone();
285        pipeline_copy.fit(&X_train, &y_train)?;
286        let predictions = pipeline_copy.predict(&X_val)?;
287
288        // Calculate accuracy or R2 score
289        let score = predictions
290            .iter()
291            .zip(y_val.iter())
292            .map(|(pred, true_val)| (pred - true_val).powi(2))
293            .sum::<f64>()
294            / predictions.len() as f64;
295
296        Ok(-score) // Negative MSE (higher is better)
297    }
298}
299
300impl OptimizationHistory {
301    fn new() -> Self {
302        Self {
303            trials: Vec::new(),
304            best_trial: None,
305            convergence_history: Vec::new(),
306        }
307    }
308}
309
310impl Default for ResourceUsage {
311    fn default() -> Self {
312        Self {
313            memory_mb: 0.0,
314            quantum_resources: QuantumResourceUsage::default(),
315            training_time: 0.0,
316        }
317    }
318}
319
320impl Default for QuantumResourceUsage {
321    fn default() -> Self {
322        Self {
323            qubits_used: 0,
324            circuit_depth: 0,
325            num_gates: 0,
326            coherence_time_used: 0.0,
327        }
328    }
329}