quantrs2_ml/automl/search/
hyperparameter_optimizer.rs

1//! Quantum Hyperparameter Optimizer
2//!
3//! This module provides hyperparameter optimization specifically for quantum ML models.
4
5use crate::automl::config::{HyperparameterSearchSpace, QuantumHyperparameterSpace};
6use crate::automl::pipeline::QuantumMLPipeline;
7use crate::error::Result;
8use scirs2_core::ndarray::{Array1, Array2};
9use std::collections::HashMap;
10
/// Quantum hyperparameter optimizer.
///
/// Samples configurations from a `HyperparameterSearchSpace`, evaluates them
/// against a `QuantumMLPipeline`, and records every trial in its history.
#[derive(Debug, Clone)]
pub struct QuantumHyperparameterOptimizer {
    /// Optimization strategy (set to `BayesianOptimization` by `new`)
    strategy: HyperparameterOptimizationStrategy,

    /// Search space configurations are sampled from
    search_space: HyperparameterSearchSpace,

    /// History of all trials run so far
    optimization_history: OptimizationHistory,

    /// Best configuration found so far (`None` until the first improving trial)
    best_configuration: Option<HyperparameterConfiguration>,
}
26
/// Hyperparameter optimization strategies.
///
/// NOTE(review): only `RandomSearch` and `BayesianOptimization` are
/// dispatched explicitly in `optimize`; every other variant currently
/// falls back to random search.
#[derive(Debug, Clone)]
pub enum HyperparameterOptimizationStrategy {
    /// Uniform random sampling from the search space
    RandomSearch,
    /// Exhaustive sweep over a discretized grid (not yet implemented)
    GridSearch,
    /// Surrogate-model-guided search (currently delegates to random search)
    BayesianOptimization,
    /// Population-based evolutionary search (not yet implemented)
    EvolutionarySearch,
    /// Quantum-annealing-based search (not yet implemented)
    QuantumAnnealing,
    /// Variational-quantum-circuit-based search (not yet implemented)
    QuantumVariational,
    /// Hybrid quantum-classical search (not yet implemented)
    HybridQuantumClassical,
}
38
/// A single hyperparameter configuration sampled during search.
#[derive(Debug, Clone)]
pub struct HyperparameterConfiguration {
    /// Classical hyperparameters keyed by name
    /// (e.g. "learning_rate", "regularization", "batch_size")
    pub classical_params: HashMap<String, f64>,

    /// Quantum hyperparameters keyed by name
    /// (e.g. "num_qubits", "circuit_depth" — counts stored as f64)
    pub quantum_params: HashMap<String, f64>,

    /// Architecture parameters
    // NOTE(review): never populated by generate_random_configuration —
    // architecture search appears unimplemented.
    pub architecture_params: HashMap<String, usize>,

    /// Performance score (initialized to 0.0 when sampled; trial scores
    /// are recorded on `OptimizationTrial::performance` instead)
    pub performance_score: f64,
}
54
/// Record of all optimization activity across a search run.
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
    /// Every trial executed, in order
    pub trials: Vec<OptimizationTrial>,

    /// The best-performing trial so far, if any trial has run
    pub best_trial: Option<OptimizationTrial>,

    /// Best score seen so far, one entry per trial
    /// (monotonically non-decreasing)
    pub convergence_history: Vec<f64>,
}
67
/// One evaluated configuration and its outcome.
#[derive(Debug, Clone)]
pub struct OptimizationTrial {
    /// Zero-based trial index within a search run
    pub trial_id: usize,

    /// Configuration tested in this trial
    pub configuration: HyperparameterConfiguration,

    /// Performance achieved (higher is better; random search uses the
    /// negated validation MSE)
    pub performance: f64,

    /// Resource usage
    // NOTE(review): currently always `ResourceUsage::default()` — real
    // tracking is not wired in yet.
    pub resource_usage: ResourceUsage,

    /// Trial duration (seconds assumed; may be 0.0 if not measured)
    pub duration: f64,
}
86
/// Classical + quantum resource usage for a single trial.
#[derive(Debug, Clone)]
pub struct ResourceUsage {
    /// Memory usage (MB)
    pub memory_mb: f64,

    /// Quantum resources used
    pub quantum_resources: QuantumResourceUsage,

    /// Training time (seconds assumed — confirm with producers)
    pub training_time: f64,
}
99
/// Quantum hardware/simulator resources consumed by a trial.
#[derive(Debug, Clone)]
pub struct QuantumResourceUsage {
    /// Number of qubits used
    pub qubits_used: usize,

    /// Circuit depth
    pub circuit_depth: usize,

    /// Total number of gates
    pub num_gates: usize,

    /// Coherence time consumed (units not established here — verify)
    pub coherence_time_used: f64,
}
115
116impl QuantumHyperparameterOptimizer {
117    /// Create a new hyperparameter optimizer
118    pub fn new(search_space: &HyperparameterSearchSpace) -> Self {
119        Self {
120            strategy: HyperparameterOptimizationStrategy::BayesianOptimization,
121            search_space: search_space.clone(),
122            optimization_history: OptimizationHistory::new(),
123            best_configuration: None,
124        }
125    }
126
127    /// Optimize hyperparameters for a given pipeline
128    pub fn optimize(
129        &mut self,
130        pipeline: QuantumMLPipeline,
131        X: &Array2<f64>,
132        y: &Array1<f64>,
133    ) -> Result<QuantumMLPipeline> {
134        match self.strategy {
135            HyperparameterOptimizationStrategy::RandomSearch => self.random_search(pipeline, X, y),
136            HyperparameterOptimizationStrategy::BayesianOptimization => {
137                self.bayesian_optimization(pipeline, X, y)
138            }
139            _ => {
140                // For now, default to random search
141                self.random_search(pipeline, X, y)
142            }
143        }
144    }
145
146    /// Get the best configuration found
147    pub fn best_configuration(&self) -> Option<&HyperparameterConfiguration> {
148        self.best_configuration.as_ref()
149    }
150
151    /// Get optimization history
152    pub fn history(&self) -> &OptimizationHistory {
153        &self.optimization_history
154    }
155
156    // Private methods
157
158    fn random_search(
159        &mut self,
160        mut pipeline: QuantumMLPipeline,
161        X: &Array2<f64>,
162        y: &Array1<f64>,
163    ) -> Result<QuantumMLPipeline> {
164        let num_trials = 20; // Configurable
165        let mut best_pipeline = pipeline.clone();
166        let mut best_score = f64::NEG_INFINITY;
167
168        for trial_id in 0..num_trials {
169            // Generate random configuration
170            let config = self.generate_random_configuration();
171
172            // Apply configuration to pipeline
173            pipeline.apply_hyperparameters(&config)?;
174
175            // Evaluate pipeline
176            let score = self.evaluate_configuration(&pipeline, X, y)?;
177
178            // Record trial
179            let trial = OptimizationTrial {
180                trial_id,
181                configuration: config.clone(),
182                performance: score,
183                resource_usage: ResourceUsage::default(),
184                duration: 0.0, // TODO: measure actual time
185            };
186            self.optimization_history.trials.push(trial);
187
188            // Update best if better
189            if score > best_score {
190                best_score = score;
191                best_pipeline = pipeline.clone();
192                self.best_configuration = Some(config);
193                self.optimization_history.best_trial =
194                    Some(self.optimization_history.trials.last().unwrap().clone());
195            }
196
197            self.optimization_history
198                .convergence_history
199                .push(best_score);
200        }
201
202        Ok(best_pipeline)
203    }
204
205    fn bayesian_optimization(
206        &mut self,
207        pipeline: QuantumMLPipeline,
208        X: &Array2<f64>,
209        y: &Array1<f64>,
210    ) -> Result<QuantumMLPipeline> {
211        // Simplified Bayesian optimization
212        // In practice, this would use a Gaussian Process
213        self.random_search(pipeline, X, y)
214    }
215
216    fn generate_random_configuration(&self) -> HyperparameterConfiguration {
217        use fastrand;
218
219        let mut classical_params = HashMap::new();
220        let mut quantum_params = HashMap::new();
221        let mut architecture_params = HashMap::new();
222
223        // Sample learning rate
224        let lr_min = self.search_space.learning_rates.0;
225        let lr_max = self.search_space.learning_rates.1;
226        let learning_rate = lr_min + fastrand::f64() * (lr_max - lr_min);
227        classical_params.insert("learning_rate".to_string(), learning_rate);
228
229        // Sample regularization
230        let reg_min = self.search_space.regularization.0;
231        let reg_max = self.search_space.regularization.1;
232        let regularization = reg_min + fastrand::f64() * (reg_max - reg_min);
233        classical_params.insert("regularization".to_string(), regularization);
234
235        // Sample batch size
236        if !self.search_space.batch_sizes.is_empty() {
237            let batch_size_idx = fastrand::usize(..self.search_space.batch_sizes.len());
238            let batch_size = self.search_space.batch_sizes[batch_size_idx] as f64;
239            classical_params.insert("batch_size".to_string(), batch_size);
240        }
241
242        // Sample quantum parameters
243        let qubit_min = self.search_space.quantum_params.num_qubits.0;
244        let qubit_max = self.search_space.quantum_params.num_qubits.1;
245        let num_qubits = qubit_min + fastrand::usize(..(qubit_max - qubit_min + 1));
246        quantum_params.insert("num_qubits".to_string(), num_qubits as f64);
247
248        let depth_min = self.search_space.quantum_params.circuit_depth.0;
249        let depth_max = self.search_space.quantum_params.circuit_depth.1;
250        let circuit_depth = depth_min + fastrand::usize(..(depth_max - depth_min + 1));
251        quantum_params.insert("circuit_depth".to_string(), circuit_depth as f64);
252
253        HyperparameterConfiguration {
254            classical_params,
255            quantum_params,
256            architecture_params,
257            performance_score: 0.0,
258        }
259    }
260
261    fn evaluate_configuration(
262        &self,
263        pipeline: &QuantumMLPipeline,
264        X: &Array2<f64>,
265        y: &Array1<f64>,
266    ) -> Result<f64> {
267        // Simple holdout evaluation
268        let split_point = (X.nrows() as f64 * 0.8) as usize;
269
270        let X_train = X.slice(scirs2_core::ndarray::s![0..split_point, ..]).to_owned();
271        let y_train = y.slice(scirs2_core::ndarray::s![0..split_point]).to_owned();
272        let X_val = X.slice(scirs2_core::ndarray::s![split_point.., ..]).to_owned();
273        let y_val = y.slice(scirs2_core::ndarray::s![split_point..]).to_owned();
274
275        let mut pipeline_copy = pipeline.clone();
276        pipeline_copy.fit(&X_train, &y_train)?;
277        let predictions = pipeline_copy.predict(&X_val)?;
278
279        // Calculate accuracy or R2 score
280        let score = predictions
281            .iter()
282            .zip(y_val.iter())
283            .map(|(pred, true_val)| (pred - true_val).powi(2))
284            .sum::<f64>()
285            / predictions.len() as f64;
286
287        Ok(-score) // Negative MSE (higher is better)
288    }
289}
290
291impl OptimizationHistory {
292    fn new() -> Self {
293        Self {
294            trials: Vec::new(),
295            best_trial: None,
296            convergence_history: Vec::new(),
297        }
298    }
299}
300
301impl Default for ResourceUsage {
302    fn default() -> Self {
303        Self {
304            memory_mb: 0.0,
305            quantum_resources: QuantumResourceUsage::default(),
306            training_time: 0.0,
307        }
308    }
309}
310
311impl Default for QuantumResourceUsage {
312    fn default() -> Self {
313        Self {
314            qubits_used: 0,
315            circuit_depth: 0,
316            num_gates: 0,
317            coherence_time_used: 0.0,
318        }
319    }
320}