//! quantrs2_ml/automl/search/hyperparameter_optimizer.rs
//!
//! Hyperparameter optimization for quantum ML pipelines.

use crate::automl::config::{HyperparameterSearchSpace, QuantumHyperparameterSpace};
use crate::automl::pipeline::QuantumMLPipeline;
use crate::error::Result;
use scirs2_core::ndarray::{Array1, Array2};
use std::collections::HashMap;

/// Searches a hyperparameter space for the best-performing quantum ML
/// pipeline configuration.
#[derive(Debug, Clone)]
pub struct QuantumHyperparameterOptimizer {
    /// Search strategy dispatched by `optimize`.
    strategy: HyperparameterOptimizationStrategy,

    /// Space of classical and quantum hyperparameters to sample from.
    search_space: HyperparameterSearchSpace,

    /// Record of all trials evaluated so far.
    optimization_history: OptimizationHistory,

    /// Best configuration found so far (`None` before the first trial).
    best_configuration: Option<HyperparameterConfiguration>,
}
26
/// Available hyperparameter search strategies.
///
/// Only `RandomSearch` and `BayesianOptimization` are dispatched explicitly
/// by `QuantumHyperparameterOptimizer::optimize`; all other variants
/// currently fall back to random search.
#[derive(Debug, Clone)]
pub enum HyperparameterOptimizationStrategy {
    RandomSearch,
    GridSearch,
    BayesianOptimization,
    EvolutionarySearch,
    QuantumAnnealing,
    QuantumVariational,
    HybridQuantumClassical,
}
38
/// One concrete assignment of hyperparameter values, together with the
/// performance score recorded for it.
#[derive(Debug, Clone)]
pub struct HyperparameterConfiguration {
    /// Classical parameters keyed by name (e.g. "learning_rate",
    /// "regularization", "batch_size").
    pub classical_params: HashMap<String, f64>,

    /// Quantum parameters keyed by name (e.g. "num_qubits",
    /// "circuit_depth"); integer values are stored as `f64`.
    pub quantum_params: HashMap<String, f64>,

    /// Architecture parameters keyed by name.
    pub architecture_params: HashMap<String, usize>,

    /// Performance score recorded for this configuration.
    pub performance_score: f64,
}
54
/// Accumulated record of a hyperparameter optimization run.
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
    /// Every trial evaluated, in execution order.
    pub trials: Vec<OptimizationTrial>,

    /// Copy of the best trial seen so far, if any.
    pub best_trial: Option<OptimizationTrial>,

    /// Running best score after each trial (one entry per trial).
    pub convergence_history: Vec<f64>,
}
67
/// A single evaluation of one hyperparameter configuration.
#[derive(Debug, Clone)]
pub struct OptimizationTrial {
    /// Zero-based index of this trial within the run.
    pub trial_id: usize,

    /// The configuration that was evaluated.
    pub configuration: HyperparameterConfiguration,

    /// Score achieved by this trial (higher is better).
    pub performance: f64,

    /// Resources consumed during the trial.
    pub resource_usage: ResourceUsage,

    /// Trial duration; presumably seconds — TODO confirm units.
    pub duration: f64,
}
86
/// Classical and quantum resources consumed by a trial.
#[derive(Debug, Clone)]
pub struct ResourceUsage {
    /// Memory consumed, in megabytes.
    pub memory_mb: f64,

    /// Quantum-resource usage for the trial.
    pub quantum_resources: QuantumResourceUsage,

    /// Training time; presumably seconds — TODO confirm units.
    pub training_time: f64,
}
99
/// Quantum resources consumed by a trial.
#[derive(Debug, Clone)]
pub struct QuantumResourceUsage {
    /// Number of qubits used.
    pub qubits_used: usize,

    /// Depth of the executed circuit.
    pub circuit_depth: usize,

    /// Total gate count.
    pub num_gates: usize,

    /// Coherence time consumed (units unspecified here).
    pub coherence_time_used: f64,
}
115
116impl QuantumHyperparameterOptimizer {
117 pub fn new(search_space: &HyperparameterSearchSpace) -> Self {
119 Self {
120 strategy: HyperparameterOptimizationStrategy::BayesianOptimization,
121 search_space: search_space.clone(),
122 optimization_history: OptimizationHistory::new(),
123 best_configuration: None,
124 }
125 }
126
127 pub fn optimize(
129 &mut self,
130 pipeline: QuantumMLPipeline,
131 X: &Array2<f64>,
132 y: &Array1<f64>,
133 ) -> Result<QuantumMLPipeline> {
134 match self.strategy {
135 HyperparameterOptimizationStrategy::RandomSearch => self.random_search(pipeline, X, y),
136 HyperparameterOptimizationStrategy::BayesianOptimization => {
137 self.bayesian_optimization(pipeline, X, y)
138 }
139 _ => {
140 self.random_search(pipeline, X, y)
142 }
143 }
144 }
145
146 pub fn best_configuration(&self) -> Option<&HyperparameterConfiguration> {
148 self.best_configuration.as_ref()
149 }
150
151 pub fn history(&self) -> &OptimizationHistory {
153 &self.optimization_history
154 }
155
156 fn random_search(
159 &mut self,
160 mut pipeline: QuantumMLPipeline,
161 X: &Array2<f64>,
162 y: &Array1<f64>,
163 ) -> Result<QuantumMLPipeline> {
164 let num_trials = 20; let mut best_pipeline = pipeline.clone();
166 let mut best_score = f64::NEG_INFINITY;
167
168 for trial_id in 0..num_trials {
169 let config = self.generate_random_configuration();
171
172 pipeline.apply_hyperparameters(&config)?;
174
175 let score = self.evaluate_configuration(&pipeline, X, y)?;
177
178 let trial = OptimizationTrial {
180 trial_id,
181 configuration: config.clone(),
182 performance: score,
183 resource_usage: ResourceUsage::default(),
184 duration: 0.0, };
186 self.optimization_history.trials.push(trial);
187
188 if score > best_score {
190 best_score = score;
191 best_pipeline = pipeline.clone();
192 self.best_configuration = Some(config);
193 self.optimization_history.best_trial = Some(
194 self.optimization_history
195 .trials
196 .last()
197 .expect("trials should not be empty")
198 .clone(),
199 );
200 }
201
202 self.optimization_history
203 .convergence_history
204 .push(best_score);
205 }
206
207 Ok(best_pipeline)
208 }
209
210 fn bayesian_optimization(
211 &mut self,
212 pipeline: QuantumMLPipeline,
213 X: &Array2<f64>,
214 y: &Array1<f64>,
215 ) -> Result<QuantumMLPipeline> {
216 self.random_search(pipeline, X, y)
219 }
220
221 fn generate_random_configuration(&self) -> HyperparameterConfiguration {
222 use fastrand;
223
224 let mut classical_params = HashMap::new();
225 let mut quantum_params = HashMap::new();
226 let mut architecture_params = HashMap::new();
227
228 let lr_min = self.search_space.learning_rates.0;
230 let lr_max = self.search_space.learning_rates.1;
231 let learning_rate = lr_min + fastrand::f64() * (lr_max - lr_min);
232 classical_params.insert("learning_rate".to_string(), learning_rate);
233
234 let reg_min = self.search_space.regularization.0;
236 let reg_max = self.search_space.regularization.1;
237 let regularization = reg_min + fastrand::f64() * (reg_max - reg_min);
238 classical_params.insert("regularization".to_string(), regularization);
239
240 if !self.search_space.batch_sizes.is_empty() {
242 let batch_size_idx = fastrand::usize(..self.search_space.batch_sizes.len());
243 let batch_size = self.search_space.batch_sizes[batch_size_idx] as f64;
244 classical_params.insert("batch_size".to_string(), batch_size);
245 }
246
247 let qubit_min = self.search_space.quantum_params.num_qubits.0;
249 let qubit_max = self.search_space.quantum_params.num_qubits.1;
250 let num_qubits = qubit_min + fastrand::usize(..(qubit_max - qubit_min + 1));
251 quantum_params.insert("num_qubits".to_string(), num_qubits as f64);
252
253 let depth_min = self.search_space.quantum_params.circuit_depth.0;
254 let depth_max = self.search_space.quantum_params.circuit_depth.1;
255 let circuit_depth = depth_min + fastrand::usize(..(depth_max - depth_min + 1));
256 quantum_params.insert("circuit_depth".to_string(), circuit_depth as f64);
257
258 HyperparameterConfiguration {
259 classical_params,
260 quantum_params,
261 architecture_params,
262 performance_score: 0.0,
263 }
264 }
265
266 fn evaluate_configuration(
267 &self,
268 pipeline: &QuantumMLPipeline,
269 X: &Array2<f64>,
270 y: &Array1<f64>,
271 ) -> Result<f64> {
272 let split_point = (X.nrows() as f64 * 0.8) as usize;
274
275 let X_train = X
276 .slice(scirs2_core::ndarray::s![0..split_point, ..])
277 .to_owned();
278 let y_train = y.slice(scirs2_core::ndarray::s![0..split_point]).to_owned();
279 let X_val = X
280 .slice(scirs2_core::ndarray::s![split_point.., ..])
281 .to_owned();
282 let y_val = y.slice(scirs2_core::ndarray::s![split_point..]).to_owned();
283
284 let mut pipeline_copy = pipeline.clone();
285 pipeline_copy.fit(&X_train, &y_train)?;
286 let predictions = pipeline_copy.predict(&X_val)?;
287
288 let score = predictions
290 .iter()
291 .zip(y_val.iter())
292 .map(|(pred, true_val)| (pred - true_val).powi(2))
293 .sum::<f64>()
294 / predictions.len() as f64;
295
296 Ok(-score) }
298}
299
300impl OptimizationHistory {
301 fn new() -> Self {
302 Self {
303 trials: Vec::new(),
304 best_trial: None,
305 convergence_history: Vec::new(),
306 }
307 }
308}
309
310impl Default for ResourceUsage {
311 fn default() -> Self {
312 Self {
313 memory_mb: 0.0,
314 quantum_resources: QuantumResourceUsage::default(),
315 training_time: 0.0,
316 }
317 }
318}
319
320impl Default for QuantumResourceUsage {
321 fn default() -> Self {
322 Self {
323 qubits_used: 0,
324 circuit_depth: 0,
325 num_gates: 0,
326 coherence_time_used: 0.0,
327 }
328 }
329}