//! quantrs2_ml/automl/search/hyperparameter_optimizer.rs
//!
//! Hyperparameter optimization for quantum ML pipelines.

use crate::automl::config::{HyperparameterSearchSpace, QuantumHyperparameterSpace};
use crate::automl::pipeline::QuantumMLPipeline;
use crate::error::Result;
use scirs2_core::ndarray::{Array1, Array2};
use std::collections::HashMap;
11#[derive(Debug, Clone)]
13pub struct QuantumHyperparameterOptimizer {
14 strategy: HyperparameterOptimizationStrategy,
16
17 search_space: HyperparameterSearchSpace,
19
20 optimization_history: OptimizationHistory,
22
23 best_configuration: Option<HyperparameterConfiguration>,
25}
26
/// Strategy used to explore the hyperparameter search space.
///
/// Only `RandomSearch` and `BayesianOptimization` have dedicated code paths
/// in `QuantumHyperparameterOptimizer::optimize`; all other variants
/// currently fall back to random search.
#[derive(Debug, Clone)]
pub enum HyperparameterOptimizationStrategy {
    /// Uniform random sampling of configurations.
    RandomSearch,
    /// Exhaustive sweep over a discretized grid.
    GridSearch,
    /// Surrogate-model-guided search (default strategy).
    BayesianOptimization,
    /// Population-based evolutionary search.
    EvolutionarySearch,
    /// Quantum-annealing-based search.
    QuantumAnnealing,
    /// Variational quantum circuit search.
    QuantumVariational,
    /// Hybrid quantum-classical search.
    HybridQuantumClassical,
}
38
/// A single sampled hyperparameter configuration together with the
/// performance score it achieved.
#[derive(Debug, Clone)]
pub struct HyperparameterConfiguration {
    /// Classical hyperparameters, e.g. `"learning_rate"`, `"regularization"`,
    /// `"batch_size"` (stored as `f64` even for integral values).
    pub classical_params: HashMap<String, f64>,

    /// Quantum hyperparameters, e.g. `"num_qubits"`, `"circuit_depth"`
    /// (stored as `f64` even though they are counts).
    pub quantum_params: HashMap<String, f64>,

    /// Architecture-level integer parameters (currently unpopulated by
    /// `generate_random_configuration`).
    pub architecture_params: HashMap<String, usize>,

    /// Score assigned after evaluation; `0.0` until the trial is scored.
    pub performance_score: f64,
}
54
55#[derive(Debug, Clone)]
57pub struct OptimizationHistory {
58 pub trials: Vec<OptimizationTrial>,
60
61 pub best_trial: Option<OptimizationTrial>,
63
64 pub convergence_history: Vec<f64>,
66}
67
68#[derive(Debug, Clone)]
70pub struct OptimizationTrial {
71 pub trial_id: usize,
73
74 pub configuration: HyperparameterConfiguration,
76
77 pub performance: f64,
79
80 pub resource_usage: ResourceUsage,
82
83 pub duration: f64,
85}
86
87#[derive(Debug, Clone)]
89pub struct ResourceUsage {
90 pub memory_mb: f64,
92
93 pub quantum_resources: QuantumResourceUsage,
95
96 pub training_time: f64,
98}
99
/// Quantum-hardware resources consumed by a single trial.
#[derive(Debug, Clone)]
pub struct QuantumResourceUsage {
    /// Number of qubits the circuit used.
    pub qubits_used: usize,

    /// Depth of the executed circuit.
    pub circuit_depth: usize,

    /// Total gate count of the executed circuit.
    pub num_gates: usize,

    /// Coherence time consumed (units defined by the backend; presumably
    /// microseconds — TODO confirm against the hardware layer).
    pub coherence_time_used: f64,
}
115
116impl QuantumHyperparameterOptimizer {
117 pub fn new(search_space: &HyperparameterSearchSpace) -> Self {
119 Self {
120 strategy: HyperparameterOptimizationStrategy::BayesianOptimization,
121 search_space: search_space.clone(),
122 optimization_history: OptimizationHistory::new(),
123 best_configuration: None,
124 }
125 }
126
127 pub fn optimize(
129 &mut self,
130 pipeline: QuantumMLPipeline,
131 X: &Array2<f64>,
132 y: &Array1<f64>,
133 ) -> Result<QuantumMLPipeline> {
134 match self.strategy {
135 HyperparameterOptimizationStrategy::RandomSearch => self.random_search(pipeline, X, y),
136 HyperparameterOptimizationStrategy::BayesianOptimization => {
137 self.bayesian_optimization(pipeline, X, y)
138 }
139 _ => {
140 self.random_search(pipeline, X, y)
142 }
143 }
144 }
145
146 pub fn best_configuration(&self) -> Option<&HyperparameterConfiguration> {
148 self.best_configuration.as_ref()
149 }
150
151 pub fn history(&self) -> &OptimizationHistory {
153 &self.optimization_history
154 }
155
156 fn random_search(
159 &mut self,
160 mut pipeline: QuantumMLPipeline,
161 X: &Array2<f64>,
162 y: &Array1<f64>,
163 ) -> Result<QuantumMLPipeline> {
164 let num_trials = 20; let mut best_pipeline = pipeline.clone();
166 let mut best_score = f64::NEG_INFINITY;
167
168 for trial_id in 0..num_trials {
169 let config = self.generate_random_configuration();
171
172 pipeline.apply_hyperparameters(&config)?;
174
175 let score = self.evaluate_configuration(&pipeline, X, y)?;
177
178 let trial = OptimizationTrial {
180 trial_id,
181 configuration: config.clone(),
182 performance: score,
183 resource_usage: ResourceUsage::default(),
184 duration: 0.0, };
186 self.optimization_history.trials.push(trial);
187
188 if score > best_score {
190 best_score = score;
191 best_pipeline = pipeline.clone();
192 self.best_configuration = Some(config);
193 self.optimization_history.best_trial =
194 Some(self.optimization_history.trials.last().unwrap().clone());
195 }
196
197 self.optimization_history
198 .convergence_history
199 .push(best_score);
200 }
201
202 Ok(best_pipeline)
203 }
204
205 fn bayesian_optimization(
206 &mut self,
207 pipeline: QuantumMLPipeline,
208 X: &Array2<f64>,
209 y: &Array1<f64>,
210 ) -> Result<QuantumMLPipeline> {
211 self.random_search(pipeline, X, y)
214 }
215
216 fn generate_random_configuration(&self) -> HyperparameterConfiguration {
217 use fastrand;
218
219 let mut classical_params = HashMap::new();
220 let mut quantum_params = HashMap::new();
221 let mut architecture_params = HashMap::new();
222
223 let lr_min = self.search_space.learning_rates.0;
225 let lr_max = self.search_space.learning_rates.1;
226 let learning_rate = lr_min + fastrand::f64() * (lr_max - lr_min);
227 classical_params.insert("learning_rate".to_string(), learning_rate);
228
229 let reg_min = self.search_space.regularization.0;
231 let reg_max = self.search_space.regularization.1;
232 let regularization = reg_min + fastrand::f64() * (reg_max - reg_min);
233 classical_params.insert("regularization".to_string(), regularization);
234
235 if !self.search_space.batch_sizes.is_empty() {
237 let batch_size_idx = fastrand::usize(..self.search_space.batch_sizes.len());
238 let batch_size = self.search_space.batch_sizes[batch_size_idx] as f64;
239 classical_params.insert("batch_size".to_string(), batch_size);
240 }
241
242 let qubit_min = self.search_space.quantum_params.num_qubits.0;
244 let qubit_max = self.search_space.quantum_params.num_qubits.1;
245 let num_qubits = qubit_min + fastrand::usize(..(qubit_max - qubit_min + 1));
246 quantum_params.insert("num_qubits".to_string(), num_qubits as f64);
247
248 let depth_min = self.search_space.quantum_params.circuit_depth.0;
249 let depth_max = self.search_space.quantum_params.circuit_depth.1;
250 let circuit_depth = depth_min + fastrand::usize(..(depth_max - depth_min + 1));
251 quantum_params.insert("circuit_depth".to_string(), circuit_depth as f64);
252
253 HyperparameterConfiguration {
254 classical_params,
255 quantum_params,
256 architecture_params,
257 performance_score: 0.0,
258 }
259 }
260
261 fn evaluate_configuration(
262 &self,
263 pipeline: &QuantumMLPipeline,
264 X: &Array2<f64>,
265 y: &Array1<f64>,
266 ) -> Result<f64> {
267 let split_point = (X.nrows() as f64 * 0.8) as usize;
269
270 let X_train = X.slice(scirs2_core::ndarray::s![0..split_point, ..]).to_owned();
271 let y_train = y.slice(scirs2_core::ndarray::s![0..split_point]).to_owned();
272 let X_val = X.slice(scirs2_core::ndarray::s![split_point.., ..]).to_owned();
273 let y_val = y.slice(scirs2_core::ndarray::s![split_point..]).to_owned();
274
275 let mut pipeline_copy = pipeline.clone();
276 pipeline_copy.fit(&X_train, &y_train)?;
277 let predictions = pipeline_copy.predict(&X_val)?;
278
279 let score = predictions
281 .iter()
282 .zip(y_val.iter())
283 .map(|(pred, true_val)| (pred - true_val).powi(2))
284 .sum::<f64>()
285 / predictions.len() as f64;
286
287 Ok(-score) }
289}
290
291impl OptimizationHistory {
292 fn new() -> Self {
293 Self {
294 trials: Vec::new(),
295 best_trial: None,
296 convergence_history: Vec::new(),
297 }
298 }
299}
300
301impl Default for ResourceUsage {
302 fn default() -> Self {
303 Self {
304 memory_mb: 0.0,
305 quantum_resources: QuantumResourceUsage::default(),
306 training_time: 0.0,
307 }
308 }
309}
310
311impl Default for QuantumResourceUsage {
312 fn default() -> Self {
313 Self {
314 qubits_used: 0,
315 circuit_depth: 0,
316 num_gates: 0,
317 coherence_time_used: 0.0,
318 }
319 }
320}