use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    variational::VariationalCircuit,
};
use scirs2_core::ndarray::{Array1, Array2};
use crate::optimization_stubs::{minimize, Method, OptimizeResult, Options};
use crate::parallel_ops_stubs::*;
use rustc_hash::FxHashMap;
use std::sync::{Arc, Mutex};

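/// Variational quantum circuit optimizer supporting gradient-based,
/// gradient-free, and quantum-natural-gradient methods.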
pub struct VariationalQuantumOptimizer {
    method: OptimizationMethod,
    config: OptimizationConfig,
    history: OptimizationHistory,
    fisher_cache: Option<FisherCache>,
}

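/// Optimization methods. The gradient-based variants (gradient descent, momentum,
/// Adam, RMSprop, natural gradient, SPSA, QN-SPSA) use the custom update loop in
/// `optimize_custom`; BFGS, L-BFGS, conjugate gradient, Nelder-Mead, and Powell
/// are delegated to the external `minimize` backend.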
#[derive(Debug, Clone)]
pub enum OptimizationMethod {
    GradientDescent { learning_rate: f64 },
    Momentum { learning_rate: f64, momentum: f64 },
    Adam {
        learning_rate: f64,
        beta1: f64,
        beta2: f64,
        epsilon: f64,
    },
    RMSprop {
        learning_rate: f64,
        decay_rate: f64,
        epsilon: f64,
    },
    NaturalGradient {
        learning_rate: f64,
        regularization: f64,
    },
    BFGS,
    LBFGS { memory_size: usize },
    ConjugateGradient,
    NelderMead,
    Powell,
    SPSA {
        a: f64,
        c: f64,
        alpha: f64,
        gamma: f64,
    },
    QNSPSA {
        learning_rate: f64,
        regularization: f64,
        spsa_epsilon: f64,
    },
}

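/// Shared configuration for an optimization run (iteration limits, tolerances,
/// parallel gradient evaluation, early stopping, and gradient clipping).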
#[derive(Clone)]
pub struct OptimizationConfig {
    pub max_iterations: usize,
    pub f_tol: f64,
    pub g_tol: f64,
    pub x_tol: f64,
    pub parallel_gradients: bool,
    pub batch_size: Option<usize>,
    pub seed: Option<u64>,
    pub callback: Option<Arc<dyn Fn(&[f64], f64) + Send + Sync>>,
    pub patience: Option<usize>,
    pub grad_clip: Option<f64>,
}

impl Default for OptimizationConfig {
    fn default() -> Self {
        Self {
            max_iterations: 100,
            f_tol: 1e-8,
            g_tol: 1e-8,
            x_tol: 1e-8,
            parallel_gradients: true,
            batch_size: None,
            seed: None,
            callback: None,
            patience: None,
            grad_clip: None,
        }
    }
}

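/// Per-iteration record of parameters, loss values, gradient norms, and timings.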
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
    pub parameters: Vec<Vec<f64>>,
    pub loss_values: Vec<f64>,
    pub gradient_norms: Vec<f64>,
    pub iteration_times: Vec<f64>,
    pub total_iterations: usize,
    pub converged: bool,
}

impl OptimizationHistory {
    const fn new() -> Self {
        Self {
            parameters: Vec::new(),
            loss_values: Vec::new(),
            gradient_norms: Vec::new(),
            iteration_times: Vec::new(),
            total_iterations: 0,
            converged: false,
        }
    }
}

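/// Cache for the approximate inverse Fisher information matrix, reused while the
/// parameters stay within `threshold` of the point at which it was computed.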
struct FisherCache {
    matrix: Arc<Mutex<Option<Array2<f64>>>>,
    params: Arc<Mutex<Option<Vec<f64>>>>,
    threshold: f64,
}

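/// Per-run mutable state for the stateful update rules (momentum velocities,
/// Adam moments, RMSprop running averages).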
struct OptimizerState {
    momentum: FxHashMap<String, f64>,
    adam_m: FxHashMap<String, f64>,
    adam_v: FxHashMap<String, f64>,
    rms_avg: FxHashMap<String, f64>,
    iteration: usize,
}

impl VariationalQuantumOptimizer {
    pub fn new(method: OptimizationMethod, config: OptimizationConfig) -> Self {
        let fisher_cache = match &method {
            OptimizationMethod::NaturalGradient { .. } | OptimizationMethod::QNSPSA { .. } => {
                Some(FisherCache {
                    matrix: Arc::new(Mutex::new(None)),
                    params: Arc::new(Mutex::new(None)),
                    threshold: 1e-3,
                })
            }
            _ => None,
        };

        Self {
            method,
            config,
            history: OptimizationHistory::new(),
            fisher_cache,
        }
    }

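    /// Runs the optimization, updating `circuit`'s parameters in place and
    /// returning the final loss, parameters, and history. Quasi-Newton and
    /// direct-search methods are delegated to the external `minimize` backend;
    /// the remaining methods use the custom gradient loop below.
    ///
    /// Minimal usage sketch (mirrors the tests at the bottom of this file):
    ///
    /// ```ignore
    /// let mut circuit = VariationalCircuit::new(1);
    /// circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));
    /// let mut optimizer = VariationalQuantumOptimizer::new(
    ///     OptimizationMethod::GradientDescent { learning_rate: 0.1 },
    ///     OptimizationConfig::default(),
    /// );
    /// let result = optimizer.optimize(&mut circuit, |c: &VariationalCircuit| -> QuantRS2Result<f64> {
    ///     let theta = c.get_parameters().get("theta").copied().unwrap_or(0.0);
    ///     Ok((theta - 1.0).powi(2))
    /// })?;
    /// ```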
    pub fn optimize(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + 'static,
    ) -> QuantRS2Result<OptimizationResult> {
        let cost_fn = Arc::new(cost_fn);

        match &self.method {
            OptimizationMethod::BFGS
            | OptimizationMethod::LBFGS { .. }
            | OptimizationMethod::ConjugateGradient
            | OptimizationMethod::NelderMead
            | OptimizationMethod::Powell => self.optimize_with_scirs2(circuit, cost_fn),
            _ => self.optimize_custom(circuit, cost_fn),
        }
    }

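    /// Delegates to the external `minimize` routine, flattening the named circuit
    /// parameters into an array-valued objective.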
    fn optimize_with_scirs2(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<OptimizationResult> {
        let param_names = circuit.parameter_names();
        let initial_params: Vec<f64> = param_names
            .iter()
            .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
            .collect();

        let circuit_clone = Arc::new(Mutex::new(circuit.clone()));
        let param_names_clone = param_names.clone();

        let objective = move |params: &scirs2_core::ndarray::ArrayView1<f64>| -> f64 {
            let params_slice = match params.as_slice() {
                Some(slice) => slice,
                None => return f64::INFINITY,
            };
            let mut param_map = FxHashMap::default();
            for (name, &value) in param_names_clone.iter().zip(params_slice) {
                param_map.insert(name.clone(), value);
            }

            let mut circuit = circuit_clone.lock().unwrap_or_else(|e| e.into_inner());
            if circuit.set_parameters(&param_map).is_err() {
                return f64::INFINITY;
            }

            cost_fn(&*circuit).unwrap_or(f64::INFINITY)
        };

        let method = match &self.method {
            OptimizationMethod::BFGS | OptimizationMethod::ConjugateGradient => Method::BFGS,
            OptimizationMethod::LBFGS { memory_size: _ } => Method::LBFGS,
            OptimizationMethod::NelderMead => Method::NelderMead,
            OptimizationMethod::Powell => Method::Powell,
            _ => unreachable!(),
        };

        let options = Options {
            max_iter: self.config.max_iterations,
            ftol: self.config.f_tol,
            gtol: self.config.g_tol,
            xtol: self.config.x_tol,
            ..Default::default()
        };

        let start_time = std::time::Instant::now();
        let initial_array = scirs2_core::ndarray::Array1::from_vec(initial_params);
        let result = minimize(objective, &initial_array, method, Some(options))
            .map_err(|e| QuantRS2Error::InvalidInput(format!("Optimization failed: {e:?}")))?;

        let mut final_params = FxHashMap::default();
        let result_slice = result.x.as_slice().ok_or_else(|| {
            QuantRS2Error::RuntimeError("Optimization result array is non-contiguous".to_string())
        })?;
        for (name, &value) in param_names.iter().zip(result_slice) {
            final_params.insert(name.clone(), value);
        }
        circuit.set_parameters(&final_params)?;

        self.history.parameters.push(result.x.to_vec());
        self.history.loss_values.push(result.fun);
        self.history.total_iterations = result.iterations;
        self.history.converged = result.success;

        Ok(OptimizationResult {
            optimal_parameters: final_params,
            final_loss: result.fun,
            iterations: result.iterations,
            converged: result.success,
            optimization_time: start_time.elapsed().as_secs_f64(),
            history: self.history.clone(),
        })
    }

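    /// Custom optimization loop for the gradient-based methods: evaluates the
    /// cost, computes (and optionally clips) gradients, applies the method's
    /// update rule, and checks early-stopping and gradient-norm convergence.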
    fn optimize_custom(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<OptimizationResult> {
        let mut state = OptimizerState {
            momentum: FxHashMap::default(),
            adam_m: FxHashMap::default(),
            adam_v: FxHashMap::default(),
            rms_avg: FxHashMap::default(),
            iteration: 0,
        };

        let param_names = circuit.parameter_names();
        let start_time = std::time::Instant::now();
        let mut best_loss = f64::INFINITY;
        let mut patience_counter = 0;

        for iter in 0..self.config.max_iterations {
            let iter_start = std::time::Instant::now();

            let loss = cost_fn(circuit)?;

            if loss < best_loss - self.config.f_tol {
                best_loss = loss;
                patience_counter = 0;
            } else if let Some(patience) = self.config.patience {
                patience_counter += 1;
                if patience_counter >= patience {
                    self.history.converged = true;
                    break;
                }
            }

            let gradients = self.compute_gradients(circuit, &cost_fn)?;

            let gradients = if let Some(max_norm) = self.config.grad_clip {
                self.clip_gradients(gradients, max_norm)
            } else {
                gradients
            };

            self.update_parameters(circuit, &gradients, &mut state)?;

            let current_params: Vec<f64> = param_names
                .iter()
                .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                .collect();

            let grad_norm = gradients.values().map(|g| g * g).sum::<f64>().sqrt();

            self.history.parameters.push(current_params);
            self.history.loss_values.push(loss);
            self.history.gradient_norms.push(grad_norm);
            self.history
                .iteration_times
                .push(iter_start.elapsed().as_secs_f64() * 1000.0);
            self.history.total_iterations = iter + 1;

            if let Some(callback) = &self.config.callback {
                let params: Vec<f64> = param_names
                    .iter()
                    .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                    .collect();
                callback(&params, loss);
            }

            if grad_norm < self.config.g_tol {
                self.history.converged = true;
                break;
            }

            state.iteration += 1;
        }

        let final_params = circuit.get_parameters();
        let final_loss = cost_fn(circuit)?;

        Ok(OptimizationResult {
            optimal_parameters: final_params,
            final_loss,
            iterations: self.history.total_iterations,
            converged: self.history.converged,
            optimization_time: start_time.elapsed().as_secs_f64(),
            history: self.history.clone(),
        })
    }

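    /// Computes the gradient for every circuit parameter, in parallel when
    /// `parallel_gradients` is enabled. Individual gradient failures in the
    /// parallel path are treated as zero.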
    fn compute_gradients(
        &self,
        circuit: &VariationalCircuit,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<FxHashMap<String, f64>> {
        let param_names = circuit.parameter_names();

        if self.config.parallel_gradients {
            let gradients: Vec<(String, f64)> = param_names
                .par_iter()
                .map(|param_name| {
                    let grad = self
                        .compute_single_gradient(circuit, param_name, cost_fn)
                        .unwrap_or(0.0);
                    (param_name.clone(), grad)
                })
                .collect();

            Ok(gradients.into_iter().collect())
        } else {
            let mut gradients = FxHashMap::default();
            for param_name in &param_names {
                let grad = self.compute_single_gradient(circuit, param_name, cost_fn)?;
                gradients.insert(param_name.clone(), grad);
            }
            Ok(gradients)
        }
    }

    fn compute_single_gradient(
        &self,
        circuit: &VariationalCircuit,
        param_name: &str,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<f64> {
        match &self.method {
            OptimizationMethod::SPSA { c, .. } => {
                self.spsa_gradient(circuit, param_name, cost_fn, *c)
            }
            _ => {
                self.parameter_shift_gradient(circuit, param_name, cost_fn)
            }
        }
    }

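    /// Parameter-shift rule: for gates generated by an operator with eigenvalues
    /// +/- 1/2 (such as single-qubit rotations), the exact gradient is
    /// (f(theta + pi/2) - f(theta - pi/2)) / 2.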
    fn parameter_shift_gradient(
        &self,
        circuit: &VariationalCircuit,
        param_name: &str,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<f64> {
        let current_params = circuit.get_parameters();
        let current_value = *current_params.get(param_name).ok_or_else(|| {
            QuantRS2Error::InvalidInput(format!("Parameter {param_name} not found"))
        })?;

        let mut circuit_plus = circuit.clone();
        let mut params_plus = current_params.clone();
        params_plus.insert(
            param_name.to_string(),
            current_value + std::f64::consts::PI / 2.0,
        );
        circuit_plus.set_parameters(&params_plus)?;
        let loss_plus = cost_fn(&circuit_plus)?;

        let mut circuit_minus = circuit.clone();
        let mut params_minus = current_params;
        params_minus.insert(
            param_name.to_string(),
            current_value - std::f64::consts::PI / 2.0,
        );
        circuit_minus.set_parameters(&params_minus)?;
        let loss_minus = cost_fn(&circuit_minus)?;

        Ok((loss_plus - loss_minus) / 2.0)
    }

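    /// SPSA-style finite-difference estimate for a single parameter: the
    /// parameter is perturbed by +/- epsilon with a random sign and the
    /// symmetric difference quotient is returned.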
    fn spsa_gradient(
        &self,
        circuit: &VariationalCircuit,
        param_name: &str,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
        epsilon: f64,
    ) -> QuantRS2Result<f64> {
        use scirs2_core::random::prelude::*;

        let mut rng = if let Some(seed) = self.config.seed {
            StdRng::seed_from_u64(seed)
        } else {
            StdRng::from_seed(thread_rng().gen())
        };

        let current_params = circuit.get_parameters();
        let perturbation = if rng.gen::<bool>() { epsilon } else { -epsilon };

        let mut circuit_plus = circuit.clone();
        let mut params_plus = current_params.clone();
        for (name, value) in &mut params_plus {
            if name == param_name {
                *value += perturbation;
            }
        }
        circuit_plus.set_parameters(&params_plus)?;
        let loss_plus = cost_fn(&circuit_plus)?;

        let mut circuit_minus = circuit.clone();
        let mut params_minus = current_params;
        for (name, value) in &mut params_minus {
            if name == param_name {
                *value -= perturbation;
            }
        }
        circuit_minus.set_parameters(&params_minus)?;
        let loss_minus = cost_fn(&circuit_minus)?;

        Ok((loss_plus - loss_minus) / (2.0 * perturbation))
    }

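    /// Rescales the gradient vector so its Euclidean norm does not exceed `max_norm`.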
    fn clip_gradients(
        &self,
        mut gradients: FxHashMap<String, f64>,
        max_norm: f64,
    ) -> FxHashMap<String, f64> {
        let norm = gradients.values().map(|g| g * g).sum::<f64>().sqrt();

        if norm > max_norm {
            let scale = max_norm / norm;
            for grad in gradients.values_mut() {
                *grad *= scale;
            }
        }

        gradients
    }

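    /// Applies one update step of the selected method to the circuit parameters.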
    fn update_parameters(
        &self,
        circuit: &mut VariationalCircuit,
        gradients: &FxHashMap<String, f64>,
        state: &mut OptimizerState,
    ) -> QuantRS2Result<()> {
        let mut new_params = circuit.get_parameters();

        match &self.method {
            OptimizationMethod::GradientDescent { learning_rate } => {
                for (param_name, &grad) in gradients {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * grad;
                    }
                }
            }
            OptimizationMethod::Momentum {
                learning_rate,
                momentum,
            } => {
                for (param_name, &grad) in gradients {
                    let velocity = state.momentum.entry(param_name.clone()).or_insert(0.0);
                    *velocity = momentum * *velocity - learning_rate * grad;

                    if let Some(value) = new_params.get_mut(param_name) {
                        *value += *velocity;
                    }
                }
            }
            OptimizationMethod::Adam {
                learning_rate,
                beta1,
                beta2,
                epsilon,
            } => {
                let t = state.iteration as f64 + 1.0;
                let lr_t = learning_rate * (1.0 - beta2.powf(t)).sqrt() / (1.0 - beta1.powf(t));

                for (param_name, &grad) in gradients {
                    let m = state.adam_m.entry(param_name.clone()).or_insert(0.0);
                    let v = state.adam_v.entry(param_name.clone()).or_insert(0.0);

                    *m = (1.0 - beta1).mul_add(grad, beta1 * *m);
                    *v = ((1.0 - beta2) * grad).mul_add(grad, beta2 * *v);

                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= lr_t * *m / (v.sqrt() + epsilon);
                    }
                }
            }
            OptimizationMethod::RMSprop {
                learning_rate,
                decay_rate,
                epsilon,
            } => {
                for (param_name, &grad) in gradients {
                    let avg = state.rms_avg.entry(param_name.clone()).or_insert(0.0);
                    *avg = ((1.0 - decay_rate) * grad).mul_add(grad, decay_rate * *avg);

                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * grad / (avg.sqrt() + epsilon);
                    }
                }
            }
            OptimizationMethod::NaturalGradient {
                learning_rate,
                regularization,
            } => {
                let fisher_inv =
                    self.compute_fisher_inverse(circuit, gradients, *regularization)?;
                let natural_grad = self.apply_fisher_inverse(&fisher_inv, gradients);

                for (param_name, &nat_grad) in &natural_grad {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * nat_grad;
                    }
                }
            }
            OptimizationMethod::SPSA { a, alpha, .. } => {
                let ak = a / (state.iteration as f64 + 1.0).powf(*alpha);

                for (param_name, &grad) in gradients {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= ak * grad;
                    }
                }
            }
            OptimizationMethod::QNSPSA {
                learning_rate,
                regularization,
                ..
            } => {
                let fisher_inv =
                    self.compute_fisher_inverse(circuit, gradients, *regularization)?;
                let natural_grad = self.apply_fisher_inverse(&fisher_inv, gradients);

                for (param_name, &nat_grad) in &natural_grad {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * nat_grad;
                    }
                }
            }
            _ => {
                return Err(QuantRS2Error::InvalidInput(
                    "Invalid optimization method".to_string(),
                ));
            }
        }

        circuit.set_parameters(&new_params)
    }

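    /// Builds a regularized approximation of the Fisher information matrix from
    /// the outer product of the gradient vector and inverts it. The 1x1 and 2x2
    /// cases are inverted in closed form; larger matrices currently fall back to
    /// the identity. Results are cached and reused while the parameters stay
    /// within the cache threshold.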
    fn compute_fisher_inverse(
        &self,
        circuit: &VariationalCircuit,
        gradients: &FxHashMap<String, f64>,
        regularization: f64,
    ) -> QuantRS2Result<Array2<f64>> {
        let param_names: Vec<_> = gradients.keys().cloned().collect();
        let n_params = param_names.len();

        if let Some(cache) = &self.fisher_cache {
            let cached_matrix_opt = cache
                .matrix
                .lock()
                .map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))?;
            if let Some(cached_matrix) = cached_matrix_opt.as_ref() {
                let cached_params_opt = cache
                    .params
                    .lock()
                    .map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))?;
                if let Some(cached_params) = cached_params_opt.as_ref() {
                    let current_params: Vec<f64> = param_names
                        .iter()
                        .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                        .collect();

                    let diff_norm: f64 = current_params
                        .iter()
                        .zip(cached_params.iter())
                        .map(|(a, b)| (a - b).powi(2))
                        .sum::<f64>()
                        .sqrt();

                    if diff_norm < cache.threshold {
                        return Ok(cached_matrix.clone());
                    }
                }
            }
        }

        let mut fisher = Array2::zeros((n_params, n_params));

        for i in 0..n_params {
            for j in i..n_params {
                let value = gradients[&param_names[i]] * gradients[&param_names[j]];
                fisher[[i, j]] = value;
                fisher[[j, i]] = value;
            }
        }

        for i in 0..n_params {
            fisher[[i, i]] += regularization;
        }

        let n = fisher.nrows();
        let mut fisher_inv = Array2::eye(n);

        if n == 1 {
            fisher_inv[[0, 0]] = 1.0 / fisher[[0, 0]];
        } else if n == 2 {
            let det = fisher[[0, 0]].mul_add(fisher[[1, 1]], -(fisher[[0, 1]] * fisher[[1, 0]]));
            if det.abs() < 1e-10 {
                return Err(QuantRS2Error::InvalidInput(
                    "Fisher matrix is singular".to_string(),
                ));
            }
            fisher_inv[[0, 0]] = fisher[[1, 1]] / det;
            fisher_inv[[0, 1]] = -fisher[[0, 1]] / det;
            fisher_inv[[1, 0]] = -fisher[[1, 0]] / det;
            fisher_inv[[1, 1]] = fisher[[0, 0]] / det;
        } else {
            // For larger parameter counts, keep the identity initialization as a
            // crude approximation of the inverse; a general matrix inverse is not
            // implemented here.
        }

        if let Some(cache) = &self.fisher_cache {
            let current_params: Vec<f64> = param_names
                .iter()
                .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                .collect();

            *cache
                .matrix
                .lock()
                .map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))? =
                Some(fisher_inv.clone());
            *cache
                .params
                .lock()
                .map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))? =
                Some(current_params);
        }

        Ok(fisher_inv)
    }

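    /// Multiplies the gradient vector by the inverse Fisher matrix to obtain the
    /// natural gradient, keyed back to parameter names.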
    fn apply_fisher_inverse(
        &self,
        fisher_inv: &Array2<f64>,
        gradients: &FxHashMap<String, f64>,
    ) -> FxHashMap<String, f64> {
        let param_names: Vec<_> = gradients.keys().cloned().collect();
        let grad_vec: Vec<f64> = param_names.iter().map(|name| gradients[name]).collect();

        let grad_array = Array1::from_vec(grad_vec);
        let natural_grad = fisher_inv.dot(&grad_array);

        let mut result = FxHashMap::default();
        for (i, name) in param_names.iter().enumerate() {
            result.insert(name.clone(), natural_grad[i]);
        }

        result
    }
}

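/// Result returned by a completed optimization run.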
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    pub optimal_parameters: FxHashMap<String, f64>,
    pub final_loss: f64,
    pub iterations: usize,
    pub converged: bool,
    pub optimization_time: f64,
    pub history: OptimizationHistory,
}

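/// Creates an L-BFGS optimizer preconfigured with tight tolerances and gradient
/// clipping, a reasonable default for VQE-style problems.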
pub fn create_vqe_optimizer() -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 200,
        f_tol: 1e-10,
        g_tol: 1e-10,
        parallel_gradients: true,
        grad_clip: Some(1.0),
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(OptimizationMethod::LBFGS { memory_size: 10 }, config)
}

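/// Creates a BFGS optimizer with default tolerances, suited to QAOA-style problems.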
pub fn create_qaoa_optimizer() -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 100,
        parallel_gradients: true,
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(OptimizationMethod::BFGS, config)
}

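/// Creates a natural-gradient optimizer with the given learning rate and a small
/// Fisher-matrix regularization term.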
pub fn create_natural_gradient_optimizer(learning_rate: f64) -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 100,
        parallel_gradients: true,
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(
        OptimizationMethod::NaturalGradient {
            learning_rate,
            regularization: 1e-4,
        },
        config,
    )
}

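/// Creates an SPSA optimizer with the standard gain-sequence exponents
/// (alpha = 0.602, gamma = 0.101) and a fixed seed for reproducibility.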
pub fn create_spsa_optimizer() -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 500,
        seed: Some(42),
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(
        OptimizationMethod::SPSA {
            a: 0.1,
            c: 0.1,
            alpha: 0.602,
            gamma: 0.101,
        },
        config,
    )
}

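/// Wraps a base optimizer and enforces equality/inequality constraints on the
/// circuit parameters via quadratic penalty terms added to the cost function.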
pub struct ConstrainedVariationalOptimizer {
    base_optimizer: VariationalQuantumOptimizer,
    constraints: Vec<Constraint>,
}

#[derive(Clone)]
pub struct Constraint {
    pub function: Arc<dyn Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync>,
    pub constraint_type: ConstraintType,
    pub value: f64,
}

#[derive(Debug, Clone, Copy)]
pub enum ConstraintType {
    Eq,
    Ineq,
}

impl ConstrainedVariationalOptimizer {
    pub const fn new(base_optimizer: VariationalQuantumOptimizer) -> Self {
        Self {
            base_optimizer,
            constraints: Vec::new(),
        }
    }

    pub fn add_equality_constraint(
        &mut self,
        constraint_fn: impl Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync + 'static,
        value: f64,
    ) {
        self.constraints.push(Constraint {
            function: Arc::new(constraint_fn),
            constraint_type: ConstraintType::Eq,
            value,
        });
    }

    pub fn add_inequality_constraint(
        &mut self,
        constraint_fn: impl Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync + 'static,
        value: f64,
    ) {
        self.constraints.push(Constraint {
            function: Arc::new(constraint_fn),
            constraint_type: ConstraintType::Ineq,
            value,
        });
    }

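    /// Optimizes the circuit. With constraints present, the cost function is
    /// augmented with quadratic penalties (weight 1000) for equality violations
    /// and for inequality constraints of the form g(params) <= value.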
    pub fn optimize(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + 'static,
    ) -> QuantRS2Result<OptimizationResult> {
        if self.constraints.is_empty() {
            return self.base_optimizer.optimize(circuit, cost_fn);
        }

        let cost_fn = Arc::new(cost_fn);
        let constraints = self.constraints.clone();
        let penalty_weight = 1000.0;

        let penalized_cost = move |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let base_cost = cost_fn(circuit)?;
            let params = circuit.get_parameters();

            let mut penalty = 0.0;
            for constraint in &constraints {
                let constraint_value = (constraint.function)(&params);
                match constraint.constraint_type {
                    ConstraintType::Eq => {
                        penalty += penalty_weight * (constraint_value - constraint.value).powi(2);
                    }
                    ConstraintType::Ineq => {
                        if constraint_value > constraint.value {
                            penalty +=
                                penalty_weight * (constraint_value - constraint.value).powi(2);
                        }
                    }
                }
            }

            Ok(base_cost + penalty)
        };

        self.base_optimizer.optimize(circuit, penalized_cost)
    }
}

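/// Random-search hyperparameter tuner: samples hyperparameters uniformly from the
/// registered ranges, rebuilds the circuit, and runs an inner optimization for
/// each trial.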
pub struct HyperparameterOptimizer {
    search_space: FxHashMap<String, (f64, f64)>,
    n_trials: usize,
    inner_method: OptimizationMethod,
}

impl HyperparameterOptimizer {
    pub fn new(n_trials: usize) -> Self {
        Self {
            search_space: FxHashMap::default(),
            n_trials,
            inner_method: OptimizationMethod::BFGS,
        }
    }

    pub fn add_hyperparameter(&mut self, name: String, min_value: f64, max_value: f64) {
        self.search_space.insert(name, (min_value, max_value));
    }

    pub fn optimize(
        &self,
        circuit_builder: impl Fn(&FxHashMap<String, f64>) -> VariationalCircuit + Send + Sync,
        cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + Clone + 'static,
    ) -> QuantRS2Result<HyperparameterResult> {
        use scirs2_core::random::prelude::*;

        let mut rng = StdRng::from_seed(thread_rng().gen());
        let mut best_hyperparams = FxHashMap::default();
        let mut best_loss = f64::INFINITY;
        let mut all_trials = Vec::new();

        for _trial in 0..self.n_trials {
            let mut hyperparams = FxHashMap::default();
            for (name, &(min_val, max_val)) in &self.search_space {
                let value = rng.gen_range(min_val..max_val);
                hyperparams.insert(name.clone(), value);
            }

            let mut circuit = circuit_builder(&hyperparams);

            let config = OptimizationConfig {
                max_iterations: 50,
                ..Default::default()
            };

            let mut optimizer =
                VariationalQuantumOptimizer::new(self.inner_method.clone(), config);

            let result = optimizer.optimize(&mut circuit, cost_fn.clone())?;

            all_trials.push(HyperparameterTrial {
                hyperparameters: hyperparams.clone(),
                final_loss: result.final_loss,
                optimal_parameters: result.optimal_parameters,
            });

            if result.final_loss < best_loss {
                best_loss = result.final_loss;
                best_hyperparams = hyperparams;
            }
        }

        Ok(HyperparameterResult {
            best_hyperparameters: best_hyperparams,
            best_loss,
            all_trials,
        })
    }
}

#[derive(Debug, Clone)]
pub struct HyperparameterResult {
    pub best_hyperparameters: FxHashMap<String, f64>,
    pub best_loss: f64,
    pub all_trials: Vec<HyperparameterTrial>,
}

#[derive(Debug, Clone)]
pub struct HyperparameterTrial {
    pub hyperparameters: FxHashMap<String, f64>,
    pub final_loss: f64,
    pub optimal_parameters: FxHashMap<String, f64>,
}

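// Manual Clone for VariationalCircuit so the optimizer routines above can evaluate
// parameter-shifted copies of a circuit.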
impl Clone for VariationalCircuit {
    fn clone(&self) -> Self {
        Self {
            gates: self.gates.clone(),
            param_map: self.param_map.clone(),
            num_qubits: self.num_qubits,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::qubit::QubitId;
    use crate::variational::VariationalGate;

    #[test]
    fn test_gradient_descent_optimizer() {
        let mut circuit = VariationalCircuit::new(1);
        circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));

        let config = OptimizationConfig {
            max_iterations: 10,
            ..Default::default()
        };

        let mut optimizer = VariationalQuantumOptimizer::new(
            OptimizationMethod::GradientDescent { learning_rate: 0.1 },
            config,
        );

        let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let theta = circuit
                .get_parameters()
                .get("theta")
                .copied()
                .unwrap_or(0.0);
            Ok((theta - 1.0).powi(2))
        };

        let result = optimizer
            .optimize(&mut circuit, cost_fn)
            .expect("Optimization should succeed");

        assert!(result.converged || result.iterations == 10);
        assert!((result.optimal_parameters["theta"] - 1.0).abs() < 0.1);
    }

    #[test]
    fn test_adam_optimizer() {
        let mut circuit = VariationalCircuit::new(2);
        circuit.add_gate(VariationalGate::ry(QubitId(0), "alpha".to_string(), 0.5));
        circuit.add_gate(VariationalGate::rz(QubitId(1), "beta".to_string(), 0.5));

        let config = OptimizationConfig {
            max_iterations: 100,
            f_tol: 1e-6,
            g_tol: 1e-6,
            ..Default::default()
        };

        let mut optimizer = VariationalQuantumOptimizer::new(
            OptimizationMethod::Adam {
                learning_rate: 0.1,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
            config,
        );

        let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let params = circuit.get_parameters();
            let alpha = params.get("alpha").copied().unwrap_or(0.0);
            let beta = params.get("beta").copied().unwrap_or(0.0);
            Ok(alpha.powi(2) + beta.powi(2))
        };

        let result = optimizer
            .optimize(&mut circuit, cost_fn)
            .expect("Optimization should succeed");

        assert!(result.optimal_parameters["alpha"].abs() < 0.1);
        assert!(result.optimal_parameters["beta"].abs() < 0.1);
    }

    #[test]
    fn test_constrained_optimization() {
        let mut circuit = VariationalCircuit::new(1);
        circuit.add_gate(VariationalGate::rx(QubitId(0), "x".to_string(), 2.0));

        let base_optimizer =
            VariationalQuantumOptimizer::new(OptimizationMethod::BFGS, Default::default());

        let mut constrained_opt = ConstrainedVariationalOptimizer::new(base_optimizer);

        constrained_opt
            .add_inequality_constraint(|params| 1.0 - params.get("x").copied().unwrap_or(0.0), 0.0);

        let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let x = circuit.get_parameters().get("x").copied().unwrap_or(0.0);
            Ok(x.powi(2))
        };

        let result = constrained_opt
            .optimize(&mut circuit, cost_fn)
            .expect("Constrained optimization should succeed");

        let optimized_x = result.optimal_parameters["x"];
        assert!(optimized_x >= 1.0 - 1e-6);
        assert!(optimized_x <= 2.0 + 1e-6);
    }
}