use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    variational::VariationalCircuit,
};
use crate::optimization_stubs::{minimize, Method, OptimizeResult, Options};
use crate::parallel_ops_stubs::*;
use rustc_hash::FxHashMap;
use scirs2_core::ndarray::{Array1, Array2};
use std::sync::{Arc, Mutex};

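/// Optimizer for variational quantum circuits, supporting gradient-based,
/// quasi-Newton, and stochastic optimization methods.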
pub struct VariationalQuantumOptimizer {
    method: OptimizationMethod,
    config: OptimizationConfig,
    history: OptimizationHistory,
    fisher_cache: Option<FisherCache>,
}

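/// Supported optimization methods. Gradient-based and stochastic methods run
/// in the custom update loop; BFGS, L-BFGS, conjugate gradient, Nelder-Mead,
/// and Powell are delegated to the `optimization_stubs` minimizer.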
#[derive(Debug, Clone)]
pub enum OptimizationMethod {
    GradientDescent { learning_rate: f64 },
    Momentum { learning_rate: f64, momentum: f64 },
    Adam {
        learning_rate: f64,
        beta1: f64,
        beta2: f64,
        epsilon: f64,
    },
    RMSprop {
        learning_rate: f64,
        decay_rate: f64,
        epsilon: f64,
    },
    NaturalGradient {
        learning_rate: f64,
        regularization: f64,
    },
    BFGS,
    LBFGS { memory_size: usize },
    ConjugateGradient,
    NelderMead,
    Powell,
    SPSA {
        a: f64,
        c: f64,
        alpha: f64,
        gamma: f64,
    },
    QNSPSA {
        learning_rate: f64,
        regularization: f64,
        spsa_epsilon: f64,
    },
}

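/// Configuration for an optimization run: iteration budget, convergence
/// tolerances, gradient parallelism, and optional callback, early-stopping,
/// and gradient-clipping settings.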
#[derive(Clone)]
pub struct OptimizationConfig {
    pub max_iterations: usize,
    pub f_tol: f64,
    pub g_tol: f64,
    pub x_tol: f64,
    pub parallel_gradients: bool,
    pub batch_size: Option<usize>,
    pub seed: Option<u64>,
    pub callback: Option<Arc<dyn Fn(&[f64], f64) + Send + Sync>>,
    pub patience: Option<usize>,
    pub grad_clip: Option<f64>,
}

impl Default for OptimizationConfig {
    fn default() -> Self {
        Self {
            max_iterations: 100,
            f_tol: 1e-8,
            g_tol: 1e-8,
            x_tol: 1e-8,
            parallel_gradients: true,
            batch_size: None,
            seed: None,
            callback: None,
            patience: None,
            grad_clip: None,
        }
    }
}

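/// Per-iteration optimization history: parameters, loss values, gradient
/// norms, and iteration times (in milliseconds).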
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
    pub parameters: Vec<Vec<f64>>,
    pub loss_values: Vec<f64>,
    pub gradient_norms: Vec<f64>,
    pub iteration_times: Vec<f64>,
    pub total_iterations: usize,
    pub converged: bool,
}

impl OptimizationHistory {
    fn new() -> Self {
        Self {
            parameters: Vec::new(),
            loss_values: Vec::new(),
            gradient_norms: Vec::new(),
            iteration_times: Vec::new(),
            total_iterations: 0,
            converged: false,
        }
    }
}

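/// Cached Fisher-information inverse, reused while the parameters remain
/// within `threshold` (Euclidean distance) of the point where it was computed.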
struct FisherCache {
    matrix: Arc<Mutex<Option<Array2<f64>>>>,
    params: Arc<Mutex<Option<Vec<f64>>>>,
    threshold: f64,
}

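/// Mutable per-run state for the stateful update rules (momentum, Adam, RMSprop).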
struct OptimizerState {
    momentum: FxHashMap<String, f64>,
    adam_m: FxHashMap<String, f64>,
    adam_v: FxHashMap<String, f64>,
    rms_avg: FxHashMap<String, f64>,
    iteration: usize,
}

impl VariationalQuantumOptimizer {
    pub fn new(method: OptimizationMethod, config: OptimizationConfig) -> Self {
        let fisher_cache = match &method {
            OptimizationMethod::NaturalGradient { .. } | OptimizationMethod::QNSPSA { .. } => {
                Some(FisherCache {
                    matrix: Arc::new(Mutex::new(None)),
                    params: Arc::new(Mutex::new(None)),
                    threshold: 1e-3,
                })
            }
            _ => None,
        };

        Self {
            method,
            config,
            history: OptimizationHistory::new(),
            fisher_cache,
        }
    }

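    /// Runs the optimization, dispatching quasi-Newton and derivative-free
    /// methods to the scirs2 backend and all other methods to the custom loop.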
    pub fn optimize(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + 'static,
    ) -> QuantRS2Result<OptimizationResult> {
        let cost_fn = Arc::new(cost_fn);

        match &self.method {
            OptimizationMethod::BFGS
            | OptimizationMethod::LBFGS { .. }
            | OptimizationMethod::ConjugateGradient
            | OptimizationMethod::NelderMead
            | OptimizationMethod::Powell => self.optimize_with_scirs2(circuit, cost_fn),
            _ => self.optimize_custom(circuit, cost_fn),
        }
    }

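    /// Optimizes with a scirs2 minimizer, exposing the circuit cost function
    /// as a mutex-guarded objective over a flat parameter vector.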
    fn optimize_with_scirs2(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<OptimizationResult> {
        let param_names = circuit.parameter_names();
        let initial_params: Vec<f64> = param_names
            .iter()
            .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
            .collect();

        let circuit_clone = Arc::new(Mutex::new(circuit.clone()));
        let param_names_clone = param_names.clone();

        let objective = move |params: &scirs2_core::ndarray::ArrayView1<f64>| -> f64 {
            let params_slice = params.as_slice().unwrap();
            let mut param_map = FxHashMap::default();
            for (name, &value) in param_names_clone.iter().zip(params_slice) {
                param_map.insert(name.clone(), value);
            }

            let mut circuit = circuit_clone.lock().unwrap();
            if circuit.set_parameters(&param_map).is_err() {
                return f64::INFINITY;
            }

            match cost_fn(&*circuit) {
                Ok(loss) => loss,
                Err(_) => f64::INFINITY,
            }
        };

        let method = match &self.method {
            OptimizationMethod::BFGS => Method::BFGS,
            OptimizationMethod::LBFGS { memory_size: _ } => Method::LBFGS,
            // Conjugate gradient falls back to BFGS here.
            OptimizationMethod::ConjugateGradient => Method::BFGS,
            OptimizationMethod::NelderMead => Method::NelderMead,
            OptimizationMethod::Powell => Method::Powell,
            _ => unreachable!(),
        };

        let options = Options {
            max_iter: self.config.max_iterations,
            ftol: self.config.f_tol,
            gtol: self.config.g_tol,
            xtol: self.config.x_tol,
            ..Default::default()
        };

        let start_time = std::time::Instant::now();
        let initial_array = scirs2_core::ndarray::Array1::from_vec(initial_params.clone());
        let result = minimize(objective, &initial_array, method, Some(options))
            .map_err(|e| QuantRS2Error::InvalidInput(format!("Optimization failed: {:?}", e)))?;

        let mut final_params = FxHashMap::default();
        for (name, &value) in param_names.iter().zip(result.x.as_slice().unwrap()) {
            final_params.insert(name.clone(), value);
        }
        circuit.set_parameters(&final_params)?;

        self.history.parameters.push(result.x.to_vec());
        self.history.loss_values.push(result.fun);
        self.history.total_iterations = result.iterations;
        self.history.converged = result.success;

        Ok(OptimizationResult {
            optimal_parameters: final_params,
            final_loss: result.fun,
            iterations: result.iterations,
            converged: result.success,
            optimization_time: start_time.elapsed().as_secs_f64(),
            history: self.history.clone(),
        })
    }

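    /// Custom optimization loop: evaluate the cost, compute (optionally
    /// clipped) gradients, update parameters, and record history until
    /// convergence, early stopping, or the iteration budget is reached.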
    fn optimize_custom(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<OptimizationResult> {
        let mut state = OptimizerState {
            momentum: FxHashMap::default(),
            adam_m: FxHashMap::default(),
            adam_v: FxHashMap::default(),
            rms_avg: FxHashMap::default(),
            iteration: 0,
        };

        let param_names = circuit.parameter_names();
        let start_time = std::time::Instant::now();
        let mut best_loss = f64::INFINITY;
        let mut patience_counter = 0;

        for iter in 0..self.config.max_iterations {
            let iter_start = std::time::Instant::now();

            let loss = cost_fn(circuit)?;

            if loss < best_loss - self.config.f_tol {
                best_loss = loss;
                patience_counter = 0;
            } else if let Some(patience) = self.config.patience {
                patience_counter += 1;
                if patience_counter >= patience {
                    self.history.converged = true;
                    break;
                }
            }

            let gradients = self.compute_gradients(circuit, &cost_fn)?;

            let gradients = if let Some(max_norm) = self.config.grad_clip {
                self.clip_gradients(gradients, max_norm)
            } else {
                gradients
            };

            self.update_parameters(circuit, &gradients, &mut state)?;

            let current_params: Vec<f64> = param_names
                .iter()
                .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                .collect();

            let grad_norm = gradients.values().map(|g| g * g).sum::<f64>().sqrt();

            self.history.parameters.push(current_params);
            self.history.loss_values.push(loss);
            self.history.gradient_norms.push(grad_norm);
            self.history
                .iteration_times
                .push(iter_start.elapsed().as_secs_f64() * 1000.0);
            self.history.total_iterations = iter + 1;

            if let Some(callback) = &self.config.callback {
                let params: Vec<f64> = param_names
                    .iter()
                    .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                    .collect();
                callback(&params, loss);
            }

            if grad_norm < self.config.g_tol {
                self.history.converged = true;
                break;
            }

            state.iteration += 1;
        }

        let final_params = circuit.get_parameters();
        let final_loss = cost_fn(circuit)?;

        Ok(OptimizationResult {
            optimal_parameters: final_params,
            final_loss,
            iterations: self.history.total_iterations,
            converged: self.history.converged,
            optimization_time: start_time.elapsed().as_secs_f64(),
            history: self.history.clone(),
        })
    }

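    /// Computes gradients for all circuit parameters, in parallel when
    /// `parallel_gradients` is enabled.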
    fn compute_gradients(
        &self,
        circuit: &VariationalCircuit,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<FxHashMap<String, f64>> {
        let param_names = circuit.parameter_names();

        if self.config.parallel_gradients {
            let gradients: Vec<(String, f64)> = param_names
                .par_iter()
                .map(|param_name| {
                    let grad = self
                        .compute_single_gradient(circuit, param_name, cost_fn)
                        .unwrap_or(0.0);
                    (param_name.clone(), grad)
                })
                .collect();

            Ok(gradients.into_iter().collect())
        } else {
            let mut gradients = FxHashMap::default();
            for param_name in &param_names {
                let grad = self.compute_single_gradient(circuit, param_name, cost_fn)?;
                gradients.insert(param_name.clone(), grad);
            }
            Ok(gradients)
        }
    }

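    /// Gradient of a single parameter: SPSA perturbation for the SPSA method,
    /// parameter-shift rule otherwise.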
    fn compute_single_gradient(
        &self,
        circuit: &VariationalCircuit,
        param_name: &str,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<f64> {
        match &self.method {
            OptimizationMethod::SPSA { c, .. } => {
                self.spsa_gradient(circuit, param_name, cost_fn, *c)
            }
            _ => self.parameter_shift_gradient(circuit, param_name, cost_fn),
        }
    }

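    /// Parameter-shift rule with shift pi/2:
    /// grad = [L(theta + pi/2) - L(theta - pi/2)] / 2.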
    fn parameter_shift_gradient(
        &self,
        circuit: &VariationalCircuit,
        param_name: &str,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
    ) -> QuantRS2Result<f64> {
        let current_params = circuit.get_parameters();
        let current_value = *current_params.get(param_name).ok_or_else(|| {
            QuantRS2Error::InvalidInput(format!("Parameter {} not found", param_name))
        })?;

        let mut circuit_plus = circuit.clone();
        let mut params_plus = current_params.clone();
        params_plus.insert(
            param_name.to_string(),
            current_value + std::f64::consts::PI / 2.0,
        );
        circuit_plus.set_parameters(&params_plus)?;
        let loss_plus = cost_fn(&circuit_plus)?;

        let mut circuit_minus = circuit.clone();
        let mut params_minus = current_params.clone();
        params_minus.insert(
            param_name.to_string(),
            current_value - std::f64::consts::PI / 2.0,
        );
        circuit_minus.set_parameters(&params_minus)?;
        let loss_minus = cost_fn(&circuit_minus)?;

        Ok((loss_plus - loss_minus) / 2.0)
    }

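    /// Single-sample SPSA estimate: perturb the parameter by +/- epsilon at
    /// random and take the symmetric difference quotient.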
    fn spsa_gradient(
        &self,
        circuit: &VariationalCircuit,
        param_name: &str,
        cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
        epsilon: f64,
    ) -> QuantRS2Result<f64> {
        use scirs2_core::random::prelude::*;

        let mut rng = if let Some(seed) = self.config.seed {
            StdRng::seed_from_u64(seed)
        } else {
            StdRng::from_seed(thread_rng().gen())
        };

        let current_params = circuit.get_parameters();
        let perturbation = if rng.gen::<bool>() {
            epsilon
        } else {
            -epsilon
        };

        let mut circuit_plus = circuit.clone();
        let mut params_plus = current_params.clone();
        for (name, value) in params_plus.iter_mut() {
            if name == param_name {
                *value += perturbation;
            }
        }
        circuit_plus.set_parameters(&params_plus)?;
        let loss_plus = cost_fn(&circuit_plus)?;

        let mut circuit_minus = circuit.clone();
        let mut params_minus = current_params.clone();
        for (name, value) in params_minus.iter_mut() {
            if name == param_name {
                *value -= perturbation;
            }
        }
        circuit_minus.set_parameters(&params_minus)?;
        let loss_minus = cost_fn(&circuit_minus)?;

        Ok((loss_plus - loss_minus) / (2.0 * perturbation))
    }

    fn clip_gradients(
        &self,
        mut gradients: FxHashMap<String, f64>,
        max_norm: f64,
    ) -> FxHashMap<String, f64> {
        let norm = gradients.values().map(|g| g * g).sum::<f64>().sqrt();

        if norm > max_norm {
            let scale = max_norm / norm;
            for grad in gradients.values_mut() {
                *grad *= scale;
            }
        }

        gradients
    }

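    /// Applies one parameter update step according to the configured method.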
    fn update_parameters(
        &mut self,
        circuit: &mut VariationalCircuit,
        gradients: &FxHashMap<String, f64>,
        state: &mut OptimizerState,
    ) -> QuantRS2Result<()> {
        let mut new_params = circuit.get_parameters();

        match &self.method {
            OptimizationMethod::GradientDescent { learning_rate } => {
                for (param_name, &grad) in gradients {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * grad;
                    }
                }
            }
            OptimizationMethod::Momentum {
                learning_rate,
                momentum,
            } => {
                for (param_name, &grad) in gradients {
                    let velocity = state.momentum.entry(param_name.clone()).or_insert(0.0);
                    *velocity = momentum * *velocity - learning_rate * grad;

                    if let Some(value) = new_params.get_mut(param_name) {
                        *value += *velocity;
                    }
                }
            }
            OptimizationMethod::Adam {
                learning_rate,
                beta1,
                beta2,
                epsilon,
            } => {
                let t = state.iteration as f64 + 1.0;
                let lr_t = learning_rate * (1.0 - beta2.powf(t)).sqrt() / (1.0 - beta1.powf(t));

                for (param_name, &grad) in gradients {
                    let m = state.adam_m.entry(param_name.clone()).or_insert(0.0);
                    let v = state.adam_v.entry(param_name.clone()).or_insert(0.0);

                    *m = beta1 * *m + (1.0 - beta1) * grad;
                    *v = beta2 * *v + (1.0 - beta2) * grad * grad;

                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= lr_t * *m / (v.sqrt() + epsilon);
                    }
                }
            }
            OptimizationMethod::RMSprop {
                learning_rate,
                decay_rate,
                epsilon,
            } => {
                for (param_name, &grad) in gradients {
                    let avg = state.rms_avg.entry(param_name.clone()).or_insert(0.0);
                    *avg = decay_rate * *avg + (1.0 - decay_rate) * grad * grad;

                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * grad / (avg.sqrt() + epsilon);
                    }
                }
            }
            OptimizationMethod::NaturalGradient {
                learning_rate,
                regularization,
            } => {
                let fisher_inv =
                    self.compute_fisher_inverse(circuit, gradients, *regularization)?;
                let natural_grad = self.apply_fisher_inverse(&fisher_inv, gradients);

                for (param_name, &nat_grad) in &natural_grad {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * nat_grad;
                    }
                }
            }
            OptimizationMethod::SPSA { a, alpha, .. } => {
                let ak = a / (state.iteration as f64 + 1.0).powf(*alpha);

                for (param_name, &grad) in gradients {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= ak * grad;
                    }
                }
            }
            OptimizationMethod::QNSPSA {
                learning_rate,
                regularization,
                ..
            } => {
                let fisher_inv =
                    self.compute_fisher_inverse(circuit, gradients, *regularization)?;
                let natural_grad = self.apply_fisher_inverse(&fisher_inv, gradients);

                for (param_name, &nat_grad) in &natural_grad {
                    if let Some(value) = new_params.get_mut(param_name) {
                        *value -= learning_rate * nat_grad;
                    }
                }
            }
            _ => {
                return Err(QuantRS2Error::InvalidInput(
                    "Invalid optimization method".to_string(),
                ));
            }
        }

        circuit.set_parameters(&new_params)
    }

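    /// Builds a regularized outer-product approximation of the Fisher
    /// information matrix from the current gradients and returns its inverse,
    /// using the cache when the parameters have barely moved.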
    fn compute_fisher_inverse(
        &self,
        circuit: &VariationalCircuit,
        gradients: &FxHashMap<String, f64>,
        regularization: f64,
    ) -> QuantRS2Result<Array2<f64>> {
        let param_names: Vec<_> = gradients.keys().cloned().collect();
        let n_params = param_names.len();

        if let Some(cache) = &self.fisher_cache {
            if let Some(cached_matrix) = cache.matrix.lock().unwrap().as_ref() {
                if let Some(cached_params) = cache.params.lock().unwrap().as_ref() {
                    let current_params: Vec<f64> = param_names
                        .iter()
                        .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                        .collect();

                    let diff_norm: f64 = current_params
                        .iter()
                        .zip(cached_params.iter())
                        .map(|(a, b)| (a - b).powi(2))
                        .sum::<f64>()
                        .sqrt();

                    if diff_norm < cache.threshold {
                        return Ok(cached_matrix.clone());
                    }
                }
            }
        }

        // Outer-product approximation of the Fisher information matrix.
        let mut fisher = Array2::zeros((n_params, n_params));

        for i in 0..n_params {
            for j in i..n_params {
                let value = gradients[&param_names[i]] * gradients[&param_names[j]];
                fisher[[i, j]] = value;
                fisher[[j, i]] = value;
            }
        }

        for i in 0..n_params {
            fisher[[i, i]] += regularization;
        }

        let n = fisher.nrows();
        let mut fisher_inv = Array2::eye(n);

        if n == 1 {
            fisher_inv[[0, 0]] = 1.0 / fisher[[0, 0]];
        } else if n == 2 {
            let det = fisher[[0, 0]] * fisher[[1, 1]] - fisher[[0, 1]] * fisher[[1, 0]];
            if det.abs() < 1e-10 {
                return Err(QuantRS2Error::InvalidInput(
                    "Fisher matrix is singular".to_string(),
                ));
            }
            fisher_inv[[0, 0]] = fisher[[1, 1]] / det;
            fisher_inv[[0, 1]] = -fisher[[0, 1]] / det;
            fisher_inv[[1, 0]] = -fisher[[1, 0]] / det;
            fisher_inv[[1, 1]] = fisher[[0, 0]] / det;
        } else {
            // General case: invert via Gauss-Jordan elimination with partial pivoting.
            let mut aug = fisher.clone();
            for col in 0..n {
                // Select the pivot row with the largest absolute entry in this column.
                let mut pivot = col;
                for row in (col + 1)..n {
                    if aug[[row, col]].abs() > aug[[pivot, col]].abs() {
                        pivot = row;
                    }
                }
                if aug[[pivot, col]].abs() < 1e-10 {
                    return Err(QuantRS2Error::InvalidInput(
                        "Fisher matrix is singular".to_string(),
                    ));
                }
                if pivot != col {
                    for k in 0..n {
                        aug.swap([col, k], [pivot, k]);
                        fisher_inv.swap([col, k], [pivot, k]);
                    }
                }
                // Normalize the pivot row.
                let p = aug[[col, col]];
                for k in 0..n {
                    aug[[col, k]] /= p;
                    fisher_inv[[col, k]] /= p;
                }
                // Eliminate this column from all other rows.
                for row in 0..n {
                    if row != col {
                        let factor = aug[[row, col]];
                        for k in 0..n {
                            aug[[row, k]] -= factor * aug[[col, k]];
                            fisher_inv[[row, k]] -= factor * fisher_inv[[col, k]];
                        }
                    }
                }
            }
        }

        if let Some(cache) = &self.fisher_cache {
            let current_params: Vec<f64> = param_names
                .iter()
                .map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
                .collect();

            *cache.matrix.lock().unwrap() = Some(fisher_inv.clone());
            *cache.params.lock().unwrap() = Some(current_params);
        }

        Ok(fisher_inv)
    }

    fn apply_fisher_inverse(
        &self,
        fisher_inv: &Array2<f64>,
        gradients: &FxHashMap<String, f64>,
    ) -> FxHashMap<String, f64> {
        let param_names: Vec<_> = gradients.keys().cloned().collect();
        let grad_vec: Vec<f64> = param_names.iter().map(|name| gradients[name]).collect();

        let grad_array = Array1::from_vec(grad_vec);
        let natural_grad = fisher_inv.dot(&grad_array);

        let mut result = FxHashMap::default();
        for (i, name) in param_names.iter().enumerate() {
            result.insert(name.clone(), natural_grad[i]);
        }

        result
    }
}

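/// Result of an optimization run.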
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    pub optimal_parameters: FxHashMap<String, f64>,
    pub final_loss: f64,
    pub iterations: usize,
    pub converged: bool,
    pub optimization_time: f64,
    pub history: OptimizationHistory,
}

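/// Creates an L-BFGS optimizer with tight tolerances and gradient clipping,
/// intended for VQE-style problems.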
pub fn create_vqe_optimizer() -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 200,
        f_tol: 1e-10,
        g_tol: 1e-10,
        parallel_gradients: true,
        grad_clip: Some(1.0),
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(OptimizationMethod::LBFGS { memory_size: 10 }, config)
}

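/// Creates a BFGS optimizer with defaults intended for QAOA parameter landscapes.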
pub fn create_qaoa_optimizer() -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 100,
        parallel_gradients: true,
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(OptimizationMethod::BFGS, config)
}

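/// Creates a natural-gradient optimizer with a small regularization term
/// for numerical stability of the Fisher inverse.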
pub fn create_natural_gradient_optimizer(learning_rate: f64) -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 100,
        parallel_gradients: true,
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(
        OptimizationMethod::NaturalGradient {
            learning_rate,
            regularization: 1e-4,
        },
        config,
    )
}

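/// Creates an SPSA optimizer with the standard gain-sequence exponents
/// (alpha = 0.602, gamma = 0.101) and a fixed seed for reproducibility.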
pub fn create_spsa_optimizer() -> VariationalQuantumOptimizer {
    let config = OptimizationConfig {
        max_iterations: 500,
        seed: Some(42),
        ..Default::default()
    };

    VariationalQuantumOptimizer::new(
        OptimizationMethod::SPSA {
            a: 0.1,
            c: 0.1,
            alpha: 0.602,
            gamma: 0.101,
        },
        config,
    )
}

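/// Optimizer wrapper that enforces equality and inequality constraints by
/// adding quadratic penalty terms to the cost function.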
pub struct ConstrainedVariationalOptimizer {
    base_optimizer: VariationalQuantumOptimizer,
    constraints: Vec<Constraint>,
}

#[derive(Clone)]
pub struct Constraint {
    pub function: Arc<dyn Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync>,
    pub constraint_type: ConstraintType,
    pub value: f64,
}

#[derive(Debug, Clone, Copy)]
pub enum ConstraintType {
    Eq,
    Ineq,
}

impl ConstrainedVariationalOptimizer {
    pub fn new(base_optimizer: VariationalQuantumOptimizer) -> Self {
        Self {
            base_optimizer,
            constraints: Vec::new(),
        }
    }

    pub fn add_equality_constraint(
        &mut self,
        constraint_fn: impl Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync + 'static,
        value: f64,
    ) {
        self.constraints.push(Constraint {
            function: Arc::new(constraint_fn),
            constraint_type: ConstraintType::Eq,
            value,
        });
    }

    pub fn add_inequality_constraint(
        &mut self,
        constraint_fn: impl Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync + 'static,
        value: f64,
    ) {
        self.constraints.push(Constraint {
            function: Arc::new(constraint_fn),
            constraint_type: ConstraintType::Ineq,
            value,
        });
    }

    pub fn optimize(
        &mut self,
        circuit: &mut VariationalCircuit,
        cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + 'static,
    ) -> QuantRS2Result<OptimizationResult> {
        if self.constraints.is_empty() {
            return self.base_optimizer.optimize(circuit, cost_fn);
        }

        let cost_fn = Arc::new(cost_fn);
        let constraints = self.constraints.clone();
        let penalty_weight = 1000.0;

        let penalized_cost = move |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let base_cost = cost_fn(circuit)?;
            let params = circuit.get_parameters();

            let mut penalty = 0.0;
            for constraint in &constraints {
                let constraint_value = (constraint.function)(&params);
                match constraint.constraint_type {
                    ConstraintType::Eq => {
                        penalty += penalty_weight * (constraint_value - constraint.value).powi(2);
                    }
                    ConstraintType::Ineq => {
                        if constraint_value > constraint.value {
                            penalty +=
                                penalty_weight * (constraint_value - constraint.value).powi(2);
                        }
                    }
                }
            }

            Ok(base_cost + penalty)
        };

        self.base_optimizer.optimize(circuit, penalized_cost)
    }
}

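/// Random-search hyperparameter optimizer: samples each hyperparameter
/// uniformly from its range and runs an inner optimization per trial.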
pub struct HyperparameterOptimizer {
    search_space: FxHashMap<String, (f64, f64)>,
    n_trials: usize,
    inner_method: OptimizationMethod,
}

impl HyperparameterOptimizer {
    pub fn new(n_trials: usize) -> Self {
        Self {
            search_space: FxHashMap::default(),
            n_trials,
            inner_method: OptimizationMethod::BFGS,
        }
    }

    pub fn add_hyperparameter(&mut self, name: String, min_value: f64, max_value: f64) {
        self.search_space.insert(name, (min_value, max_value));
    }

    pub fn optimize(
        &self,
        circuit_builder: impl Fn(&FxHashMap<String, f64>) -> VariationalCircuit + Send + Sync,
        cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + Clone + 'static,
    ) -> QuantRS2Result<HyperparameterResult> {
        use scirs2_core::random::prelude::*;

        let mut rng = StdRng::from_seed(thread_rng().gen());
        let mut best_hyperparams = FxHashMap::default();
        let mut best_loss = f64::INFINITY;
        let mut all_trials = Vec::new();

        for _trial in 0..self.n_trials {
            let mut hyperparams = FxHashMap::default();
            for (name, &(min_val, max_val)) in &self.search_space {
                let value = rng.gen_range(min_val..max_val);
                hyperparams.insert(name.clone(), value);
            }

            let mut circuit = circuit_builder(&hyperparams);

            let config = OptimizationConfig {
                max_iterations: 50,
                ..Default::default()
            };

            let mut optimizer =
                VariationalQuantumOptimizer::new(self.inner_method.clone(), config);

            let result = optimizer.optimize(&mut circuit, cost_fn.clone())?;

            all_trials.push(HyperparameterTrial {
                hyperparameters: hyperparams.clone(),
                final_loss: result.final_loss,
                optimal_parameters: result.optimal_parameters,
            });

            if result.final_loss < best_loss {
                best_loss = result.final_loss;
                best_hyperparams = hyperparams;
            }
        }

        Ok(HyperparameterResult {
            best_hyperparameters: best_hyperparams,
            best_loss,
            all_trials,
        })
    }
}

#[derive(Debug, Clone)]
pub struct HyperparameterResult {
    pub best_hyperparameters: FxHashMap<String, f64>,
    pub best_loss: f64,
    pub all_trials: Vec<HyperparameterTrial>,
}

#[derive(Debug, Clone)]
pub struct HyperparameterTrial {
    pub hyperparameters: FxHashMap<String, f64>,
    pub final_loss: f64,
    pub optimal_parameters: FxHashMap<String, f64>,
}

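// Manual `Clone` impl: the optimizer clones circuits to evaluate
// shifted-parameter copies during gradient estimation.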
impl Clone for VariationalCircuit {
    fn clone(&self) -> Self {
        Self {
            gates: self.gates.clone(),
            param_map: self.param_map.clone(),
            num_qubits: self.num_qubits,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::qubit::QubitId;
    use crate::variational::VariationalGate;

    #[test]
    fn test_gradient_descent_optimizer() {
        let mut circuit = VariationalCircuit::new(1);
        circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));

        let config = OptimizationConfig {
            max_iterations: 10,
            ..Default::default()
        };

        let mut optimizer = VariationalQuantumOptimizer::new(
            OptimizationMethod::GradientDescent { learning_rate: 0.1 },
            config,
        );

        let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let theta = circuit
                .get_parameters()
                .get("theta")
                .copied()
                .unwrap_or(0.0);
            Ok((theta - 1.0).powi(2))
        };

        let result = optimizer.optimize(&mut circuit, cost_fn).unwrap();

        assert!(result.converged || result.iterations == 10);
        assert!((result.optimal_parameters["theta"] - 1.0).abs() < 0.1);
    }

    #[test]
    fn test_adam_optimizer() {
        let mut circuit = VariationalCircuit::new(2);
        circuit.add_gate(VariationalGate::ry(QubitId(0), "alpha".to_string(), 0.5));
        circuit.add_gate(VariationalGate::rz(QubitId(1), "beta".to_string(), 0.5));

        let config = OptimizationConfig {
            max_iterations: 100,
            f_tol: 1e-6,
            g_tol: 1e-6,
            ..Default::default()
        };

        let mut optimizer = VariationalQuantumOptimizer::new(
            OptimizationMethod::Adam {
                learning_rate: 0.1,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
            config,
        );

        let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let params = circuit.get_parameters();
            let alpha = params.get("alpha").copied().unwrap_or(0.0);
            let beta = params.get("beta").copied().unwrap_or(0.0);
            Ok(alpha.powi(2) + beta.powi(2))
        };

        let result = optimizer.optimize(&mut circuit, cost_fn).unwrap();

        assert!(result.optimal_parameters["alpha"].abs() < 0.1);
        assert!(result.optimal_parameters["beta"].abs() < 0.1);
    }

    #[test]
    fn test_constrained_optimization() {
        let mut circuit = VariationalCircuit::new(1);
        circuit.add_gate(VariationalGate::rx(QubitId(0), "x".to_string(), 2.0));

        let base_optimizer =
            VariationalQuantumOptimizer::new(OptimizationMethod::BFGS, Default::default());

        let mut constrained_opt = ConstrainedVariationalOptimizer::new(base_optimizer);

        // Require x >= 1, expressed as the inequality 1 - x <= 0.
        constrained_opt
            .add_inequality_constraint(|params| 1.0 - params.get("x").copied().unwrap_or(0.0), 0.0);

        let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
            let x = circuit.get_parameters().get("x").copied().unwrap_or(0.0);
            Ok(x.powi(2))
        };

        let result = constrained_opt.optimize(&mut circuit, cost_fn).unwrap();

        // The quadratic penalty drives the minimizer to the constraint boundary;
        // with penalty_weight = 1000 the optimum sits just below x = 1.
        assert!((result.optimal_parameters["x"] - 1.0).abs() < 0.05);
    }
}