use crate::builder::Circuit;
use crate::scirs2_matrices::SparseMatrix;
use quantrs2_core::{
    error::{QuantRS2Error, QuantRS2Result},
    gate::GateOp,
    qubit::QubitId,
};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Objective function to be minimized by a classical optimizer.
pub trait ObjectiveFunction: Send + Sync {
    /// Evaluate the objective at the given parameter vector.
    fn evaluate(&self, parameters: &[f64]) -> f64;

    /// Analytic gradient, if available; `None` falls back to numerical differentiation.
    fn gradient(&self, _parameters: &[f64]) -> Option<Vec<f64>> {
        None
    }

    /// Analytic Hessian, if available.
    fn hessian(&self, _parameters: &[f64]) -> Option<Vec<Vec<f64>>> {
        None
    }

    /// Lower and upper bounds for each parameter.
    fn bounds(&self) -> Vec<(f64, f64)>;

    /// Human-readable name of the objective.
    fn name(&self) -> &str;
}

/// Classical optimization algorithms available to the circuit optimizer.
#[derive(Debug, Clone, PartialEq)]
pub enum OptimizationAlgorithm {
    /// Gradient descent with momentum.
    GradientDescent { learning_rate: f64, momentum: f64 },
    /// Adam adaptive-moment estimation.
    Adam {
        learning_rate: f64,
        beta1: f64,
        beta2: f64,
        epsilon: f64,
    },
    /// Limited-memory BFGS with bound constraints.
    LBFGSB {
        max_iterations: usize,
        tolerance: f64,
    },
    /// Derivative-free Nelder-Mead simplex search.
    NelderMead {
        max_iterations: usize,
        tolerance: f64,
    },
    /// Simulated annealing with geometric cooling.
    SimulatedAnnealing {
        initial_temperature: f64,
        cooling_rate: f64,
        min_temperature: f64,
    },
    /// Genetic algorithm over real-valued chromosomes.
    GeneticAlgorithm {
        population_size: usize,
        mutation_rate: f64,
        crossover_rate: f64,
    },
    /// Particle swarm optimization.
    ParticleSwarm {
        num_particles: usize,
        inertia_weight: f64,
        cognitive_weight: f64,
        social_weight: f64,
    },
    /// Bayesian optimization with a Gaussian-process surrogate.
    BayesianOptimization {
        acquisition_function: AcquisitionFunction,
        kernel: KernelType,
        num_initial_samples: usize,
    },
    /// QAOA parameter search driven by a nested classical optimizer.
    QAOA {
        num_layers: usize,
        classical_optimizer: Box<Self>,
    },
}

/// Acquisition functions for Bayesian optimization.
#[derive(Debug, Clone, PartialEq)]
pub enum AcquisitionFunction {
    ExpectedImprovement,
    ProbabilityOfImprovement,
    UpperConfidenceBound { kappa: f64 },
    Thompson,
}

/// Covariance kernels for the Gaussian-process surrogate.
#[derive(Debug, Clone, PartialEq)]
pub enum KernelType {
    RBF { length_scale: f64 },
    Matern { nu: f64, length_scale: f64 },
    Linear { variance: f64 },
    Periodic { period: f64, length_scale: f64 },
}

/// Configuration for an optimization run.
pub struct OptimizationConfig {
    /// Classical algorithm used to search the parameter space.
    pub algorithm: OptimizationAlgorithm,
    /// Maximum number of iterations (and hence objective evaluations).
    pub max_evaluations: usize,
    /// Convergence tolerance on the change in objective value.
    pub tolerance: f64,
    /// Optional RNG seed for reproducibility.
    pub seed: Option<u64>,
    /// Whether to evaluate candidates in parallel.
    pub parallel: bool,
    /// Thread count for parallel evaluation; `None` uses the default.
    pub num_threads: Option<usize>,
    /// Called after each iteration with `(iteration, objective_value)`.
    pub progress_callback: Option<Box<dyn Fn(usize, f64) + Send + Sync>>,
    /// Optional early-stopping rule.
    pub early_stopping: Option<EarlyStoppingCriteria>,
}

impl std::fmt::Debug for OptimizationConfig {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("OptimizationConfig")
            .field("algorithm", &self.algorithm)
            .field("max_evaluations", &self.max_evaluations)
            .field("tolerance", &self.tolerance)
            .field("seed", &self.seed)
            .field("parallel", &self.parallel)
            .field("num_threads", &self.num_threads)
            .field(
                "progress_callback",
                &self.progress_callback.as_ref().map(|_| "Some(callback)"),
            )
            .field("early_stopping", &self.early_stopping)
            .finish()
    }
}

impl Clone for OptimizationConfig {
    fn clone(&self) -> Self {
        Self {
            algorithm: self.algorithm.clone(),
            max_evaluations: self.max_evaluations,
            tolerance: self.tolerance,
            seed: self.seed,
            parallel: self.parallel,
            num_threads: self.num_threads,
            // The boxed callback cannot be cloned, so it is dropped here.
            progress_callback: None,
            early_stopping: self.early_stopping.clone(),
        }
    }
}

/// Criteria for stopping an optimization run before the evaluation budget is spent.
#[derive(Debug, Clone)]
pub struct EarlyStoppingCriteria {
    /// Number of iterations without improvement before stopping.
    pub patience: usize,
    /// Minimum change in the objective that counts as an improvement.
    pub min_delta: f64,
    /// Whether to monitor the best value seen rather than the latest one.
    pub monitor_best: bool,
}

/// Outcome of an optimization run.
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    /// Best parameter vector found.
    pub optimal_parameters: Vec<f64>,
    /// Objective value at the best parameters.
    pub optimal_value: f64,
    /// Total number of objective evaluations performed.
    pub num_evaluations: usize,
    /// Whether the run met the convergence tolerance.
    pub converged: bool,
    /// Per-iteration trace of the run.
    pub history: OptimizationHistory,
    /// Algorithm-specific metadata.
    pub algorithm_info: HashMap<String, String>,
    /// Wall-clock duration of the run.
    pub optimization_time: std::time::Duration,
}

/// Per-iteration trace recorded during optimization.
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
    /// Parameter vector at each iteration.
    pub parameters: Vec<Vec<f64>>,
    /// Objective value at each iteration.
    pub objective_values: Vec<f64>,
    /// Gradient norm at each iteration (currently a placeholder).
    pub gradient_norms: Vec<f64>,
    /// Step size at each iteration (currently a placeholder).
    pub step_sizes: Vec<f64>,
    /// Timestamp of each iteration.
    pub timestamps: Vec<std::time::Instant>,
}

/// Optimizer that tunes the parameters of a templated quantum circuit.
pub struct QuantumCircuitOptimizer {
    /// Parameterized circuit being optimized.
    circuit_template: CircuitTemplate,
    /// Optimization settings.
    config: OptimizationConfig,
    /// Shared per-iteration trace.
    history: Arc<Mutex<OptimizationHistory>>,
    /// Best parameters seen so far.
    best_parameters: Arc<Mutex<Option<Vec<f64>>>>,
    /// Best objective value seen so far.
    best_value: Arc<Mutex<f64>>,
}

/// Template describing a parameterized circuit's structure.
#[derive(Debug, Clone)]
pub struct CircuitTemplate {
    /// Ordered list of parameterized gates.
    pub structure: Vec<ParameterizedGate>,
    /// Tunable parameters referenced by the gates.
    pub parameters: Vec<Parameter>,
    /// Number of qubits the circuit acts on.
    pub num_qubits: usize,
}

/// A gate in the template whose angles may come from tunable parameters.
#[derive(Debug, Clone)]
pub struct ParameterizedGate {
    /// Name of the gate (e.g., "RY", "CNOT").
    pub gate_name: String,
    /// Indices of the qubits the gate acts on.
    pub qubits: Vec<usize>,
    /// Indices into the template's parameter list.
    pub parameter_indices: Vec<usize>,
    /// Constant gate arguments that are not optimized.
    pub fixed_parameters: Vec<f64>,
}

/// A single tunable parameter of the circuit template.
#[derive(Debug, Clone)]
pub struct Parameter {
    /// Parameter name.
    pub name: String,
    /// Lower bound of the allowed range.
    pub lower_bound: f64,
    /// Upper bound of the allowed range.
    pub upper_bound: f64,
    /// Starting value for the optimizer.
    pub initial_value: f64,
    /// Whether the parameter only takes discrete values.
    pub discrete: bool,
}

impl QuantumCircuitOptimizer {
    /// Create a new optimizer for the given template and configuration.
    #[must_use]
    pub fn new(template: CircuitTemplate, config: OptimizationConfig) -> Self {
        Self {
            circuit_template: template,
            config,
            history: Arc::new(Mutex::new(OptimizationHistory {
                parameters: Vec::new(),
                objective_values: Vec::new(),
                gradient_norms: Vec::new(),
                step_sizes: Vec::new(),
                timestamps: Vec::new(),
            })),
            best_parameters: Arc::new(Mutex::new(None)),
            best_value: Arc::new(Mutex::new(f64::INFINITY)),
        }
    }

    /// Run the configured algorithm against `objective` and return the result.
    pub fn optimize(
        &mut self,
        objective: Arc<dyn ObjectiveFunction>,
    ) -> QuantRS2Result<OptimizationResult> {
        let start_time = std::time::Instant::now();

        let initial_params: Vec<f64> = self
            .circuit_template
            .parameters
            .iter()
            .map(|p| p.initial_value)
            .collect();

        let bounds = objective.bounds();
        if bounds.len() != initial_params.len() {
            return Err(QuantRS2Error::InvalidInput(
                "Parameter count mismatch with bounds".to_string(),
            ));
        }

        let (optimal_parameters, optimal_value, num_evaluations, converged) =
            match &self.config.algorithm {
                OptimizationAlgorithm::GradientDescent {
                    learning_rate,
                    momentum,
                } => self.optimize_gradient_descent(
                    objective,
                    &initial_params,
                    *learning_rate,
                    *momentum,
                ),
                OptimizationAlgorithm::Adam {
                    learning_rate,
                    beta1,
                    beta2,
                    epsilon,
                } => self.optimize_adam(
                    objective,
                    &initial_params,
                    *learning_rate,
                    *beta1,
                    *beta2,
                    *epsilon,
                ),
                OptimizationAlgorithm::LBFGSB {
                    max_iterations,
                    tolerance,
                } => self.optimize_lbfgs(objective, &initial_params, *max_iterations, *tolerance),
                OptimizationAlgorithm::NelderMead {
                    max_iterations,
                    tolerance,
                } => self.optimize_nelder_mead(
                    objective,
                    &initial_params,
                    *max_iterations,
                    *tolerance,
                ),
                OptimizationAlgorithm::SimulatedAnnealing {
                    initial_temperature,
                    cooling_rate,
                    min_temperature,
                } => self.optimize_simulated_annealing(
                    objective,
                    &initial_params,
                    *initial_temperature,
                    *cooling_rate,
                    *min_temperature,
                ),
                OptimizationAlgorithm::BayesianOptimization {
                    acquisition_function,
                    kernel,
                    num_initial_samples,
                } => self.optimize_bayesian(
                    objective,
                    &initial_params,
                    acquisition_function,
                    kernel,
                    *num_initial_samples,
                ),
                _ => Err(QuantRS2Error::InvalidInput(
                    "Algorithm not yet implemented".to_string(),
                )),
            }?;

        let history = self
            .history
            .lock()
            .map_err(|e| QuantRS2Error::RuntimeError(format!("Failed to lock history: {e}")))?
            .clone();

        Ok(OptimizationResult {
            optimal_parameters,
            optimal_value,
            num_evaluations,
            converged,
            history,
            algorithm_info: HashMap::new(),
            optimization_time: start_time.elapsed(),
        })
    }

    fn optimize_gradient_descent(
        &self,
        objective: Arc<dyn ObjectiveFunction>,
        initial_params: &[f64],
        learning_rate: f64,
        momentum: f64,
    ) -> QuantRS2Result<(Vec<f64>, f64, usize, bool)> {
        let mut params = initial_params.to_vec();
        let mut velocity = vec![0.0; params.len()];
        let mut evaluations = 0;
        let mut best_value = f64::INFINITY;

        let bounds = objective.bounds();

        for iteration in 0..self.config.max_evaluations {
            let value = objective.evaluate(&params);
            evaluations += 1;

            if value < best_value {
                best_value = value;
                if let Ok(mut guard) = self.best_parameters.lock() {
                    *guard = Some(params.clone());
                }
                if let Ok(mut guard) = self.best_value.lock() {
                    *guard = best_value;
                }
            }

            self.record_iteration(&params, value, iteration);

            if iteration > 0 {
                let prev_value = self
                    .history
                    .lock()
                    .ok()
                    .and_then(|h| h.objective_values.get(iteration - 1).copied())
                    .unwrap_or(value);
                if (prev_value - value).abs() < self.config.tolerance {
                    return Ok((params, best_value, evaluations, true));
                }
            }

            let gradient = match objective.gradient(&params) {
                Some(grad) => grad,
                None => self.numerical_gradient(&*objective, &params)?,
            };

            for i in 0..params.len() {
                velocity[i] = momentum.mul_add(velocity[i], -(learning_rate * gradient[i]));
                params[i] += velocity[i];
                params[i] = params[i].clamp(bounds[i].0, bounds[i].1);
            }

            if let Some(callback) = &self.config.progress_callback {
                callback(iteration, value);
            }
        }

        Ok((params, best_value, evaluations, false))
    }

    fn optimize_adam(
        &self,
        objective: Arc<dyn ObjectiveFunction>,
        initial_params: &[f64],
        learning_rate: f64,
        beta1: f64,
        beta2: f64,
        epsilon: f64,
    ) -> QuantRS2Result<(Vec<f64>, f64, usize, bool)> {
        let mut params = initial_params.to_vec();
        // First- and second-moment estimates.
        let mut m = vec![0.0; params.len()];
        let mut v = vec![0.0; params.len()];
        let mut evaluations = 0;
        let mut best_value = f64::INFINITY;

        let bounds = objective.bounds();

        for iteration in 0..self.config.max_evaluations {
            let t = iteration + 1;

            let value = objective.evaluate(&params);
            evaluations += 1;

            if value < best_value {
                best_value = value;
                if let Ok(mut guard) = self.best_parameters.lock() {
                    *guard = Some(params.clone());
                }
                if let Ok(mut guard) = self.best_value.lock() {
                    *guard = best_value;
                }
            }

            self.record_iteration(&params, value, iteration);

            if iteration > 0 {
                let prev_value = self
                    .history
                    .lock()
                    .ok()
                    .and_then(|h| h.objective_values.get(iteration - 1).copied())
                    .unwrap_or(value);
                if (prev_value - value).abs() < self.config.tolerance {
                    return Ok((params, best_value, evaluations, true));
                }
            }

            let gradient = match objective.gradient(&params) {
                Some(grad) => grad,
                None => self.numerical_gradient(&*objective, &params)?,
            };

            for i in 0..params.len() {
                m[i] = beta1.mul_add(m[i], (1.0 - beta1) * gradient[i]);
                v[i] = beta2.mul_add(v[i], (1.0 - beta2) * gradient[i] * gradient[i]);

                // Bias-corrected moment estimates.
                let m_hat = m[i] / (1.0 - beta1.powi(t as i32));
                let v_hat = v[i] / (1.0 - beta2.powi(t as i32));

                params[i] -= learning_rate * m_hat / (v_hat.sqrt() + epsilon);
                params[i] = params[i].clamp(bounds[i].0, bounds[i].1);
            }

            if let Some(callback) = &self.config.progress_callback {
                callback(iteration, value);
            }
        }

        Ok((params, best_value, evaluations, false))
    }

    fn optimize_lbfgs(
        &self,
        objective: Arc<dyn ObjectiveFunction>,
        initial_params: &[f64],
        _max_iterations: usize,
        _tolerance: f64,
    ) -> QuantRS2Result<(Vec<f64>, f64, usize, bool)> {
        // L-BFGS-B is not implemented yet; fall back to momentum gradient descent.
        self.optimize_gradient_descent(objective, initial_params, 0.01, 0.9)
    }

    fn optimize_nelder_mead(
        &self,
        objective: Arc<dyn ObjectiveFunction>,
        initial_params: &[f64],
        max_iterations: usize,
        tolerance: f64,
    ) -> QuantRS2Result<(Vec<f64>, f64, usize, bool)> {
        let n = initial_params.len();
        let mut simplex = Vec::new();
        let mut evaluations = 0;

        // Build the initial simplex: the start point plus one perturbed vertex per dimension.
        simplex.push(initial_params.to_vec());
        for i in 0..n {
            let mut vertex = initial_params.to_vec();
            vertex[i] += if vertex[i] == 0.0 {
                0.00025
            } else {
                vertex[i] * 0.05
            };
            simplex.push(vertex);
        }

        let mut values: Vec<f64> = simplex
            .iter()
            .map(|params| {
                evaluations += 1;
                objective.evaluate(params)
            })
            .collect();

        let bounds = objective.bounds();

        for iteration in 0..max_iterations {
            // Order vertices from best (lowest value) to worst.
            let mut indices: Vec<usize> = (0..simplex.len()).collect();
            indices.sort_by(|&i, &j| {
                values[i]
                    .partial_cmp(&values[j])
                    .unwrap_or(std::cmp::Ordering::Equal)
            });

            let best_idx = indices[0];
            let best_value = values[best_idx];
            let worst_idx = indices[n];
            let second_worst_idx = indices[n - 1];

            self.record_iteration(&simplex[best_idx], best_value, iteration);

            // Converged once the spread of objective values collapses.
            let range = values[worst_idx] - values[best_idx];
            if range < tolerance {
                return Ok((simplex[best_idx].clone(), best_value, evaluations, true));
            }

            // Centroid of all vertices except the worst.
            let mut centroid = vec![0.0; n];
            for i in 0..n {
                for j in 0..n {
                    centroid[j] += simplex[indices[i]][j];
                }
            }
            for j in 0..n {
                centroid[j] /= n as f64;
            }

            // Reflection.
            let alpha = 1.0;
            let mut reflected = vec![0.0; n];
            for j in 0..n {
                reflected[j] = centroid[j] + alpha * (centroid[j] - simplex[worst_idx][j]);
                reflected[j] = reflected[j].clamp(bounds[j].0, bounds[j].1);
            }

            let reflected_value = objective.evaluate(&reflected);
            evaluations += 1;

            if values[best_idx] <= reflected_value && reflected_value < values[second_worst_idx] {
                simplex[worst_idx] = reflected;
                values[worst_idx] = reflected_value;
            } else if reflected_value < values[best_idx] {
                // Expansion.
                let gamma = 2.0;
                let mut expanded = vec![0.0; n];
                for j in 0..n {
                    expanded[j] = centroid[j] + gamma * (reflected[j] - centroid[j]);
                    expanded[j] = expanded[j].clamp(bounds[j].0, bounds[j].1);
                }

                let expanded_value = objective.evaluate(&expanded);
                evaluations += 1;

                if expanded_value < reflected_value {
                    simplex[worst_idx] = expanded;
                    values[worst_idx] = expanded_value;
                } else {
                    simplex[worst_idx] = reflected;
                    values[worst_idx] = reflected_value;
                }
            } else {
                // Contraction toward the centroid.
                let rho = 0.5;
                let mut contracted = vec![0.0; n];
                for j in 0..n {
                    contracted[j] = centroid[j] + rho * (simplex[worst_idx][j] - centroid[j]);
                    contracted[j] = contracted[j].clamp(bounds[j].0, bounds[j].1);
                }

                let contracted_value = objective.evaluate(&contracted);
                evaluations += 1;

                if contracted_value < values[worst_idx] {
                    simplex[worst_idx] = contracted;
                    values[worst_idx] = contracted_value;
                } else {
                    // Shrink: pull every vertex except the best toward the best vertex.
                    let sigma = 0.5;
                    for i in 0..simplex.len() {
                        if i == best_idx {
                            continue;
                        }
                        for j in 0..n {
                            simplex[i][j] = simplex[best_idx][j]
                                + sigma * (simplex[i][j] - simplex[best_idx][j]);
                            simplex[i][j] = simplex[i][j].clamp(bounds[j].0, bounds[j].1);
                        }
                        values[i] = objective.evaluate(&simplex[i]);
                        evaluations += 1;
                    }
                }
            }

            if let Some(callback) = &self.config.progress_callback {
                callback(iteration, best_value);
            }
        }

        // Out of iterations: return the best vertex found.
        let mut best_idx = 0;
        let mut best_value = values[0];
        for i in 1..values.len() {
            if values[i] < best_value {
                best_value = values[i];
                best_idx = i;
            }
        }

        Ok((simplex[best_idx].clone(), best_value, evaluations, false))
    }

    fn optimize_simulated_annealing(
        &self,
        objective: Arc<dyn ObjectiveFunction>,
        initial_params: &[f64],
        initial_temperature: f64,
        cooling_rate: f64,
        min_temperature: f64,
    ) -> QuantRS2Result<(Vec<f64>, f64, usize, bool)> {
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();

        let mut current_params = initial_params.to_vec();
        let mut current_value = objective.evaluate(&current_params);
        let mut best_params = current_params.clone();
        let mut best_value = current_value;
        let mut temperature = initial_temperature;
        let mut evaluations = 1;

        let bounds = objective.bounds();

        for iteration in 0..self.config.max_evaluations {
            if temperature < min_temperature {
                break;
            }

            // Propose a neighbor; step size shrinks with the temperature.
            let mut neighbor_params = current_params.clone();
            for i in 0..neighbor_params.len() {
                let range = bounds[i].1 - bounds[i].0;
                let step = rng.gen_range(-0.1..0.1) * range * temperature / initial_temperature;
                neighbor_params[i] = (neighbor_params[i] + step).clamp(bounds[i].0, bounds[i].1);
            }

            let neighbor_value = objective.evaluate(&neighbor_params);
            evaluations += 1;

            // Metropolis acceptance: always take improvements, sometimes take uphill moves.
            let delta = neighbor_value - current_value;
            if delta < 0.0 || rng.gen::<f64>() < (-delta / temperature).exp() {
                current_params = neighbor_params;
                current_value = neighbor_value;

                if current_value < best_value {
                    best_params.clone_from(&current_params);
                    best_value = current_value;
                }
            }

            self.record_iteration(&current_params, current_value, iteration);

            temperature *= cooling_rate;

            if let Some(callback) = &self.config.progress_callback {
                callback(iteration, best_value);
            }
        }

        Ok((
            best_params,
            best_value,
            evaluations,
            temperature < min_temperature,
        ))
    }

    fn optimize_bayesian(
        &self,
        objective: Arc<dyn ObjectiveFunction>,
        initial_params: &[f64],
        _acquisition_function: &AcquisitionFunction,
        _kernel: &KernelType,
        _num_initial_samples: usize,
    ) -> QuantRS2Result<(Vec<f64>, f64, usize, bool)> {
        // The Gaussian-process surrogate is not implemented yet; fall back to
        // derivative-free Nelder-Mead with the configured budget and tolerance.
        self.optimize_nelder_mead(
            objective,
            initial_params,
            self.config.max_evaluations,
            self.config.tolerance,
        )
    }

    /// Estimate the gradient by central finite differences.
    fn numerical_gradient(
        &self,
        objective: &dyn ObjectiveFunction,
        params: &[f64],
    ) -> QuantRS2Result<Vec<f64>> {
        let epsilon = 1e-8;
        let mut gradient = vec![0.0; params.len()];

        for i in 0..params.len() {
            let mut params_plus = params.to_vec();
            let mut params_minus = params.to_vec();

            params_plus[i] += epsilon;
            params_minus[i] -= epsilon;

            let f_plus = objective.evaluate(&params_plus);
            let f_minus = objective.evaluate(&params_minus);

            gradient[i] = (f_plus - f_minus) / (2.0 * epsilon);
        }

        Ok(gradient)
    }

    fn record_iteration(&self, params: &[f64], value: f64, _iteration: usize) {
        if let Ok(mut history) = self.history.lock() {
            history.parameters.push(params.to_vec());
            history.objective_values.push(value);
            // Gradient norms and step sizes are not tracked yet; push placeholders.
            history.gradient_norms.push(0.0);
            history.step_sizes.push(0.0);
            history.timestamps.push(std::time::Instant::now());
        }
    }

    /// Best parameter vector found so far, if any evaluation has completed.
    #[must_use]
    pub fn get_best_parameters(&self) -> Option<Vec<f64>> {
        self.best_parameters.lock().ok().and_then(|g| g.clone())
    }

    /// Best objective value found so far, or `f64::INFINITY` if none.
    #[must_use]
    pub fn get_best_value(&self) -> f64 {
        self.best_value.lock().ok().map_or(f64::INFINITY, |g| *g)
    }

    /// Build a concrete circuit from the template using the given parameters.
    pub fn build_circuit(&self, parameters: &[f64]) -> QuantRS2Result<Circuit<32>> {
        if parameters.len() != self.circuit_template.parameters.len() {
            return Err(QuantRS2Error::InvalidInput(
                "Parameter count mismatch".to_string(),
            ));
        }

        let circuit = Circuit::<32>::new();

        for _gate_template in &self.circuit_template.structure {
            // Gate construction from the template is not implemented yet.
        }

        Ok(circuit)
    }
}

/// VQE objective: the energy of a parameterized ansatz under a Hamiltonian.
pub struct VQEObjective {
    hamiltonian: SparseMatrix,
    circuit_template: CircuitTemplate,
    bounds: Vec<(f64, f64)>,
}

impl VQEObjective {
    /// Create a VQE objective from a Hamiltonian and an ansatz template.
    #[must_use]
    pub fn new(hamiltonian: SparseMatrix, circuit_template: CircuitTemplate) -> Self {
        let bounds = circuit_template
            .parameters
            .iter()
            .map(|p| (p.lower_bound, p.upper_bound))
            .collect();

        Self {
            hamiltonian,
            circuit_template,
            bounds,
        }
    }
}

impl ObjectiveFunction for VQEObjective {
    fn evaluate(&self, parameters: &[f64]) -> f64 {
        // Placeholder surrogate; a full implementation would prepare the
        // ansatz state and return the expectation value of `self.hamiltonian`.
        parameters.iter().map(|x| x * x).sum::<f64>()
    }

    fn bounds(&self) -> Vec<(f64, f64)> {
        self.bounds.clone()
    }

    fn name(&self) -> &'static str {
        "VQE"
    }
}

/// QAOA objective over alternating problem and mixer Hamiltonians.
pub struct QAOAObjective {
    problem_hamiltonian: SparseMatrix,
    mixer_hamiltonian: SparseMatrix,
    num_layers: usize,
    bounds: Vec<(f64, f64)>,
}

impl QAOAObjective {
    /// Create a QAOA objective with `num_layers` alternating layers.
    #[must_use]
    pub fn new(
        problem_hamiltonian: SparseMatrix,
        mixer_hamiltonian: SparseMatrix,
        num_layers: usize,
    ) -> Self {
        // One (gamma, beta) angle pair per layer, each in [0, 2*pi].
        let bounds = vec![(0.0, 2.0 * std::f64::consts::PI); 2 * num_layers];

        Self {
            problem_hamiltonian,
            mixer_hamiltonian,
            num_layers,
            bounds,
        }
    }
}

impl ObjectiveFunction for QAOAObjective {
    fn evaluate(&self, parameters: &[f64]) -> f64 {
        // Placeholder surrogate; a full implementation would simulate the
        // alternating problem/mixer evolution and measure the cost Hamiltonian.
        parameters.iter().map(|x| x.sin().powi(2)).sum::<f64>()
    }

    fn bounds(&self) -> Vec<(f64, f64)> {
        self.bounds.clone()
    }

    fn name(&self) -> &'static str {
        "QAOA"
    }
}

impl Default for OptimizationConfig {
    fn default() -> Self {
        Self {
            algorithm: OptimizationAlgorithm::Adam {
                learning_rate: 0.01,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
            max_evaluations: 1000,
            tolerance: 1e-6,
            seed: None,
            parallel: false,
            num_threads: None,
            progress_callback: None,
            early_stopping: None,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optimization_config_creation() {
        let config = OptimizationConfig::default();
        assert_eq!(config.max_evaluations, 1000);
        assert_eq!(config.tolerance, 1e-6);
    }

    #[test]
    fn test_vqe_objective() {
        let hamiltonian = SparseMatrix::identity(4);
        let template = CircuitTemplate {
            structure: Vec::new(),
            parameters: vec![Parameter {
                name: "theta".to_string(),
                lower_bound: 0.0,
                upper_bound: 2.0 * std::f64::consts::PI,
                initial_value: 0.5,
                discrete: false,
            }],
            num_qubits: 2,
        };

        let objective = VQEObjective::new(hamiltonian, template);
        let value = objective.evaluate(&[0.5]);
        assert!(value >= 0.0);
    }

    #[test]
    fn test_qaoa_objective() {
        let problem_h = SparseMatrix::identity(4);
        let mixer_h = SparseMatrix::identity(4);

        let objective = QAOAObjective::new(problem_h, mixer_h, 2);
        assert_eq!(objective.bounds().len(), 4);
        let value = objective.evaluate(&[0.5, 1.0, 1.5, 2.0]);
        assert!(value >= 0.0);
    }

    #[test]
    fn test_circuit_template() {
        let template = CircuitTemplate {
            structure: vec![ParameterizedGate {
                gate_name: "RY".to_string(),
                qubits: vec![0],
                parameter_indices: vec![0],
                fixed_parameters: Vec::new(),
            }],
            parameters: vec![Parameter {
                name: "theta".to_string(),
                lower_bound: 0.0,
                upper_bound: 2.0 * std::f64::consts::PI,
                initial_value: 0.0,
                discrete: false,
            }],
            num_qubits: 1,
        };

        assert_eq!(template.parameters.len(), 1);
        assert_eq!(template.structure.len(), 1);
    }

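    // Added check (sketch): the Clone impl above documents that the boxed
    // progress callback is dropped on clone; this pins that behavior down.
    #[test]
    fn test_config_clone_drops_callback() {
        let mut config = OptimizationConfig::default();
        config.progress_callback = Some(Box::new(|_, _| {}));
        let cloned = config.clone();
        assert!(cloned.progress_callback.is_none());
        assert_eq!(cloned.max_evaluations, config.max_evaluations);
    }
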
    struct TestObjective;

    impl ObjectiveFunction for TestObjective {
        fn evaluate(&self, parameters: &[f64]) -> f64 {
            parameters.iter().map(|x| (x - 1.0).powi(2)).sum()
        }

        fn bounds(&self) -> Vec<(f64, f64)> {
            vec![(-5.0, 5.0); 2]
        }

        fn name(&self) -> &'static str {
            "test"
        }
    }

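    // Added end-to-end sketch: minimize the quadratic TestObjective with plain
    // gradient descent; the learning rate and budget here are illustrative.
    #[test]
    fn test_gradient_descent_minimizes_quadratic() {
        let template = CircuitTemplate {
            structure: Vec::new(),
            parameters: vec![
                Parameter {
                    name: "x1".to_string(),
                    lower_bound: -5.0,
                    upper_bound: 5.0,
                    initial_value: 0.0,
                    discrete: false,
                },
                Parameter {
                    name: "x2".to_string(),
                    lower_bound: -5.0,
                    upper_bound: 5.0,
                    initial_value: 0.0,
                    discrete: false,
                },
            ],
            num_qubits: 1,
        };
        let config = OptimizationConfig {
            algorithm: OptimizationAlgorithm::GradientDescent {
                learning_rate: 0.1,
                momentum: 0.0,
            },
            max_evaluations: 500,
            ..OptimizationConfig::default()
        };
        let mut optimizer = QuantumCircuitOptimizer::new(template, config);
        let result = optimizer
            .optimize(Arc::new(TestObjective))
            .expect("optimization should succeed");

        // The minimum of (x - 1)^2 + (y - 1)^2 is at (1, 1) with value 0.
        assert!(result.optimal_value < 1e-2);
        for p in &result.optimal_parameters {
            assert!((p - 1.0).abs() < 0.1);
        }
    }
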
    #[test]
    fn test_optimizer_creation() {
        let template = CircuitTemplate {
            structure: Vec::new(),
            parameters: vec![
                Parameter {
                    name: "x1".to_string(),
                    lower_bound: -5.0,
                    upper_bound: 5.0,
                    initial_value: 0.0,
                    discrete: false,
                },
                Parameter {
                    name: "x2".to_string(),
                    lower_bound: -5.0,
                    upper_bound: 5.0,
                    initial_value: 0.0,
                    discrete: false,
                },
            ],
            num_qubits: 1,
        };

        let config = OptimizationConfig::default();
        let optimizer = QuantumCircuitOptimizer::new(template, config);

        assert_eq!(optimizer.circuit_template.parameters.len(), 2);
    }
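
    // Added checks (sketch): central differences should recover the analytic
    // gradient of TestObjective, and derivative-free Nelder-Mead should at
    // least improve on the starting point within an illustrative budget.
    #[test]
    fn test_numerical_gradient_matches_analytic() {
        let template = CircuitTemplate {
            structure: Vec::new(),
            parameters: Vec::new(),
            num_qubits: 1,
        };
        let optimizer = QuantumCircuitOptimizer::new(template, OptimizationConfig::default());

        // Analytic gradient of (x - 1)^2 + (y - 1)^2 is (2(x - 1), 2(y - 1)).
        let params = [0.5, 2.0];
        let gradient = optimizer
            .numerical_gradient(&TestObjective, &params)
            .expect("gradient estimation should succeed");
        assert!((gradient[0] - 2.0 * (0.5 - 1.0)).abs() < 1e-4);
        assert!((gradient[1] - 2.0 * (2.0 - 1.0)).abs() < 1e-4);
    }

    #[test]
    fn test_nelder_mead_improves_on_start() {
        let template = CircuitTemplate {
            structure: Vec::new(),
            parameters: vec![
                Parameter {
                    name: "x1".to_string(),
                    lower_bound: -5.0,
                    upper_bound: 5.0,
                    initial_value: 0.0,
                    discrete: false,
                },
                Parameter {
                    name: "x2".to_string(),
                    lower_bound: -5.0,
                    upper_bound: 5.0,
                    initial_value: 0.0,
                    discrete: false,
                },
            ],
            num_qubits: 1,
        };
        let config = OptimizationConfig {
            algorithm: OptimizationAlgorithm::NelderMead {
                max_iterations: 200,
                tolerance: 1e-8,
            },
            ..OptimizationConfig::default()
        };
        let mut optimizer = QuantumCircuitOptimizer::new(template, config);
        let result = optimizer
            .optimize(Arc::new(TestObjective))
            .expect("optimization should succeed");

        // Starting value is f(0, 0) = 2.0; any progress must beat it.
        assert!(result.optimal_value < TestObjective.evaluate(&[0.0, 0.0]));
    }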
}