//! Automatic differentiation for parametric quantum circuits.
//!
//! Provides parameter-shift, finite-difference, and SPSA gradients for
//! circuits of parametric gates, together with a small gradient-descent
//! VQE driver (`VQEWithAutodiff`) and a library of common ansatze.

use crate::error::{Result, SimulatorError};
use crate::pauli::{PauliOperatorSum, PauliString};
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;
use scirs2_core::Complex64;
use std::f64::consts::PI;

#[cfg(feature = "optimize")]
use crate::optirs_integration::{OptiRSConfig, OptiRSQuantumOptimizer};

/// Method used to compute gradients of a parametric circuit's expectation values.
#[derive(Debug, Clone, Copy)]
pub enum GradientMethod {
    /// Analytic gradients via the parameter-shift rule (two circuit
    /// evaluations per parameter).
    ParameterShift,
    /// Forward finite differences with the given step size.
    FiniteDifference { step_size: f64 },
    /// Simultaneous perturbation stochastic approximation: a noisy estimate
    /// from just two circuit evaluations, independent of parameter count.
    SPSA { step_size: f64 },
}

/// Bookkeeping context for gradient-based optimization of a parametric circuit.
#[derive(Debug, Clone)]
pub struct AutoDiffContext {
    /// Current parameter values.
    pub parameters: Vec<f64>,
    /// Human-readable parameter names (defaults to θ0, θ1, ...).
    pub parameter_names: Vec<String>,
    /// Gradient method used by this context.
    pub method: GradientMethod,
    /// Most recently computed gradients.
    pub gradients: Vec<f64>,
    /// Number of gradient evaluations performed.
    pub grad_evaluations: usize,
    /// Number of objective-function evaluations performed.
    pub func_evaluations: usize,
}

impl AutoDiffContext {
    /// Create a new context with default parameter names θ0, θ1, ...
    pub fn new(parameters: Vec<f64>, method: GradientMethod) -> Self {
        let num_params = parameters.len();
        Self {
            parameters,
            parameter_names: (0..num_params).map(|i| format!("θ{i}")).collect(),
            method,
            gradients: vec![0.0; num_params],
            grad_evaluations: 0,
            func_evaluations: 0,
        }
    }

    /// Replace the default parameter names. Panics if the lengths differ.
    pub fn with_parameter_names(mut self, names: Vec<String>) -> Self {
        assert_eq!(names.len(), self.parameters.len());
        self.parameter_names = names;
        self
    }

    /// Overwrite all parameter values. Panics if the lengths differ.
    pub fn update_parameters(&mut self, new_params: Vec<f64>) {
        assert_eq!(new_params.len(), self.parameters.len());
        self.parameters = new_params;
    }

    /// Look up a parameter value by name.
    pub fn get_parameter(&self, name: &str) -> Option<f64> {
        self.parameter_names
            .iter()
            .position(|n| n == name)
            .map(|i| self.parameters[i])
    }

    /// Set a parameter value by name.
    pub fn set_parameter(&mut self, name: &str, value: f64) -> Result<()> {
        if let Some(i) = self.parameter_names.iter().position(|n| n == name) {
            self.parameters[i] = value;
            Ok(())
        } else {
            Err(SimulatorError::InvalidInput(format!(
                "Parameter '{name}' not found"
            )))
        }
    }
}

/// A quantum gate whose matrix depends on circuit parameters.
pub trait ParametricGate: Send + Sync {
    /// Gate name (e.g. "RX").
    fn name(&self) -> &str;

    /// Qubits this gate acts on.
    fn qubits(&self) -> Vec<usize>;

    /// Indices into the global parameter vector used by this gate.
    fn parameter_indices(&self) -> Vec<usize>;

    /// Gate matrix for the given parameter values.
    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>>;

    /// Analytic derivative of the gate matrix with respect to parameter `param_idx`.
    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>>;

    /// Gate matrices at parameter values shifted by ±π/2, as consumed by the
    /// parameter-shift rule: ∂⟨H⟩/∂θ = (⟨H⟩(θ+π/2) − ⟨H⟩(θ−π/2)) / 2.
    fn parameter_shift_gradient(
        &self,
        params: &[f64],
        param_idx: usize,
    ) -> Result<(Array2<Complex64>, Array2<Complex64>)> {
        let shift = PI / 2.0;
        let mut params_plus = params.to_vec();
        let mut params_minus = params.to_vec();

        if param_idx < params.len() {
            params_plus[param_idx] += shift;
            params_minus[param_idx] -= shift;
        }

        let matrix_plus = self.matrix(&params_plus)?;
        let matrix_minus = self.matrix(&params_minus)?;

        Ok((matrix_plus, matrix_minus))
    }
}

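// Illustrative sketch: how a new gate type plugs into `ParametricGate`.
// `ParametricPhase` is hypothetical (nothing else in this module uses it)
// and follows the convention P(θ) = diag(1, e^{iθ}). Note that the π/2
// parameter-shift rule assumes gates of the form exp(−i θ G / 2) with
// G² = I, which this phase convention does not satisfy; use the analytic
// `gradient` or `FiniteDifference` for such gates.
pub struct ParametricPhase {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricPhase {
    fn name(&self) -> &'static str {
        "Phase"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        Ok(scirs2_core::ndarray::array![
            [Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)],
            [Complex64::new(0.0, 0.0), Complex64::from_polar(1.0, theta)]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }
        // dP/dθ = diag(0, i e^{iθ}).
        let theta = params[self.param_idx];
        Ok(scirs2_core::ndarray::array![
            [Complex64::new(0.0, 0.0), Complex64::new(0.0, 0.0)],
            [
                Complex64::new(0.0, 0.0),
                Complex64::from_polar(1.0, theta) * Complex64::new(0.0, 1.0)
            ]
        ])
    }
}
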
/// Parametric RX rotation: RX(θ) = exp(−i θ X / 2).
pub struct ParametricRX {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricRX {
    fn name(&self) -> &'static str {
        "RX"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        Ok(scirs2_core::ndarray::array![
            [Complex64::new(cos_half, 0.), Complex64::new(0., -sin_half)],
            [Complex64::new(0., -sin_half), Complex64::new(cos_half, 0.)]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }

        // d/dθ RX(θ) = −(i/2) X · RX(θ).
        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        Ok(scirs2_core::ndarray::array![
            [
                Complex64::new(-sin_half / 2.0, 0.),
                Complex64::new(0., -cos_half / 2.0)
            ],
            [
                Complex64::new(0., -cos_half / 2.0),
                Complex64::new(-sin_half / 2.0, 0.)
            ]
        ])
    }
}

/// Parametric RY rotation: RY(θ) = exp(−i θ Y / 2).
pub struct ParametricRY {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricRY {
    fn name(&self) -> &'static str {
        "RY"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        Ok(scirs2_core::ndarray::array![
            [Complex64::new(cos_half, 0.), Complex64::new(-sin_half, 0.)],
            [Complex64::new(sin_half, 0.), Complex64::new(cos_half, 0.)]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }

        // d/dθ RY(θ) = −(i/2) Y · RY(θ).
        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        Ok(scirs2_core::ndarray::array![
            [
                Complex64::new(-sin_half / 2.0, 0.),
                Complex64::new(-cos_half / 2.0, 0.)
            ],
            [
                Complex64::new(cos_half / 2.0, 0.),
                Complex64::new(-sin_half / 2.0, 0.)
            ]
        ])
    }
}

/// Parametric RZ rotation: RZ(θ) = diag(e^{−iθ/2}, e^{iθ/2}).
pub struct ParametricRZ {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricRZ {
    fn name(&self) -> &'static str {
        "RZ"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        let exp_pos = Complex64::from_polar(1.0, theta / 2.0);
        let exp_neg = Complex64::from_polar(1.0, -theta / 2.0);

        Ok(scirs2_core::ndarray::array![
            [exp_neg, Complex64::new(0., 0.)],
            [Complex64::new(0., 0.), exp_pos]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }

        // d/dθ RZ(θ) = diag(−(i/2) e^{−iθ/2}, (i/2) e^{iθ/2}).
        let theta = params[self.param_idx];
        let exp_pos = Complex64::from_polar(1.0, theta / 2.0);
        let exp_neg = Complex64::from_polar(1.0, -theta / 2.0);

        Ok(scirs2_core::ndarray::array![
            [exp_neg * Complex64::new(0., -0.5), Complex64::new(0., 0.)],
            [Complex64::new(0., 0.), exp_pos * Complex64::new(0., 0.5)]
        ])
    }
}

/// A circuit of parametric gates sharing a global parameter vector.
pub struct ParametricCircuit {
    /// Gates in application order.
    pub gates: Vec<Box<dyn ParametricGate>>,
    /// Number of qubits in the circuit.
    pub num_qubits: usize,
    /// Size of the global parameter vector.
    pub num_parameters: usize,
}

impl ParametricCircuit {
    /// Create an empty circuit on `num_qubits` qubits.
    pub fn new(num_qubits: usize) -> Self {
        Self {
            gates: Vec::new(),
            num_qubits,
            num_parameters: 0,
        }
    }

    /// Append a gate, growing the parameter count to cover its indices.
    pub fn add_gate(&mut self, gate: Box<dyn ParametricGate>) {
        for &param_idx in &gate.parameter_indices() {
            self.num_parameters = self.num_parameters.max(param_idx + 1);
        }
        self.gates.push(gate);
    }

    /// Append an RX gate bound to parameter `param_idx`.
    pub fn rx(&mut self, qubit: usize, param_idx: usize) {
        self.add_gate(Box::new(ParametricRX { qubit, param_idx }));
    }

    /// Append an RY gate bound to parameter `param_idx`.
    pub fn ry(&mut self, qubit: usize, param_idx: usize) {
        self.add_gate(Box::new(ParametricRY { qubit, param_idx }));
    }

    /// Append an RZ gate bound to parameter `param_idx`.
    pub fn rz(&mut self, qubit: usize, param_idx: usize) {
        self.add_gate(Box::new(ParametricRZ { qubit, param_idx }));
    }

    /// Evaluate the circuit on |0…0⟩, returning the final state vector.
    pub fn evaluate(&self, params: &[f64]) -> Result<Array1<Complex64>> {
        if params.len() != self.num_parameters {
            return Err(SimulatorError::InvalidInput(format!(
                "Expected {} parameters, got {}",
                self.num_parameters,
                params.len()
            )));
        }

        // Start from |0...0⟩ and apply each gate's matrix in sequence,
        // using the dense single-qubit helper defined below.
        let mut state = Array1::zeros(1 << self.num_qubits);
        state[0] = Complex64::new(1.0, 0.0);

        for gate in &self.gates {
            let matrix = gate.matrix(params)?;
            let qubits = gate.qubits();

            if qubits.len() == 1 {
                apply_single_qubit_gate(&mut state, &matrix, qubits[0]);
            } else {
                // Only single-qubit parametric gates are defined so far.
                return Err(SimulatorError::InvalidInput(format!(
                    "Unsupported gate arity {} for gate '{}'",
                    qubits.len(),
                    gate.name()
                )));
            }
        }

        Ok(state)
    }

    /// Gradient of ⟨observable⟩ with respect to every circuit parameter.
    ///
    /// Cost per call: parameter-shift and finite differences need O(n)
    /// circuit evaluations for n parameters; SPSA needs only two, at the
    /// price of a stochastic estimate.
    pub fn gradient_expectation(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
        method: GradientMethod,
    ) -> Result<Vec<f64>> {
        match method {
            GradientMethod::ParameterShift => self.parameter_shift_gradient(observable, params),
            GradientMethod::FiniteDifference { step_size } => {
                self.finite_difference_gradient(observable, params, step_size)
            }
            GradientMethod::SPSA { step_size } => self.spsa_gradient(observable, params, step_size),
        }
    }

    /// Analytic gradient via the parameter-shift rule:
    /// ∂⟨H⟩/∂θᵢ = (⟨H⟩(θᵢ + π/2) − ⟨H⟩(θᵢ − π/2)) / 2.
    fn parameter_shift_gradient(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
    ) -> Result<Vec<f64>> {
        let mut gradients = vec![0.0; self.num_parameters];

        for param_idx in 0..self.num_parameters {
            let shift = PI / 2.0;

            // Evaluate at θᵢ + π/2.
            let mut params_plus = params.to_vec();
            params_plus[param_idx] += shift;
            let state_plus = self.evaluate(&params_plus)?;
            let expectation_plus = compute_expectation_value(&state_plus, observable)?;

            // Evaluate at θᵢ − π/2.
            let mut params_minus = params.to_vec();
            params_minus[param_idx] -= shift;
            let state_minus = self.evaluate(&params_minus)?;
            let expectation_minus = compute_expectation_value(&state_minus, observable)?;

            gradients[param_idx] = (expectation_plus - expectation_minus) / 2.0;
        }

        Ok(gradients)
    }

    /// Forward finite-difference gradient: (f(θᵢ + h) − f(θᵢ)) / h.
    fn finite_difference_gradient(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
        step_size: f64,
    ) -> Result<Vec<f64>> {
        let mut gradients = vec![0.0; self.num_parameters];

        // The baseline value does not depend on which parameter is shifted,
        // so compute it once outside the loop.
        let state = self.evaluate(params)?;
        let expectation = compute_expectation_value(&state, observable)?;

        for param_idx in 0..self.num_parameters {
            let mut params_plus = params.to_vec();
            params_plus[param_idx] += step_size;
            let state_plus = self.evaluate(&params_plus)?;
            let expectation_plus = compute_expectation_value(&state_plus, observable)?;

            gradients[param_idx] = (expectation_plus - expectation) / step_size;
        }

        Ok(gradients)
    }

    /// SPSA gradient estimate: a single random ±1 (Rademacher) perturbation
    /// of all parameters at once, giving a stochastic estimate from just two
    /// circuit evaluations regardless of the number of parameters.
    fn spsa_gradient(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
        step_size: f64,
    ) -> Result<Vec<f64>> {
        let mut rng = thread_rng();

        // Random ±1 perturbation direction.
        let mut perturbation = vec![0.0; self.num_parameters];
        for p in &mut perturbation {
            *p = if rng.gen::<bool>() { 1.0 } else { -1.0 };
        }

        let mut params_plus = params.to_vec();
        let mut params_minus = params.to_vec();
        for i in 0..self.num_parameters {
            params_plus[i] += step_size * perturbation[i];
            params_minus[i] -= step_size * perturbation[i];
        }

        let state_plus = self.evaluate(&params_plus)?;
        let state_minus = self.evaluate(&params_minus)?;
        let expectation_plus = compute_expectation_value(&state_plus, observable)?;
        let expectation_minus = compute_expectation_value(&state_minus, observable)?;

        let diff = (expectation_plus - expectation_minus) / (2.0 * step_size);
        let gradients = perturbation.iter().map(|&p| diff / p).collect();

        Ok(gradients)
    }
}

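// Dense single-qubit gate application used by `ParametricCircuit::evaluate`.
// A minimal sketch assuming little-endian qubit ordering (qubit 0 is the
// least-significant bit of the basis-state index); adjust if the rest of
// the crate uses the opposite convention.
fn apply_single_qubit_gate(
    state: &mut Array1<Complex64>,
    matrix: &Array2<Complex64>,
    qubit: usize,
) {
    let mask = 1usize << qubit;
    for i in 0..state.len() {
        // Visit each amplitude pair (|…0…⟩, |…1…⟩) exactly once.
        if i & mask == 0 {
            let j = i | mask;
            let a = state[i];
            let b = state[j];
            state[i] = matrix[[0, 0]] * a + matrix[[0, 1]] * b;
            state[j] = matrix[[1, 0]] * a + matrix[[1, 1]] * b;
        }
    }
}
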
/// Variational quantum eigensolver driven by the gradient methods above.
pub struct VQEWithAutodiff {
    /// Parametric ansatz circuit.
    pub ansatz: ParametricCircuit,
    /// Hamiltonian whose ground-state energy is being minimized.
    pub hamiltonian: PauliOperatorSum,
    /// Parameter and gradient bookkeeping.
    pub context: AutoDiffContext,
    /// Per-iteration optimization history.
    pub history: Vec<VQEIteration>,
    /// Stopping criteria.
    pub convergence: ConvergenceCriteria,
}

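// Typical usage (a sketch; assumes a `PauliOperatorSum` Hamiltonian has
// been constructed elsewhere):
//
//     let ansatz = ansatze::hardware_efficient(2, 1);
//     let init = vec![0.1; ansatz.num_parameters];
//     let mut vqe = VQEWithAutodiff::new(ansatz, hamiltonian, init,
//         GradientMethod::ParameterShift)
//         .with_convergence(ConvergenceCriteria {
//             max_iterations: 200,
//             ..ConvergenceCriteria::default()
//         });
//     let result = vqe.optimize(0.05)?;
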
/// Snapshot of a single VQE optimization iteration.
#[derive(Clone)]
pub struct VQEIteration {
    /// Iteration index (0-based).
    pub iteration: usize,
    /// Parameter values after this iteration's update.
    pub parameters: Vec<f64>,
    /// Energy evaluated at the start of the iteration.
    pub energy: f64,
    /// Euclidean norm of the gradient vector.
    pub gradient_norm: f64,
    /// Cumulative objective-function evaluations so far.
    pub func_evals: usize,
    /// Cumulative gradient evaluations so far.
    pub grad_evals: usize,
}

/// Stopping criteria for the VQE optimization loop.
pub struct ConvergenceCriteria {
    /// Maximum number of iterations.
    pub max_iterations: usize,
    /// Threshold on the energy change between successive iterations.
    pub energy_tolerance: f64,
    /// Threshold on the gradient norm.
    pub gradient_tolerance: f64,
    /// Maximum number of objective-function evaluations.
    pub max_func_evals: usize,
}

impl Default for ConvergenceCriteria {
    fn default() -> Self {
        Self {
            max_iterations: 1000,
            energy_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
            max_func_evals: 10000,
        }
    }
}

impl VQEWithAutodiff {
    /// Create a new VQE instance with the given ansatz, Hamiltonian,
    /// initial parameters, and gradient method.
    pub fn new(
        ansatz: ParametricCircuit,
        hamiltonian: PauliOperatorSum,
        initial_params: Vec<f64>,
        gradient_method: GradientMethod,
    ) -> Self {
        let context = AutoDiffContext::new(initial_params, gradient_method);
        Self {
            ansatz,
            hamiltonian,
            context,
            history: Vec::new(),
            convergence: ConvergenceCriteria::default(),
        }
    }

    /// Override the default convergence criteria.
    pub const fn with_convergence(mut self, convergence: ConvergenceCriteria) -> Self {
        self.convergence = convergence;
        self
    }

    /// Energy expectation ⟨ψ(θ)|H|ψ(θ)⟩ at the current parameters.
    pub fn evaluate_energy(&mut self) -> Result<f64> {
        let state = self.ansatz.evaluate(&self.context.parameters)?;
        let energy = compute_expectation_value(&state, &self.hamiltonian)?;
        self.context.func_evaluations += 1;
        Ok(energy)
    }

    /// Gradient of the energy at the current parameters, cached in the context.
    pub fn compute_gradient(&mut self) -> Result<Vec<f64>> {
        let gradients = self.ansatz.gradient_expectation(
            &self.hamiltonian,
            &self.context.parameters,
            self.context.method,
        )?;
        self.context.gradients = gradients.clone();
        self.context.grad_evaluations += 1;
        Ok(gradients)
    }

    /// Perform one gradient-descent step and record it in the history.
    pub fn step(&mut self, learning_rate: f64) -> Result<VQEIteration> {
        let energy = self.evaluate_energy()?;
        let gradients = self.compute_gradient()?;

        // Gradient-descent update of the parameters.
        for (i, &grad) in gradients.iter().enumerate() {
            self.context.parameters[i] -= learning_rate * grad;
        }

        let gradient_norm = gradients.iter().map(|g| g * g).sum::<f64>().sqrt();

        // Note: `energy` was evaluated at the pre-update parameters, while
        // `parameters` records the post-update values.
        let iteration = VQEIteration {
            iteration: self.history.len(),
            parameters: self.context.parameters.clone(),
            energy,
            gradient_norm,
            func_evals: self.context.func_evaluations,
            grad_evals: self.context.grad_evaluations,
        };

        self.history.push(iteration.clone());
        Ok(iteration)
    }

    /// Run gradient descent until convergence or until the iteration or
    /// evaluation budget is exhausted.
    pub fn optimize(&mut self, learning_rate: f64) -> Result<VQEResult> {
        while !self.is_converged()? {
            let iteration = self.step(learning_rate)?;

            if iteration.iteration >= self.convergence.max_iterations {
                break;
            }
            if iteration.func_evals >= self.convergence.max_func_evals {
                break;
            }
        }

        let final_iteration = self.history.last().unwrap();
        Ok(VQEResult {
            optimal_parameters: final_iteration.parameters.clone(),
            optimal_energy: final_iteration.energy,
            iterations: self.history.len(),
            converged: self.is_converged()?,
            history: self.history.clone(),
        })
    }

    fn is_converged(&self) -> Result<bool> {
        if self.history.len() < 2 {
            return Ok(false);
        }

        let current = &self.history[self.history.len() - 1];
        let previous = &self.history[self.history.len() - 2];

        let energy_converged =
            (current.energy - previous.energy).abs() < self.convergence.energy_tolerance;
        let gradient_converged = current.gradient_norm < self.convergence.gradient_tolerance;

        Ok(energy_converged && gradient_converged)
    }

    /// Optimize using an external OptiRS optimizer instead of plain
    /// gradient descent.
    #[cfg(feature = "optimize")]
    pub fn optimize_with_optirs(&mut self, config: OptiRSConfig) -> Result<VQEResult> {
        use std::time::Instant;

        let start_time = Instant::now();
        let mut optimizer = OptiRSQuantumOptimizer::new(config)?;

        while !self.is_converged()? && !optimizer.has_converged() {
            let energy = self.evaluate_energy()?;
            let gradients = self.compute_gradient()?;

            let new_params =
                optimizer.optimize_step(&self.context.parameters, &gradients, energy)?;

            self.context.parameters = new_params;

            let gradient_norm = gradients.iter().map(|g| g * g).sum::<f64>().sqrt();
            let iteration = VQEIteration {
                iteration: self.history.len(),
                parameters: self.context.parameters.clone(),
                energy,
                gradient_norm,
                func_evals: self.context.func_evaluations,
                grad_evals: self.context.grad_evaluations,
            };
            self.history.push(iteration);

            if self.history.len() >= self.convergence.max_iterations {
                break;
            }
            if self.context.func_evaluations >= self.convergence.max_func_evals {
                break;
            }
        }

        // Wall-clock time is measured but not yet surfaced in `VQEResult`.
        let _optimization_time = start_time.elapsed();
        let final_iteration = self.history.last().unwrap();

        Ok(VQEResult {
            optimal_parameters: final_iteration.parameters.clone(),
            optimal_energy: final_iteration.energy,
            iterations: self.history.len(),
            converged: self.is_converged()?,
            history: self.history.clone(),
        })
    }
}

/// Final result of a VQE optimization run.
pub struct VQEResult {
    /// Best parameters found.
    pub optimal_parameters: Vec<f64>,
    /// Energy at the best parameters.
    pub optimal_energy: f64,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Whether the convergence criteria were met.
    pub converged: bool,
    /// Full iteration history.
    pub history: Vec<VQEIteration>,
}

/// ⟨ψ|H|ψ⟩ for a Pauli-sum observable (real for Hermitian observables).
fn compute_expectation_value(
    state: &Array1<Complex64>,
    observable: &PauliOperatorSum,
) -> Result<f64> {
    let mut expectation = 0.0;

    for term in &observable.terms {
        // The helper already folds in the term's coefficient, so it must
        // not be applied a second time here.
        let pauli_expectation = compute_pauli_expectation_from_state(state, term)?;
        expectation += pauli_expectation.re;
    }

    Ok(expectation)
}

/// ⟨ψ|c·P|ψ⟩ for a single Pauli string P with coefficient c.
fn compute_pauli_expectation_from_state(
    state: &Array1<Complex64>,
    pauli_string: &PauliString,
) -> Result<Complex64> {
    use crate::pauli::PauliOperator;

    let num_qubits = pauli_string.num_qubits;
    let dim = 1 << num_qubits;
    let mut result = Complex64::new(0.0, 0.0);

    for (i, &amplitude) in state.iter().enumerate() {
        if i >= dim {
            break;
        }

        // Accumulate the matrix element ⟨i|P|target⟩ qubit by qubit.
        let mut coeff = Complex64::new(1.0, 0.0);
        let mut target_state = i;

        for (qubit, &pauli_op) in pauli_string.operators.iter().enumerate() {
            let bit = (i >> qubit) & 1;

            match pauli_op {
                PauliOperator::I => {}
                PauliOperator::X => {
                    // ⟨b|X|1−b⟩ = 1.
                    target_state ^= 1 << qubit;
                }
                PauliOperator::Y => {
                    // ⟨0|Y|1⟩ = −i, ⟨1|Y|0⟩ = +i.
                    target_state ^= 1 << qubit;
                    coeff *= if bit == 0 {
                        Complex64::new(0.0, -1.0)
                    } else {
                        Complex64::new(0.0, 1.0)
                    };
                }
                PauliOperator::Z => {
                    // ⟨b|Z|b⟩ = (−1)^b.
                    if bit == 1 {
                        coeff *= Complex64::new(-1.0, 0.0);
                    }
                }
            }
        }

        if target_state < dim {
            result += amplitude.conj() * coeff * state[target_state];
        }
    }

    Ok(result * pauli_string.coefficient)
}

/// Library of common variational ansatz circuits.
pub mod ansatze {
    use super::*;

    /// Hardware-efficient ansatz: alternating RY/RZ rotation layers.
    pub fn hardware_efficient(num_qubits: usize, num_layers: usize) -> ParametricCircuit {
        let mut circuit = ParametricCircuit::new(num_qubits);
        let mut param_idx = 0;

        for _layer in 0..num_layers {
            for qubit in 0..num_qubits {
                circuit.ry(qubit, param_idx);
                param_idx += 1;
                circuit.rz(qubit, param_idx);
                param_idx += 1;
            }

            // Entangling layer omitted: `ParametricCircuit` currently only
            // supports single-qubit parametric gates.
        }

        circuit
    }

    /// QAOA-style ansatz for MaxCut on the given edge list.
    ///
    /// Since only single-qubit parametric gates are available, the cost
    /// layer applies RZ to both endpoints of each edge as a simplified
    /// stand-in for the usual two-qubit ZZ interaction.
    pub fn qaoa_maxcut(
        num_qubits: usize,
        num_layers: usize,
        edges: &[(usize, usize)],
    ) -> ParametricCircuit {
        let mut circuit = ParametricCircuit::new(num_qubits);
        let mut param_idx = 0;

        // Initial rotation layer with a single shared angle.
        for qubit in 0..num_qubits {
            circuit.ry(qubit, param_idx);
        }
        param_idx += 1;

        for _layer in 0..num_layers {
            // Cost layer: one angle per edge.
            for &(i, j) in edges {
                circuit.rz(i, param_idx);
                circuit.rz(j, param_idx);
                param_idx += 1;
            }

            // Mixer layer: one angle per qubit.
            for qubit in 0..num_qubits {
                circuit.rx(qubit, param_idx);
                param_idx += 1;
            }
        }

        circuit
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parametric_rx_matrix() {
        let rx_gate = ParametricRX {
            qubit: 0,
            param_idx: 0,
        };
        let params = vec![PI / 2.0];
        let matrix = rx_gate.matrix(&params).unwrap();

        // RX(π/2) entries all have magnitude 1/√2.
        let expected_val = 1.0 / 2.0_f64.sqrt();
        assert!((matrix[[0, 0]].re - expected_val).abs() < 1e-10);
        assert!((matrix[[0, 1]].im + expected_val).abs() < 1e-10);
    }

    #[test]
    fn test_autodiff_context() {
        let params = vec![1.0, 2.0, 3.0];
        let mut context = AutoDiffContext::new(params.clone(), GradientMethod::ParameterShift);

        assert_eq!(context.parameters, params);
        assert_eq!(context.gradients.len(), 3);

        context.update_parameters(vec![4.0, 5.0, 6.0]);
        assert_eq!(context.parameters, vec![4.0, 5.0, 6.0]);
    }

    #[test]
    fn test_parametric_circuit_creation() {
        let mut circuit = ParametricCircuit::new(2);
        circuit.rx(0, 0);
        circuit.ry(1, 1);

        assert_eq!(circuit.gates.len(), 2);
        assert_eq!(circuit.num_parameters, 2);
    }

    #[test]
    fn test_hardware_efficient_ansatz() {
        let ansatz = ansatze::hardware_efficient(3, 2);
        assert_eq!(ansatz.num_qubits, 3);
        // 2 layers × 3 qubits × 2 rotations per qubit.
        assert_eq!(ansatz.num_parameters, 12);
    }
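
    // Added check (a minimal sketch): the analytic gate gradient should
    // agree with a central finite difference of the gate matrix.
    #[test]
    fn test_ry_gradient_matches_finite_difference() {
        let ry_gate = ParametricRY {
            qubit: 0,
            param_idx: 0,
        };
        let theta = 0.7;
        let eps = 1e-6;

        let analytic = ry_gate.gradient(&[theta], 0).unwrap();
        let plus = ry_gate.matrix(&[theta + eps]).unwrap();
        let minus = ry_gate.matrix(&[theta - eps]).unwrap();

        for r in 0..2 {
            for c in 0..2 {
                let numeric = (plus[[r, c]] - minus[[r, c]]) / (2.0 * eps);
                assert!((analytic[[r, c]] - numeric).norm() < 1e-5);
            }
        }
    }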
}