quantrs2_sim/
autodiff_vqe.rs

//! Automatic differentiation for Variational Quantum Eigensolver (VQE).
//!
//! This module implements automatic differentiation techniques specifically designed
//! for variational quantum algorithms, including the parameter-shift rule, finite
//! differences, and optimization strategies for VQE.

use crate::error::{Result, SimulatorError};
use crate::pauli::{PauliOperatorSum, PauliString};
use crate::statevector::StateVectorSimulator;
use quantrs2_core::gate::GateOp;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;
use scirs2_core::Complex64;
use std::f64::consts::PI;

#[cfg(feature = "optimize")]
use crate::optirs_integration::{OptiRSConfig, OptiRSQuantumOptimizer};

/// Gradient computation method
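///
/// A minimal sketch of selecting a method (all variants are declared just below):
///
/// ```ignore
/// let exact = GradientMethod::ParameterShift;
/// let approx = GradientMethod::FiniteDifference { step_size: 1e-6 };
/// let stochastic = GradientMethod::SPSA { step_size: 0.01 };
/// ```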
#[derive(Debug, Clone, Copy)]
pub enum GradientMethod {
    /// Parameter-shift rule (exact for quantum gates)
    ParameterShift,
    /// Finite differences
    FiniteDifference { step_size: f64 },
    /// Simultaneous perturbation stochastic approximation
    SPSA { step_size: f64 },
}

/// Automatic differentiation context for tracking gradients
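///
/// A minimal usage sketch (all names are defined in this module):
///
/// ```ignore
/// let mut ctx = AutoDiffContext::new(vec![0.1, 0.2], GradientMethod::ParameterShift)
///     .with_parameter_names(vec!["alpha".to_string(), "beta".to_string()]);
/// ctx.set_parameter("alpha", 0.5)?;
/// assert_eq!(ctx.get_parameter("alpha"), Some(0.5));
/// ```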
#[derive(Debug, Clone)]
pub struct AutoDiffContext {
    /// Parameter values
    pub parameters: Vec<f64>,
    /// Parameter names/indices
    pub parameter_names: Vec<String>,
    /// Gradient computation method
    pub method: GradientMethod,
    /// Current gradients
    pub gradients: Vec<f64>,
    /// Gradient computation count
    pub grad_evaluations: usize,
    /// Function evaluation count
    pub func_evaluations: usize,
}

impl AutoDiffContext {
    /// Create new autodiff context
    #[must_use]
    pub fn new(parameters: Vec<f64>, method: GradientMethod) -> Self {
        let num_params = parameters.len();
        Self {
            parameters,
            parameter_names: (0..num_params).map(|i| format!("θ{i}")).collect(),
            method,
            gradients: vec![0.0; num_params],
            grad_evaluations: 0,
            func_evaluations: 0,
        }
    }

    /// Set parameter names
    #[must_use]
    pub fn with_parameter_names(mut self, names: Vec<String>) -> Self {
        assert_eq!(names.len(), self.parameters.len());
        self.parameter_names = names;
        self
    }

    /// Update parameters
    pub fn update_parameters(&mut self, new_params: Vec<f64>) {
        assert_eq!(new_params.len(), self.parameters.len());
        self.parameters = new_params;
    }

    /// Get parameter by name
    #[must_use]
    pub fn get_parameter(&self, name: &str) -> Option<f64> {
        self.parameter_names
            .iter()
            .position(|n| n == name)
            .map(|i| self.parameters[i])
    }

    /// Set parameter by name
    pub fn set_parameter(&mut self, name: &str, value: f64) -> Result<()> {
        if let Some(i) = self.parameter_names.iter().position(|n| n == name) {
            self.parameters[i] = value;
            Ok(())
        } else {
            Err(SimulatorError::InvalidInput(format!(
                "Parameter '{name}' not found"
            )))
        }
    }
}

/// Parametric quantum gate that supports automatic differentiation
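///
/// A usage sketch of the shift machinery (types as defined in this module); the
/// shifted matrices feed the expectation-value rule ∂⟨H⟩/∂θ = (⟨H⟩₊ - ⟨H⟩₋) / 2:
///
/// ```ignore
/// let gate = ParametricRX { qubit: 0, param_idx: 0 };
/// let (u_plus, u_minus) = gate.parameter_shift_gradient(&[0.3], 0)?;
/// ```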
pub trait ParametricGate: Send + Sync {
    /// Get gate name
    fn name(&self) -> &str;

    /// Get qubits this gate acts on
    fn qubits(&self) -> Vec<usize>;

    /// Get parameter indices this gate depends on
    fn parameter_indices(&self) -> Vec<usize>;

    /// Evaluate gate matrix given parameter values
    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>>;

    /// Compute gradient of gate matrix with respect to each parameter
    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>>;

    /// Apply the parameter-shift rule for this gate, returning the gate matrices
    /// evaluated at θ + π/2 and θ - π/2
    fn parameter_shift_gradient(
        &self,
        params: &[f64],
        param_idx: usize,
    ) -> Result<(Array2<Complex64>, Array2<Complex64>)> {
        let shift = PI / 2.0;
        let mut params_plus = params.to_vec();
        let mut params_minus = params.to_vec();

        if param_idx < params.len() {
            params_plus[param_idx] += shift;
            params_minus[param_idx] -= shift;
        }

        let matrix_plus = self.matrix(&params_plus)?;
        let matrix_minus = self.matrix(&params_minus)?;

        Ok((matrix_plus, matrix_minus))
    }
}

/// Parametric RX rotation gate
pub struct ParametricRX {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricRX {
    fn name(&self) -> &'static str {
        "RX"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        Ok(scirs2_core::ndarray::array![
            [Complex64::new(cos_half, 0.), Complex64::new(0., -sin_half)],
            [Complex64::new(0., -sin_half), Complex64::new(cos_half, 0.)]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }

        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        // d/dθ RX(θ) = -i/2 * X * RX(θ)
        Ok(scirs2_core::ndarray::array![
            [
                Complex64::new(-sin_half / 2.0, 0.),
                Complex64::new(0., -cos_half / 2.0)
            ],
            [
                Complex64::new(0., -cos_half / 2.0),
                Complex64::new(-sin_half / 2.0, 0.)
            ]
        ])
    }
}

/// Parametric RY rotation gate
pub struct ParametricRY {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricRY {
    fn name(&self) -> &'static str {
        "RY"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        Ok(scirs2_core::ndarray::array![
            [Complex64::new(cos_half, 0.), Complex64::new(-sin_half, 0.)],
            [Complex64::new(sin_half, 0.), Complex64::new(cos_half, 0.)]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }

        let theta = params[self.param_idx];
        let cos_half = (theta / 2.0).cos();
        let sin_half = (theta / 2.0).sin();

        // d/dθ RY(θ) = -i/2 * Y * RY(θ)
        Ok(scirs2_core::ndarray::array![
            [
                Complex64::new(-sin_half / 2.0, 0.),
                Complex64::new(-cos_half / 2.0, 0.)
            ],
            [
                Complex64::new(cos_half / 2.0, 0.),
                Complex64::new(-sin_half / 2.0, 0.)
            ]
        ])
    }
}

/// Parametric RZ rotation gate
pub struct ParametricRZ {
    pub qubit: usize,
    pub param_idx: usize,
}

impl ParametricGate for ParametricRZ {
    fn name(&self) -> &'static str {
        "RZ"
    }

    fn qubits(&self) -> Vec<usize> {
        vec![self.qubit]
    }

    fn parameter_indices(&self) -> Vec<usize> {
        vec![self.param_idx]
    }

    fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
        let theta = params[self.param_idx];
        let exp_pos = Complex64::from_polar(1.0, theta / 2.0);
        let exp_neg = Complex64::from_polar(1.0, -theta / 2.0);

        Ok(scirs2_core::ndarray::array![
            [exp_neg, Complex64::new(0., 0.)],
            [Complex64::new(0., 0.), exp_pos]
        ])
    }

    fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
        if param_idx != self.param_idx {
            return Ok(Array2::zeros((2, 2)));
        }

        let theta = params[self.param_idx];
        let exp_pos = Complex64::from_polar(1.0, theta / 2.0);
        let exp_neg = Complex64::from_polar(1.0, -theta / 2.0);

        // d/dθ RZ(θ) = -i/2 * Z * RZ(θ)
        Ok(scirs2_core::ndarray::array![
            [exp_neg * Complex64::new(0., -0.5), Complex64::new(0., 0.)],
            [Complex64::new(0., 0.), exp_pos * Complex64::new(0., 0.5)]
        ])
    }
}

/// Parametric quantum circuit for VQE
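///
/// A minimal construction sketch (methods as defined below):
///
/// ```ignore
/// let mut circuit = ParametricCircuit::new(2);
/// circuit.ry(0, 0);
/// circuit.rz(1, 1);
/// assert_eq!(circuit.num_parameters, 2);
/// ```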
pub struct ParametricCircuit {
    /// Sequence of parametric gates
    pub gates: Vec<Box<dyn ParametricGate>>,
    /// Number of qubits
    pub num_qubits: usize,
    /// Number of parameters
    pub num_parameters: usize,
}

impl ParametricCircuit {
    /// Create new parametric circuit
    #[must_use]
    pub fn new(num_qubits: usize) -> Self {
        Self {
            gates: Vec::new(),
            num_qubits,
            num_parameters: 0,
        }
    }

    /// Add a parametric gate
    pub fn add_gate(&mut self, gate: Box<dyn ParametricGate>) {
        // Update parameter count
        for &param_idx in &gate.parameter_indices() {
            self.num_parameters = self.num_parameters.max(param_idx + 1);
        }
        self.gates.push(gate);
    }

    /// Add RX gate
    pub fn rx(&mut self, qubit: usize, param_idx: usize) {
        self.add_gate(Box::new(ParametricRX { qubit, param_idx }));
    }

    /// Add RY gate
    pub fn ry(&mut self, qubit: usize, param_idx: usize) {
        self.add_gate(Box::new(ParametricRY { qubit, param_idx }));
    }

    /// Add RZ gate
    pub fn rz(&mut self, qubit: usize, param_idx: usize) {
        self.add_gate(Box::new(ParametricRZ { qubit, param_idx }));
    }

    /// Evaluate circuit for given parameters and return final state
    pub fn evaluate(&self, params: &[f64]) -> Result<Array1<Complex64>> {
        if params.len() != self.num_parameters {
            return Err(SimulatorError::InvalidInput(format!(
                "Expected {} parameters, got {}",
                self.num_parameters,
                params.len()
            )));
        }

        // Initialize state vector simulator (unused until gate application is wired up)
        let _simulator = StateVectorSimulator::new();

        // Apply gates sequentially
        for gate in &self.gates {
            let _matrix = gate.matrix(params)?;
            let qubits = gate.qubits();

            if qubits.len() == 1 {
                // Single-qubit gate - would need proper simulator integration
                // For now, this is a placeholder
            } else if qubits.len() == 2 {
                // Two-qubit gate - would need proper simulator integration
            }
        }

        // Return placeholder state for now
        let mut state = Array1::zeros(1 << self.num_qubits);
        state[0] = Complex64::new(1.0, 0.0); // |0...0>
        Ok(state)
    }

    /// Compute gradient of expectation value using the selected gradient method
    pub fn gradient_expectation(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
        method: GradientMethod,
    ) -> Result<Vec<f64>> {
        match method {
            GradientMethod::ParameterShift => self.parameter_shift_gradient(observable, params),
            GradientMethod::FiniteDifference { step_size } => {
                self.finite_difference_gradient(observable, params, step_size)
            }
            GradientMethod::SPSA { step_size } => self.spsa_gradient(observable, params, step_size),
        }
    }

    /// Parameter-shift rule gradient computation
    fn parameter_shift_gradient(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
    ) -> Result<Vec<f64>> {
        let mut gradients = vec![0.0; self.num_parameters];

        // Use parameter-shift rule: ∂⟨H⟩/∂θᵢ = (⟨H⟩₊ - ⟨H⟩₋) / 2
        // where ±π/2 shifts are applied to parameter θᵢ
        for param_idx in 0..self.num_parameters {
            let shift = PI / 2.0;

            // Forward shift
            let mut params_plus = params.to_vec();
            params_plus[param_idx] += shift;
            let state_plus = self.evaluate(&params_plus)?;
            let expectation_plus = compute_expectation_value(&state_plus, observable)?;

            // Backward shift
            let mut params_minus = params.to_vec();
            params_minus[param_idx] -= shift;
            let state_minus = self.evaluate(&params_minus)?;
            let expectation_minus = compute_expectation_value(&state_minus, observable)?;

            // Gradient
            gradients[param_idx] = (expectation_plus - expectation_minus) / 2.0;
        }

        Ok(gradients)
    }

    /// Finite difference gradient computation (forward differences)
    fn finite_difference_gradient(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
        step_size: f64,
    ) -> Result<Vec<f64>> {
        let mut gradients = vec![0.0; self.num_parameters];

        // Base value, computed once and reused for every forward difference
        let state = self.evaluate(params)?;
        let expectation = compute_expectation_value(&state, observable)?;

        for param_idx in 0..self.num_parameters {
            // Forward difference
            let mut params_plus = params.to_vec();
            params_plus[param_idx] += step_size;
            let state_plus = self.evaluate(&params_plus)?;
            let expectation_plus = compute_expectation_value(&state_plus, observable)?;

            gradients[param_idx] = (expectation_plus - expectation) / step_size;
        }

        Ok(gradients)
    }

    /// SPSA gradient estimation
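    ///
    /// Uses a single Rademacher perturbation Δ ∈ {-1, +1}ⁿ and two circuit
    /// evaluations, regardless of the number of parameters:
    /// ĝᵢ = (f(θ + cΔ) - f(θ - cΔ)) / (2c·Δᵢ), where c is `step_size`.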
    fn spsa_gradient(
        &self,
        observable: &PauliOperatorSum,
        params: &[f64],
        step_size: f64,
    ) -> Result<Vec<f64>> {
        let mut rng = thread_rng();

        // Generate random perturbation vector
        let mut perturbation = vec![0.0; self.num_parameters];
        for p in &mut perturbation {
            *p = if rng.gen::<bool>() { 1.0 } else { -1.0 };
        }

        // Two evaluations with opposite perturbations
        let mut params_plus = params.to_vec();
        let mut params_minus = params.to_vec();
        for i in 0..self.num_parameters {
            params_plus[i] += step_size * perturbation[i];
            params_minus[i] -= step_size * perturbation[i];
        }

        let state_plus = self.evaluate(&params_plus)?;
        let state_minus = self.evaluate(&params_minus)?;
        let expectation_plus = compute_expectation_value(&state_plus, observable)?;
        let expectation_minus = compute_expectation_value(&state_minus, observable)?;

        // SPSA gradient estimate
        let diff = (expectation_plus - expectation_minus) / (2.0 * step_size);
        let gradients = perturbation.iter().map(|&p| diff / p).collect();

        Ok(gradients)
    }
}

/// VQE algorithm with automatic differentiation
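///
/// An end-to-end usage sketch (`ansatz`, `hamiltonian`, and `initial_params` are
/// assumed to be built elsewhere; the methods are defined in this module):
///
/// ```ignore
/// let mut vqe = VQEWithAutodiff::new(ansatz, hamiltonian, initial_params,
///     GradientMethod::ParameterShift);
/// let result = vqe.optimize(0.1)?; // plain gradient descent, learning rate 0.1
/// println!("E_min = {}", result.optimal_energy);
/// ```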
pub struct VQEWithAutodiff {
    /// Parametric ansatz circuit
    pub ansatz: ParametricCircuit,
    /// Hamiltonian observable
    pub hamiltonian: PauliOperatorSum,
    /// Autodiff context
    pub context: AutoDiffContext,
    /// Optimization history
    pub history: Vec<VQEIteration>,
    /// Convergence criteria
    pub convergence: ConvergenceCriteria,
}

/// Single VQE iteration data
#[derive(Clone)]
pub struct VQEIteration {
    /// Iteration number
    pub iteration: usize,
    /// Parameters at this iteration
    pub parameters: Vec<f64>,
    /// Energy expectation value
    pub energy: f64,
    /// Gradient norm
    pub gradient_norm: f64,
    /// Function evaluations so far
    pub func_evals: usize,
    /// Gradient evaluations so far
    pub grad_evals: usize,
}

/// Convergence criteria for VQE
pub struct ConvergenceCriteria {
    /// Maximum iterations
    pub max_iterations: usize,
    /// Energy tolerance
    pub energy_tolerance: f64,
    /// Gradient norm tolerance
    pub gradient_tolerance: f64,
    /// Maximum function evaluations
    pub max_func_evals: usize,
}

impl Default for ConvergenceCriteria {
    fn default() -> Self {
        Self {
            max_iterations: 1000,
            energy_tolerance: 1e-6,
            gradient_tolerance: 1e-6,
            max_func_evals: 10_000,
        }
    }
}

impl VQEWithAutodiff {
    /// Create new VQE instance
    #[must_use]
    pub fn new(
        ansatz: ParametricCircuit,
        hamiltonian: PauliOperatorSum,
        initial_params: Vec<f64>,
        gradient_method: GradientMethod,
    ) -> Self {
        let context = AutoDiffContext::new(initial_params, gradient_method);
        Self {
            ansatz,
            hamiltonian,
            context,
            history: Vec::new(),
            convergence: ConvergenceCriteria::default(),
        }
    }

    /// Set convergence criteria
    #[must_use]
    pub const fn with_convergence(mut self, convergence: ConvergenceCriteria) -> Self {
        self.convergence = convergence;
        self
    }

    /// Evaluate energy for current parameters
    pub fn evaluate_energy(&mut self) -> Result<f64> {
        let state = self.ansatz.evaluate(&self.context.parameters)?;
        let energy = compute_expectation_value(&state, &self.hamiltonian)?;
        self.context.func_evaluations += 1;
        Ok(energy)
    }

    /// Compute gradient for current parameters
    pub fn compute_gradient(&mut self) -> Result<Vec<f64>> {
        let gradients = self.ansatz.gradient_expectation(
            &self.hamiltonian,
            &self.context.parameters,
            self.context.method,
        )?;
        self.context.gradients.clone_from(&gradients);
        self.context.grad_evaluations += 1;
        Ok(gradients)
    }

    /// Perform one VQE optimization step using gradient descent
    pub fn step(&mut self, learning_rate: f64) -> Result<VQEIteration> {
        let energy = self.evaluate_energy()?;
        let gradients = self.compute_gradient()?;

        // Gradient descent update: θ ← θ - η·∇E
        for (i, &grad) in gradients.iter().enumerate() {
            self.context.parameters[i] -= learning_rate * grad;
        }

        let gradient_norm = gradients.iter().map(|g| g * g).sum::<f64>().sqrt();

        let iteration = VQEIteration {
            iteration: self.history.len(),
            parameters: self.context.parameters.clone(),
            energy,
            gradient_norm,
            func_evals: self.context.func_evaluations,
            grad_evals: self.context.grad_evaluations,
        };

        self.history.push(iteration.clone());
        Ok(iteration)
    }

    /// Run VQE optimization until convergence
    pub fn optimize(&mut self, learning_rate: f64) -> Result<VQEResult> {
        while !self.is_converged()? {
            let iteration = self.step(learning_rate)?;

            if iteration.iteration >= self.convergence.max_iterations {
                break;
            }
            if iteration.func_evals >= self.convergence.max_func_evals {
                break;
            }
        }

        let final_iteration = self.history.last().ok_or_else(|| {
            SimulatorError::InvalidOperation("VQE optimization produced no iterations".to_string())
        })?;
        Ok(VQEResult {
            optimal_parameters: final_iteration.parameters.clone(),
            optimal_energy: final_iteration.energy,
            iterations: self.history.len(),
            converged: self.is_converged()?,
            history: self.history.clone(),
        })
    }

    /// Check convergence
    fn is_converged(&self) -> Result<bool> {
        if self.history.len() < 2 {
            return Ok(false);
        }

        let current = &self.history[self.history.len() - 1];
        let previous = &self.history[self.history.len() - 2];

        let energy_converged =
            (current.energy - previous.energy).abs() < self.convergence.energy_tolerance;
        let gradient_converged = current.gradient_norm < self.convergence.gradient_tolerance;

        Ok(energy_converged && gradient_converged)
    }

    /// Run VQE optimization using `OptiRS` optimizers (Adam, SGD, `RMSprop`, etc.)
    ///
    /// This method provides state-of-the-art optimization using `OptiRS`'s advanced
    /// machine learning optimizers, which typically converge faster and more robustly
    /// than basic gradient descent.
    ///
    /// # Arguments
    /// * `config` - `OptiRS` optimizer configuration
    ///
    /// # Returns
    /// * `VQEResult` - Optimization result with optimal parameters and energy
    ///
    /// # Example
    /// ```ignore
    /// use quantrs2_sim::autodiff_vqe::*;
    /// use quantrs2_sim::optirs_integration::*;
    ///
    /// let mut vqe = VQEWithAutodiff::new(...);
    /// let config = OptiRSConfig {
    ///     optimizer_type: OptiRSOptimizerType::Adam,
    ///     learning_rate: 0.01,
    ///     ..Default::default()
    /// };
    /// let result = vqe.optimize_with_optirs(config)?;
    /// ```
    #[cfg(feature = "optimize")]
    pub fn optimize_with_optirs(&mut self, config: OptiRSConfig) -> Result<VQEResult> {
        use std::time::Instant;

        let start_time = Instant::now();
        let mut optimizer = OptiRSQuantumOptimizer::new(config)?;

        while !self.is_converged()? && !optimizer.has_converged() {
            // Evaluate energy and gradients
            let energy = self.evaluate_energy()?;
            let gradients = self.compute_gradient()?;

            // OptiRS optimization step
            let new_params =
                optimizer.optimize_step(&self.context.parameters, &gradients, energy)?;

            // Update parameters
            self.context.parameters = new_params;

            // Record iteration
            let gradient_norm = gradients.iter().map(|g| g * g).sum::<f64>().sqrt();
            let iteration = VQEIteration {
                iteration: self.history.len(),
                parameters: self.context.parameters.clone(),
                energy,
                gradient_norm,
                func_evals: self.context.func_evaluations,
                grad_evals: self.context.grad_evaluations,
            };
            self.history.push(iteration);

            // Check maximum iterations (use VQE's convergence criteria)
            if self.history.len() >= self.convergence.max_iterations {
                break;
            }
            if self.context.func_evaluations >= self.convergence.max_func_evals {
                break;
            }
        }

        let _optimization_time = start_time.elapsed();
        let final_iteration = self.history.last().ok_or_else(|| {
            SimulatorError::InvalidOperation(
                "VQE optimization with OptiRS produced no iterations".to_string(),
            )
        })?;

        Ok(VQEResult {
            optimal_parameters: final_iteration.parameters.clone(),
            optimal_energy: final_iteration.energy,
            iterations: self.history.len(),
            converged: self.is_converged()?,
            history: self.history.clone(),
        })
    }
}

/// VQE optimization result
pub struct VQEResult {
    /// Optimal parameters found
    pub optimal_parameters: Vec<f64>,
    /// Optimal energy value
    pub optimal_energy: f64,
    /// Number of iterations performed
    pub iterations: usize,
    /// Whether optimization converged
    pub converged: bool,
    /// Full optimization history
    pub history: Vec<VQEIteration>,
}

// Helper functions

/// Compute expectation value of observable for given state
fn compute_expectation_value(
    state: &Array1<Complex64>,
    observable: &PauliOperatorSum,
) -> Result<f64> {
    let mut expectation = 0.0;

    for term in &observable.terms {
        // Compute ⟨ψ|c·P|ψ⟩ for each Pauli string P. The helper already folds the
        // term's coefficient into its result, so it must not be applied again here.
        let pauli_expectation = compute_pauli_expectation_from_state(state, term)?;
        expectation += pauli_expectation.re;
    }

    Ok(expectation)
}

/// Compute expectation value of a single Pauli string
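///
/// Works directly in the computational basis: each Pauli factor acts on a basis
/// bit as X|b⟩ = |1-b⟩, Y|b⟩ = i(-1)ᵇ|1-b⟩, Z|b⟩ = (-1)ᵇ|b⟩, so ⟨ψ|P|ψ⟩ reduces
/// to a single pass over the state's amplitudes.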
fn compute_pauli_expectation_from_state(
    state: &Array1<Complex64>,
    pauli_string: &PauliString,
) -> Result<Complex64> {
    use crate::pauli::PauliOperator;

    let num_qubits = pauli_string.num_qubits;
    let dim = 1 << num_qubits;
    let mut result = Complex64::new(0.0, 0.0);

    for (i, &amplitude) in state.iter().enumerate() {
        if i >= dim {
            break;
        }

        // Apply Pauli string to basis state |i⟩
        let mut coeff = Complex64::new(1.0, 0.0);
        let mut target_state = i;

        for (qubit, &pauli_op) in pauli_string.operators.iter().enumerate() {
            let bit = (i >> qubit) & 1;

            match pauli_op {
                PauliOperator::I => {} // Identity does nothing
                PauliOperator::X => {
                    // X flips the bit
                    target_state ^= 1 << qubit;
                }
                PauliOperator::Y => {
                    // Y flips the bit and adds phase
                    target_state ^= 1 << qubit;
                    coeff *= if bit == 0 {
                        Complex64::new(0.0, 1.0)
                    } else {
                        Complex64::new(0.0, -1.0)
                    };
                }
                PauliOperator::Z => {
                    // Z adds phase based on bit value
                    if bit == 1 {
                        coeff *= Complex64::new(-1.0, 0.0);
                    }
                }
            }
        }

        if target_state < dim {
            result += amplitude.conj() * coeff * state[target_state];
        }
    }

    Ok(result * pauli_string.coefficient)
}

/// Convenience functions for creating common ansätze
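///
/// A small sketch (counts follow from the builders below: `hardware_efficient`
/// uses one RY and one RZ per qubit per layer):
///
/// ```ignore
/// let ansatz = ansatze::hardware_efficient(3, 2);
/// assert_eq!(ansatz.num_parameters, 12); // 3 qubits × 2 layers × 2 rotations
/// ```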
pub mod ansatze {
    use super::ParametricCircuit;

    /// Create a hardware-efficient ansatz
    #[must_use]
    pub fn hardware_efficient(num_qubits: usize, num_layers: usize) -> ParametricCircuit {
        let mut circuit = ParametricCircuit::new(num_qubits);
        let mut param_idx = 0;

        for _layer in 0..num_layers {
            // Single-qubit rotations
            for qubit in 0..num_qubits {
                circuit.ry(qubit, param_idx);
                param_idx += 1;
                circuit.rz(qubit, param_idx);
                param_idx += 1;
            }

            // Entangling layer (would need CNOT gates - simplified here)
            // In practice, would add parametric CNOT gates
        }

        circuit
    }

    /// Create a QAOA ansatz for the `MaxCut` problem
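    ///
    /// Follows the usual QAOA structure ∏ₗ e^{-iβₗB} e^{-iγₗC} with cost
    /// C = Σ_{(i,j)∈E} ZᵢZⱼ and mixer B = Σᵢ Xᵢ; as the inline comments note, the
    /// two-qubit ZZ evolution is approximated here with per-edge RZ pairs.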
    #[must_use]
    pub fn qaoa_maxcut(
        num_qubits: usize,
        num_layers: usize,
        edges: &[(usize, usize)],
    ) -> ParametricCircuit {
        let mut circuit = ParametricCircuit::new(num_qubits);
        let mut param_idx = 0;

        // Initial superposition (shared parameter, set to π/2 for an H-gate equivalent)
        for qubit in 0..num_qubits {
            circuit.ry(qubit, param_idx);
        }
        // Advance past the shared initial-layer parameter so the first cost
        // layer does not reuse it
        param_idx += 1;

        for _layer in 0..num_layers {
            // Problem Hamiltonian evolution (ZZ terms)
            for &(i, j) in edges {
                // Would implement ZZ rotation here
                // For now, approximate with RZ gates
                circuit.rz(i, param_idx);
                circuit.rz(j, param_idx);
                param_idx += 1;
            }

            // Mixer Hamiltonian evolution (X terms)
            for qubit in 0..num_qubits {
                circuit.rx(qubit, param_idx);
                param_idx += 1;
            }
        }

        circuit
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parametric_rx_matrix() {
        let rx_gate = ParametricRX {
            qubit: 0,
            param_idx: 0,
        };
        let params = vec![PI / 2.0];
        let matrix = rx_gate
            .matrix(&params)
            .expect("RX gate matrix computation should succeed");

        // RX(π/2) should be approximately [[1/√2, -i/√2], [-i/√2, 1/√2]]
        let expected_val = 1.0 / 2.0_f64.sqrt();
        assert!((matrix[[0, 0]].re - expected_val).abs() < 1e-10);
        assert!((matrix[[0, 1]].im + expected_val).abs() < 1e-10);
    }

    #[test]
    fn test_autodiff_context() {
        let params = vec![1.0, 2.0, 3.0];
        let mut context = AutoDiffContext::new(params.clone(), GradientMethod::ParameterShift);

        assert_eq!(context.parameters, params);
        assert_eq!(context.gradients.len(), 3);

        context.update_parameters(vec![4.0, 5.0, 6.0]);
        assert_eq!(context.parameters, vec![4.0, 5.0, 6.0]);
    }

    #[test]
    fn test_parametric_circuit_creation() {
        let mut circuit = ParametricCircuit::new(2);
        circuit.rx(0, 0);
        circuit.ry(1, 1);

        assert_eq!(circuit.gates.len(), 2);
        assert_eq!(circuit.num_parameters, 2);
    }

    #[test]
    fn test_hardware_efficient_ansatz() {
        let ansatz = ansatze::hardware_efficient(3, 2);
        assert_eq!(ansatz.num_qubits, 3);
        assert!(ansatz.num_parameters > 0);
    }
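
    /// Sketch-style check using only this module's API: for a rotation gate
    /// U(θ) = exp(-iθP/2), the ±π/2-shifted matrices satisfy
    /// (U(θ+π/2) - U(θ-π/2)) / (2√2) = dU/dθ at the matrix level (the familiar
    /// factor-1/2 shift rule applies to expectation values, not matrices).
    #[test]
    fn test_ry_gradient_matches_shifted_matrices() {
        let ry_gate = ParametricRY {
            qubit: 0,
            param_idx: 0,
        };
        let params = vec![0.37];
        let analytic = ry_gate
            .gradient(&params, 0)
            .expect("RY gradient computation should succeed");
        let (plus, minus) = ry_gate
            .parameter_shift_gradient(&params, 0)
            .expect("shifted matrix computation should succeed");

        let scale = 2.0 * 2.0_f64.sqrt();
        for i in 0..2 {
            for j in 0..2 {
                let shifted = (plus[[i, j]] - minus[[i, j]]) / scale;
                assert!((shifted - analytic[[i, j]]).norm() < 1e-10);
            }
        }
    }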
}