quantrs2_symengine_pure/optimization/mod.rs

//! VQE/QAOA optimization support.
//!
//! This module provides specialized support for variational quantum
//! algorithm optimization.

use std::collections::HashMap;

use crate::error::SymEngineResult;
use crate::expr::Expression;

/// Compute the gradient of an expression at specific parameter values.
///
/// This is tailored to VQE optimization loops, where gradients must be
/// evaluated at many different parameter values.
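///
/// # Examples
///
/// A minimal sketch, mirroring `test_gradient_at` in this module's tests
/// (marked `ignore`: import paths are elided here):
///
/// ```ignore
/// use std::collections::HashMap;
///
/// // E(θ) = θ², so ∂E/∂θ = 2θ.
/// let theta = Expression::symbol("theta");
/// let energy = theta.clone() * theta.clone();
/// let params = vec![theta];
///
/// let mut values = HashMap::new();
/// values.insert("theta".to_string(), 3.0);
///
/// let grad = gradient_at(&energy, &params, &values).expect("gradient should evaluate");
/// assert!((grad[0] - 6.0).abs() < 1e-6); // 2θ at θ = 3
/// ```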
pub fn gradient_at(
    expr: &Expression,
    params: &[Expression],
    values: &HashMap<String, f64>,
) -> SymEngineResult<Vec<f64>> {
    let gradient = expr.gradient(params);
    gradient.iter().map(|g| g.eval(values)).collect()
}

/// Compute the Hessian matrix at specific parameter values.
pub fn hessian_at(
    expr: &Expression,
    params: &[Expression],
    values: &HashMap<String, f64>,
) -> SymEngineResult<Vec<Vec<f64>>> {
    let hessian = expr.hessian(params);
    hessian
        .iter()
        .map(|row| row.iter().map(|h| h.eval(values)).collect())
        .collect()
}

/// Parameter-shift rule for quantum gradient estimation.
///
/// For a parametric gate U(θ) whose generator has two eigenvalues (e.g. a
/// Pauli rotation), the expectation value ⟨H⟩(θ) is sinusoidal in θ and the
/// gradient is given exactly by:
///
/// ∂⟨H⟩/∂θ = (⟨H⟩(θ+s) - ⟨H⟩(θ-s)) / (2 sin(s))
///
/// where the shift s is typically π/2.
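///
/// For example (matching the unit test below), with ⟨H⟩(θ) = sin(θ), s = π/2
/// and θ = 0:
///
/// (sin(π/2) - sin(-π/2)) / (2 sin(π/2)) = (1 - (-1)) / 2 = 1 = cos(0)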
pub struct ParameterShiftRule {
    /// Shift amount (default: π/2)
    pub shift: f64,
}

impl ParameterShiftRule {
    /// Create a new parameter-shift rule with default shift (π/2)
    #[must_use]
    pub const fn new() -> Self {
        Self {
            shift: std::f64::consts::FRAC_PI_2,
        }
    }

    /// Create with custom shift
    #[must_use]
    pub const fn with_shift(shift: f64) -> Self {
        Self { shift }
    }

    /// Compute gradient using parameter-shift rule
    ///
    /// # Arguments
    /// * `energy_fn` - Function that computes energy expectation value
    /// * `params` - Current parameter values
    ///
    /// # Returns
    /// Vector of gradient components
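    ///
    /// # Examples
    ///
    /// A minimal sketch, mirroring the unit test below; the closure stands in
    /// for a real energy-estimation routine:
    ///
    /// ```ignore
    /// let psr = ParameterShiftRule::new();
    ///
    /// // ⟨H⟩(θ) = sin(θ); the rule recovers d/dθ sin(θ) = cos(θ) exactly.
    /// let grad = psr.compute_gradient(|p| p[0].sin(), &[0.0]);
    /// assert!((grad[0] - 1.0).abs() < 1e-6); // cos(0) = 1
    /// ```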
    pub fn compute_gradient<F>(&self, energy_fn: F, params: &[f64]) -> Vec<f64>
    where
        F: Fn(&[f64]) -> f64,
    {
        let n = params.len();
        let mut gradient = vec![0.0; n];
        let s = self.shift;
        let denominator = 2.0 * s.sin();

        for i in 0..n {
            let mut params_plus = params.to_vec();
            let mut params_minus = params.to_vec();

            params_plus[i] += s;
            params_minus[i] -= s;

            let e_plus = energy_fn(&params_plus);
            let e_minus = energy_fn(&params_minus);

            gradient[i] = (e_plus - e_minus) / denominator;
        }

        gradient
    }
}

impl Default for ParameterShiftRule {
    fn default() -> Self {
        Self::new()
    }
}

/// VQE optimizer state
pub struct VqeOptimizer {
    /// Energy expression (symbolic Hamiltonian expectation value)
    pub energy: Expression,
    /// Parameters
    pub params: Vec<Expression>,
    /// Learning rate
    pub learning_rate: f64,
}

impl VqeOptimizer {
    /// Create a new VQE optimizer
    #[allow(clippy::missing_const_for_fn)] // Expression contains non-const internals
    pub fn new(energy: Expression, params: Vec<Expression>, learning_rate: f64) -> Self {
        Self {
            energy,
            params,
            learning_rate,
        }
    }

    /// Compute gradient at current parameter values
    pub fn compute_gradient(&self, values: &HashMap<String, f64>) -> SymEngineResult<Vec<f64>> {
        gradient_at(&self.energy, &self.params, values)
    }

    /// Perform one optimization step
    pub fn step(&self, values: &mut HashMap<String, f64>) -> SymEngineResult<f64> {
        let gradient = self.compute_gradient(values)?;

        // Update parameters: θ_new = θ_old - lr * ∇E
        for (param, grad) in self.params.iter().zip(gradient.iter()) {
            if let Some(name) = param.as_symbol() {
                if let Some(value) = values.get_mut(name) {
                    *value -= self.learning_rate * grad;
                }
            }
        }

        // Return new energy
        self.energy.eval(values)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::ops::trig;

    #[test]
    fn test_gradient_at() {
        // E(θ) = θ²
        let theta = Expression::symbol("theta");
        let energy = theta.clone() * theta.clone();
        let params = vec![theta];

        let mut values = HashMap::new();
        values.insert("theta".to_string(), 3.0);

        let grad = gradient_at(&energy, &params, &values).expect("should compute gradient");

        // ∂(θ²)/∂θ = 2θ, at θ=3 should be 6
        assert!((grad[0] - 6.0).abs() < 1e-6);
    }
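
    #[test]
    fn test_hessian_at() {
        // Sketch mirroring test_gradient_at above, assuming Expression::hessian
        // parallels Expression::gradient as used in hessian_at.
        // For E(θ) = θ², the Hessian is the 1×1 matrix [∂²E/∂θ²] = [2].
        let theta = Expression::symbol("theta");
        let energy = theta.clone() * theta.clone();
        let params = vec![theta];

        let mut values = HashMap::new();
        values.insert("theta".to_string(), 3.0);

        let hess = hessian_at(&energy, &params, &values).expect("should compute Hessian");

        assert!((hess[0][0] - 2.0).abs() < 1e-6);
    }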

    #[test]
    fn test_parameter_shift_rule() {
        let psr = ParameterShiftRule::new();

        // Test with f(θ) = sin(θ), where f'(θ) = cos(θ)
        let gradient = psr.compute_gradient(
            |params| params[0].sin(),
            &[0.0], // θ = 0
        );

        // cos(0) = 1
        assert!((gradient[0] - 1.0).abs() < 1e-6);
    }
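
    #[test]
    fn test_vqe_optimizer_step() {
        // Sketch of one gradient-descent step on E(θ) = θ², assuming
        // Expression::as_symbol yields the symbol name, as `step` relies on.
        let theta = Expression::symbol("theta");
        let energy = theta.clone() * theta.clone();
        let optimizer = VqeOptimizer::new(energy, vec![theta], 0.1);

        let mut values = HashMap::new();
        values.insert("theta".to_string(), 3.0);

        let new_energy = optimizer.step(&mut values).expect("step should succeed");

        // θ_new = 3.0 - 0.1 * 6.0 = 2.4, so E(θ_new) = 2.4² = 5.76.
        assert!((values["theta"] - 2.4).abs() < 1e-6);
        assert!((new_energy - 5.76).abs() < 1e-6);
    }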
181}