quantrs2_symengine_pure/optimization/mod.rs

use std::collections::HashMap;

use crate::error::SymEngineResult;
use crate::expr::Expression;

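/// Evaluates the symbolic gradient of `expr` with respect to `params` at the
/// point given by `values`, returning one partial derivative per parameter.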
pub fn gradient_at(
    expr: &Expression,
    params: &[Expression],
    values: &HashMap<String, f64>,
) -> SymEngineResult<Vec<f64>> {
    let gradient = expr.gradient(params);
    gradient.iter().map(|g| g.eval(values)).collect()
}

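/// Evaluates the symbolic Hessian of `expr` with respect to `params` at the
/// point given by `values`, returning the matrix of second partial derivatives.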
pub fn hessian_at(
    expr: &Expression,
    params: &[Expression],
    values: &HashMap<String, f64>,
) -> SymEngineResult<Vec<Vec<f64>>> {
    let hessian = expr.hessian(params);
    hessian
        .iter()
        .map(|row| row.iter().map(|h| h.eval(values)).collect())
        .collect()
}

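/// Two-term parameter-shift rule for estimating gradients of quantum
/// expectation values from shifted evaluations:
/// `dE/dθ = (E(θ + s) - E(θ - s)) / (2 sin s)`.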
pub struct ParameterShiftRule {
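    /// Shift amount `s`; the default of π/2 is the standard choice.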
    pub shift: f64,
}

impl ParameterShiftRule {
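    /// Creates a rule with the standard shift of π/2.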
    #[must_use]
    pub const fn new() -> Self {
        Self {
            shift: std::f64::consts::FRAC_PI_2,
        }
    }

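    /// Creates a rule with a custom shift `s`; the rule is only well defined
    /// when `sin(s) != 0`.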
    #[must_use]
    pub const fn with_shift(shift: f64) -> Self {
        Self { shift }
    }

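    /// Computes the gradient of `energy_fn` at `params` by shifting one
    /// parameter at a time: `dE/dθ_i = (E(θ_i + s) - E(θ_i - s)) / (2 sin s)`.
    /// This is exact when the energy is sinusoidal in each parameter, as for
    /// expectation values of circuits built from Pauli rotations.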
    pub fn compute_gradient<F>(&self, energy_fn: F, params: &[f64]) -> Vec<f64>
    where
        F: Fn(&[f64]) -> f64,
    {
        let n = params.len();
        let mut gradient = vec![0.0; n];
        let s = self.shift;
        let denominator = 2.0 * s.sin();

        for i in 0..n {
            let mut params_plus = params.to_vec();
            let mut params_minus = params.to_vec();

            params_plus[i] += s;
            params_minus[i] -= s;

            let e_plus = energy_fn(&params_plus);
            let e_minus = energy_fn(&params_minus);

            gradient[i] = (e_plus - e_minus) / denominator;
        }

        gradient
    }
}

impl Default for ParameterShiftRule {
    fn default() -> Self {
        Self::new()
    }
}

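/// Gradient-descent optimizer for a symbolic VQE energy expression.
///
/// A minimal usage sketch (marked `ignore`; it assumes only the `Expression`
/// API exercised by the tests below):
///
/// ```ignore
/// use std::collections::HashMap;
///
/// // Minimize E(theta) = theta^2 starting from theta = 3.
/// let theta = Expression::symbol("theta");
/// let optimizer = VqeOptimizer::new(theta.clone() * theta.clone(), vec![theta], 0.1);
///
/// let mut values = HashMap::new();
/// values.insert("theta".to_string(), 3.0);
/// let energy = optimizer.step(&mut values).expect("step should evaluate");
/// ```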
pub struct VqeOptimizer {
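    /// Symbolic energy expression to minimize.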
    pub energy: Expression,
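    /// Variational parameters appearing in the energy expression.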
    pub params: Vec<Expression>,
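    /// Step size used for each gradient-descent update.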
    pub learning_rate: f64,
}

impl VqeOptimizer {
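    /// Creates an optimizer that minimizes `energy` over `params` with the
    /// given learning rate.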
    #[allow(clippy::missing_const_for_fn)]
    pub fn new(energy: Expression, params: Vec<Expression>, learning_rate: f64) -> Self {
        Self {
            energy,
            params,
            learning_rate,
        }
    }

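    /// Evaluates the energy gradient at the current parameter values.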
    pub fn compute_gradient(&self, values: &HashMap<String, f64>) -> SymEngineResult<Vec<f64>> {
        gradient_at(&self.energy, &self.params, values)
    }

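    /// Performs one in-place gradient-descent update on `values` and returns
    /// the energy at the updated point.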
    pub fn step(&self, values: &mut HashMap<String, f64>) -> SymEngineResult<f64> {
        let gradient = self.compute_gradient(values)?;

        // Move each named parameter one step against its gradient.
        for (param, grad) in self.params.iter().zip(gradient.iter()) {
            if let Some(name) = param.as_symbol() {
                if let Some(value) = values.get_mut(name) {
                    *value -= self.learning_rate * grad;
                }
            }
        }

        self.energy.eval(values)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gradient_at() {
        // E = theta^2, so dE/dtheta = 2 * theta = 6.0 at theta = 3.0.
        let theta = Expression::symbol("theta");
        let energy = theta.clone() * theta.clone();
        let params = vec![theta];

        let mut values = HashMap::new();
        values.insert("theta".to_string(), 3.0);

        let grad = gradient_at(&energy, &params, &values).expect("should compute gradient");

        assert!((grad[0] - 6.0).abs() < 1e-6);
    }

    #[test]
    fn test_parameter_shift_rule() {
        let psr = ParameterShiftRule::new();

        // E = sin(theta), so dE/dtheta = cos(theta) = 1.0 at theta = 0.0.
        let gradient = psr.compute_gradient(|params| params[0].sin(), &[0.0]);

        assert!((gradient[0] - 1.0).abs() < 1e-6);
    }
}