use crate::error::{MLError, Result};
use scirs2_core::ndarray::{Array1, ArrayView1};
use std::collections::HashMap;
use std::fmt;

/// Optimization methods available for training quantum ML models.
#[derive(Debug, Clone, Copy)]
pub enum OptimizationMethod {
    /// Plain gradient descent with a fixed learning rate.
    GradientDescent,

    /// Adam optimizer with adaptive first and second moment estimates.
    Adam,

    /// Simultaneous Perturbation Stochastic Approximation.
    SPSA,

    /// Limited-memory BFGS.
    LBFGS,

    /// Quantum natural gradient descent.
    QuantumNaturalGradient,

    /// Adam optimizer backed by SciRS2.
    SciRS2Adam,

    /// L-BFGS optimizer backed by SciRS2.
    SciRS2LBFGS,

    /// Conjugate gradient optimizer backed by SciRS2.
    SciRS2CG,
}

/// A concrete optimizer instance holding its hyperparameters.
#[derive(Debug, Clone)]
pub enum Optimizer {
    /// Plain gradient descent.
    GradientDescent {
        /// Step size used for each parameter update.
        learning_rate: f64,
    },

    /// Adam optimizer.
    Adam {
        /// Step size used for each parameter update.
        learning_rate: f64,

        /// Exponential decay rate for the first moment estimates.
        beta1: f64,

        /// Exponential decay rate for the second moment estimates.
        beta2: f64,

        /// Small constant added for numerical stability.
        epsilon: f64,
    },

    /// Simultaneous Perturbation Stochastic Approximation.
    SPSA {
        /// Step size used for each parameter update.
        learning_rate: f64,

        /// Magnitude of the simultaneous perturbation.
        perturbation: f64,
    },

    /// Optimizer backed by SciRS2, identified by method name and a
    /// free-form configuration map.
    SciRS2 {
        /// SciRS2 method name (for example "adam", "lbfgs", or "cg").
        method: String,
        /// Hyperparameters passed to the SciRS2 backend.
        config: HashMap<String, f64>,
    },
}

impl Optimizer {
    /// Creates an optimizer with default hyperparameters for the given method.
    pub fn new(method: OptimizationMethod) -> Self {
        match method {
            OptimizationMethod::GradientDescent => Optimizer::GradientDescent {
                learning_rate: 0.01,
            },
            OptimizationMethod::Adam => Optimizer::Adam {
                learning_rate: 0.01,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
            OptimizationMethod::SPSA => Optimizer::SPSA {
                learning_rate: 0.01,
                perturbation: 0.01,
            },
            // L-BFGS has no dedicated implementation yet; fall back to Adam defaults.
            OptimizationMethod::LBFGS => Optimizer::Adam {
                learning_rate: 0.01,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
            // Quantum natural gradient has no dedicated implementation yet;
            // fall back to Adam defaults.
            OptimizationMethod::QuantumNaturalGradient => Optimizer::Adam {
                learning_rate: 0.01,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
            OptimizationMethod::SciRS2Adam => {
                let mut config = HashMap::new();
                config.insert("learning_rate".to_string(), 0.001);
                config.insert("beta1".to_string(), 0.9);
                config.insert("beta2".to_string(), 0.999);
                config.insert("epsilon".to_string(), 1e-8);
                Optimizer::SciRS2 {
                    method: "adam".to_string(),
                    config,
                }
            }
            OptimizationMethod::SciRS2LBFGS => {
                let mut config = HashMap::new();
                config.insert("m".to_string(), 10.0);
                config.insert("c1".to_string(), 1e-4);
                config.insert("c2".to_string(), 0.9);
                Optimizer::SciRS2 {
                    method: "lbfgs".to_string(),
                    config,
                }
            }
            OptimizationMethod::SciRS2CG => {
                let mut config = HashMap::new();
                config.insert("beta_method".to_string(), 0.0);
                config.insert("restart_threshold".to_string(), 100.0);
                Optimizer::SciRS2 {
                    method: "cg".to_string(),
                    config,
                }
            }
        }
    }

    /// Updates `parameters` in place using the supplied `gradients`.
    ///
    /// Note: Adam, SPSA, and the SciRS2 backends currently apply a simplified
    /// first-order step; their additional hyperparameters are stored but not
    /// yet used here. The `_iteration` counter is accepted for future stateful
    /// optimizers and is currently unused.
    pub fn update_parameters(
        &self,
        parameters: &mut Array1<f64>,
        gradients: &ArrayView1<f64>,
        _iteration: usize,
    ) -> Result<()> {
        match self {
            Optimizer::GradientDescent { learning_rate } => {
                for i in 0..parameters.len() {
                    parameters[i] -= learning_rate * gradients[i];
                }
                Ok(())
            }
            Optimizer::Adam { learning_rate, .. } => {
                // Simplified update: moment estimates are not tracked yet.
                for i in 0..parameters.len() {
                    parameters[i] -= learning_rate * gradients[i];
                }
                Ok(())
            }
            Optimizer::SPSA { learning_rate, .. } => {
                // Simplified update: uses the supplied gradients directly
                // instead of a stochastic perturbation estimate.
                for i in 0..parameters.len() {
                    parameters[i] -= learning_rate * gradients[i];
                }
                Ok(())
            }
            Optimizer::SciRS2 { method, config } => {
                let learning_rate = config.get("learning_rate").copied().unwrap_or(0.001);
                // All SciRS2 methods currently share the same simplified
                // first-order update.
                match method.as_str() {
                    "adam" => {
                        for i in 0..parameters.len() {
                            parameters[i] -= learning_rate * gradients[i];
                        }
                    }
                    "lbfgs" => {
                        for i in 0..parameters.len() {
                            parameters[i] -= learning_rate * gradients[i];
                        }
                    }
                    "cg" => {
                        for i in 0..parameters.len() {
                            parameters[i] -= learning_rate * gradients[i];
                        }
                    }
                    _ => {
                        return Err(MLError::InvalidConfiguration(format!(
                            "Unknown SciRS2 optimizer method: {}",
                            method
                        )));
                    }
                }
                Ok(())
            }
        }
    }
}

/// Objective function to be minimized by an [`Optimizer`].
pub trait ObjectiveFunction {
    /// Evaluates the objective at the given parameters.
    fn evaluate(&self, parameters: &ArrayView1<f64>) -> Result<f64>;

    /// Estimates the gradient by forward finite differences.
    fn gradient(&self, parameters: &ArrayView1<f64>) -> Result<Array1<f64>> {
        let epsilon = 1e-6;
        let n = parameters.len();
        let mut gradient = Array1::zeros(n);

        let f0 = self.evaluate(parameters)?;

        for i in 0..n {
            let mut params_plus = parameters.to_owned();
            params_plus[i] += epsilon;

            let f_plus = self.evaluate(&params_plus.view())?;

            gradient[i] = (f_plus - f0) / epsilon;
        }

        Ok(gradient)
    }
}
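
// A minimal, test-only sketch of how the default finite-difference `gradient`
// can be exercised. `Quadratic` and the module/test names are illustrative and
// not part of the crate API; the test assumes `MLError` implements `Debug`
// (needed for `unwrap`).
#[cfg(test)]
mod objective_gradient_tests {
    use super::*;

    /// f(x) = sum_i (x_i - t_i)^2, whose analytic gradient is 2 * (x - t).
    struct Quadratic {
        target: Array1<f64>,
    }

    impl ObjectiveFunction for Quadratic {
        fn evaluate(&self, parameters: &ArrayView1<f64>) -> Result<f64> {
            Ok(parameters
                .iter()
                .zip(self.target.iter())
                .map(|(p, t)| (p - t).powi(2))
                .sum())
        }
    }

    #[test]
    fn finite_difference_matches_analytic_gradient() {
        let objective = Quadratic {
            target: Array1::from(vec![1.0, -2.0, 0.5]),
        };
        let params = Array1::from(vec![0.0, 0.0, 0.0]);

        let numeric = objective.gradient(&params.view()).unwrap();
        let analytic = (&params - &objective.target) * 2.0;

        for (n, a) in numeric.iter().zip(analytic.iter()) {
            // Forward differences are only first-order accurate, so use a
            // loose tolerance.
            assert!((n - a).abs() < 1e-3);
        }
    }
}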

impl fmt::Display for OptimizationMethod {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            OptimizationMethod::GradientDescent => write!(f, "Gradient Descent"),
            OptimizationMethod::Adam => write!(f, "Adam"),
            OptimizationMethod::SPSA => write!(f, "SPSA"),
            OptimizationMethod::LBFGS => write!(f, "L-BFGS"),
            OptimizationMethod::QuantumNaturalGradient => write!(f, "Quantum Natural Gradient"),
            OptimizationMethod::SciRS2Adam => write!(f, "SciRS2 Adam"),
            OptimizationMethod::SciRS2LBFGS => write!(f, "SciRS2 L-BFGS"),
            OptimizationMethod::SciRS2CG => write!(f, "SciRS2 Conjugate Gradient"),
        }
    }
}

impl fmt::Display for Optimizer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Optimizer::GradientDescent { learning_rate } => {
                write!(f, "Gradient Descent (learning_rate: {})", learning_rate)
            }
            Optimizer::Adam {
                learning_rate,
                beta1,
                beta2,
                epsilon,
            } => {
                write!(
                    f,
                    "Adam (learning_rate: {}, beta1: {}, beta2: {}, epsilon: {})",
                    learning_rate, beta1, beta2, epsilon
                )
            }
            Optimizer::SPSA {
                learning_rate,
                perturbation,
            } => {
                write!(
                    f,
                    "SPSA (learning_rate: {}, perturbation: {})",
                    learning_rate, perturbation
                )
            }
            Optimizer::SciRS2 { method, config } => {
                write!(f, "SciRS2 {} with config: {:?}", method, config)
            }
        }
    }
}
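
// A minimal usage sketch for `Optimizer`, assuming only the items defined in
// this module: it checks the plain gradient-descent step performed by
// `update_parameters` and the error path for an unknown SciRS2 method name.
// The module/test names are illustrative, and `unwrap` assumes `MLError`
// implements `Debug`.
#[cfg(test)]
mod optimizer_step_tests {
    use super::*;

    #[test]
    fn gradient_descent_steps_against_the_gradient() {
        let optimizer = Optimizer::new(OptimizationMethod::GradientDescent);
        let mut params = Array1::from(vec![1.0, -1.0]);
        let gradients = Array1::from(vec![0.5, -0.5]);

        optimizer
            .update_parameters(&mut params, &gradients.view(), 0)
            .unwrap();

        // With the default learning rate of 0.01, each parameter moves by
        // -0.01 * gradient.
        assert!((params[0] - 0.995).abs() < 1e-12);
        assert!((params[1] + 0.995).abs() < 1e-12);
    }

    #[test]
    fn unknown_scirs2_method_is_rejected() {
        let optimizer = Optimizer::SciRS2 {
            method: "not-a-method".to_string(),
            config: HashMap::new(),
        };
        let mut params = Array1::from(vec![0.0]);
        let gradients = Array1::from(vec![1.0]);

        assert!(optimizer
            .update_parameters(&mut params, &gradients.view(), 0)
            .is_err());
    }
}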