use crate::error::{Result, SimulatorError};
use scirs2_core::ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::time::Duration;

use optirs_core::optimizers::{Adagrad, Adam, Optimizer, RMSprop, SGD};

/// Optimizer algorithms available through the OptiRS backend.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptiRSOptimizerType {
    /// Stochastic gradient descent, optionally with momentum.
    SGD { momentum: bool },
    /// Adam adaptive moment estimation.
    Adam,
    /// RMSprop with a running average of squared gradients.
    RMSprop,
    /// Adagrad with per-parameter learning-rate accumulation.
    Adagrad,
}

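/// Configuration for an [`OptiRSQuantumOptimizer`].
///
/// The defaults (Adam, learning rate 0.01, gradient clipping at norm 1.0,
/// parameter bounds of [-pi, pi]) are a general-purpose starting point and
/// should be tuned per problem.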
#[derive(Debug, Clone)]
pub struct OptiRSConfig {
    /// Which OptiRS optimizer to use.
    pub optimizer_type: OptiRSOptimizerType,
    /// Base learning rate passed to the optimizer.
    pub learning_rate: f64,
    /// Clip gradients to this L2 norm before each update (if set).
    pub gradient_clip_norm: Option<f64>,
    /// L2 regularization strength added to the gradients.
    pub l2_regularization: f64,
    /// Maximum number of optimization iterations.
    pub max_iterations: usize,
    /// Tolerance on the recent cost variance used by the convergence check.
    pub convergence_tolerance: f64,
    /// Optional (min, max) clamp applied to parameters after each step.
    pub parameter_bounds: Option<(f64, f64)>,
    /// Momentum coefficient used when `SGD { momentum: true }` is selected.
    pub momentum: f64,
}

impl Default for OptiRSConfig {
    fn default() -> Self {
        Self {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.01,
            gradient_clip_norm: Some(1.0),
            l2_regularization: 0.0,
            max_iterations: 1000,
            convergence_tolerance: 1e-6,
            parameter_bounds: Some((-std::f64::consts::PI, std::f64::consts::PI)),
            momentum: 0.9,
        }
    }
}

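/// Step-wise quantum parameter optimizer backed by OptiRS.
///
/// The caller evaluates the cost function and its gradients externally and
/// feeds them to [`optimize_step`](Self::optimize_step); the optimizer applies
/// gradient clipping, L2 regularization, the selected update rule, and
/// parameter bounds, while tracking the best cost seen and the convergence
/// history.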
pub struct OptiRSQuantumOptimizer {
    /// Optimizer configuration.
    config: OptiRSConfig,
    /// Underlying OptiRS optimizer instance.
    optimizer: OptiRSOptimizerImpl,
    /// Number of optimization steps taken so far.
    iteration: usize,
    /// Parameters associated with the lowest cost seen so far.
    best_parameters: Option<Vec<f64>>,
    /// Lowest cost seen so far.
    best_cost: f64,
    /// Cost reported at each step.
    cost_history: Vec<f64>,
    /// L2 norm of the (processed) gradients at each step.
    gradient_norms: Vec<f64>,
}

/// Concrete OptiRS optimizer instance, selected by `OptiRSOptimizerType`.
enum OptiRSOptimizerImpl {
    SGD(SGD<f64>),
    Adam(Adam<f64>),
    RMSprop(RMSprop<f64>),
    Adagrad(Adagrad<f64>),
}

impl OptiRSQuantumOptimizer {
    /// Create a new optimizer from the given configuration.
    pub fn new(config: OptiRSConfig) -> Result<Self> {
        let optimizer = Self::create_optimizer(&config)?;

        Ok(Self {
            config,
            optimizer,
            iteration: 0,
            best_parameters: None,
            best_cost: f64::INFINITY,
            cost_history: Vec::new(),
            gradient_norms: Vec::new(),
        })
    }

    /// Instantiate the OptiRS optimizer selected by the configuration.
    fn create_optimizer(config: &OptiRSConfig) -> Result<OptiRSOptimizerImpl> {
        let optimizer = match config.optimizer_type {
            OptiRSOptimizerType::SGD { momentum } => {
                let sgd = SGD::new(config.learning_rate);
                if momentum {
                    OptiRSOptimizerImpl::SGD(sgd.with_momentum(config.momentum))
                } else {
                    OptiRSOptimizerImpl::SGD(sgd)
                }
            }
            OptiRSOptimizerType::Adam => OptiRSOptimizerImpl::Adam(Adam::new(config.learning_rate)),
            OptiRSOptimizerType::RMSprop => {
                OptiRSOptimizerImpl::RMSprop(RMSprop::new(config.learning_rate))
            }
            OptiRSOptimizerType::Adagrad => {
                OptiRSOptimizerImpl::Adagrad(Adagrad::new(config.learning_rate))
            }
        };

        Ok(optimizer)
    }

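    /// Perform one optimization step.
    ///
    /// `parameters` are the current parameter values, `gradients` the cost
    /// gradients evaluated at those parameters, and `cost` the cost at those
    /// parameters. Returns the updated (and, if configured, clamped)
    /// parameters.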
    pub fn optimize_step(
        &mut self,
        parameters: &[f64],
        gradients: &[f64],
        cost: f64,
    ) -> Result<Vec<f64>> {
        let params_array = Array1::from_vec(parameters.to_vec());
        let mut grads_array = Array1::from_vec(gradients.to_vec());

        // Clip the gradient to the configured L2 norm, if requested.
        if let Some(clip_norm) = self.config.gradient_clip_norm {
            let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
            if grad_norm > clip_norm {
                grads_array = &grads_array * (clip_norm / grad_norm);
            }
        }

        // Add the L2 regularization term to the gradient.
        if self.config.l2_regularization > 0.0 {
            grads_array = &grads_array + &(&params_array * self.config.l2_regularization);
        }

        let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
        self.gradient_norms.push(grad_norm);

        // Apply the selected OptiRS update rule.
        let new_params = match &mut self.optimizer {
            OptiRSOptimizerImpl::SGD(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("SGD step failed: {e}")))?,
            OptiRSOptimizerImpl::Adam(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("Adam step failed: {e}")))?,
            OptiRSOptimizerImpl::RMSprop(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("RMSprop step failed: {e}"))
                })?
            }
            OptiRSOptimizerImpl::Adagrad(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("Adagrad step failed: {e}"))
                })?
            }
        };

        // Clamp the updated parameters to the configured bounds.
        let bounded_params = if let Some((min_val, max_val)) = self.config.parameter_bounds {
            new_params.mapv(|p| p.clamp(min_val, max_val))
        } else {
            new_params
        };

        // `cost` was evaluated at the incoming `parameters`, so record those
        // (not the freshly updated values) as the best parameters seen.
        if cost < self.best_cost {
            self.best_cost = cost;
            self.best_parameters = Some(parameters.to_vec());
        }

        self.cost_history.push(cost);
        self.iteration += 1;

        Ok(bounded_params.to_vec())
    }

    /// Returns `true` once the variance of the most recent (up to 10) costs
    /// drops below the configured convergence tolerance.
    pub fn has_converged(&self) -> bool {
        if self.cost_history.len() < 2 {
            return false;
        }

        let recent_costs = &self.cost_history[self.cost_history.len().saturating_sub(10)..];
        if recent_costs.len() < 2 {
            return false;
        }

        let cost_variance = {
            let mean = recent_costs.iter().sum::<f64>() / recent_costs.len() as f64;
            recent_costs
                .iter()
                .map(|&c| (c - mean).powi(2))
                .sum::<f64>()
                / recent_costs.len() as f64
        };

        cost_variance < self.config.convergence_tolerance
    }

    /// Best parameters seen so far, if any step has been taken.
    pub fn best_parameters(&self) -> Option<&[f64]> {
        self.best_parameters.as_deref()
    }

    /// Lowest cost seen so far (`f64::INFINITY` before the first step).
    pub const fn best_cost(&self) -> f64 {
        self.best_cost
    }

    /// Cost reported at each optimization step.
    pub fn cost_history(&self) -> &[f64] {
        &self.cost_history
    }

    /// Gradient L2 norms recorded at each optimization step.
    pub fn gradient_norms(&self) -> &[f64] {
        &self.gradient_norms
    }

    /// Number of optimization steps taken so far.
    pub const fn iteration(&self) -> usize {
        self.iteration
    }

    /// Reset the optimizer state and recreate the underlying OptiRS optimizer.
    pub fn reset(&mut self) {
        self.optimizer = Self::create_optimizer(&self.config)
            .expect("recreating the optimizer from an existing config should not fail");
        self.iteration = 0;
        self.best_parameters = None;
        self.best_cost = f64::INFINITY;
        self.cost_history.clear();
        self.gradient_norms.clear();
    }
}

/// Summary of a completed OptiRS optimization run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptiRSOptimizationResult {
    /// Best parameters found.
    pub optimal_parameters: Vec<f64>,
    /// Cost at the best parameters.
    pub optimal_cost: f64,
    /// Cost reported at each step.
    pub cost_history: Vec<f64>,
    /// Gradient norms recorded at each step.
    pub gradient_norms: Vec<f64>,
    /// Total number of steps taken.
    pub iterations: usize,
    /// Whether the convergence criterion was met.
    pub converged: bool,
    /// Wall-clock time spent in the optimization loop.
    pub optimization_time: Duration,
}

impl OptiRSOptimizationResult {
    /// Build a result summary from an optimizer's current state.
    pub fn from_optimizer(
        optimizer: &OptiRSQuantumOptimizer,
        converged: bool,
        optimization_time: Duration,
    ) -> Self {
        Self {
            optimal_parameters: optimizer.best_parameters().unwrap_or(&[]).to_vec(),
            optimal_cost: optimizer.best_cost(),
            cost_history: optimizer.cost_history().to_vec(),
            gradient_norms: optimizer.gradient_norms().to_vec(),
            iterations: optimizer.iteration(),
            converged,
            optimization_time,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optirs_optimizer_creation() {
        let config = OptiRSConfig::default();
        let optimizer = OptiRSQuantumOptimizer::new(config);
        assert!(optimizer.is_ok());
    }

    #[test]
    fn test_optirs_sgd_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::SGD { momentum: true },
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![1.0, 2.0, 3.0];
        let grads = vec![0.1, 0.2, 0.15];
        let cost = 1.5;

        let new_params = optimizer.optimize_step(&params, &grads, cost).unwrap();
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_adam_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![0.5, 1.5, 2.5];
        let grads = vec![0.05, 0.15, 0.1];
        let cost = 2.3;

        let new_params = optimizer.optimize_step(&params, &grads, cost).unwrap();
        assert_eq!(new_params.len(), params.len());
        assert_eq!(optimizer.iteration(), 1);
    }

    #[test]
    fn test_optirs_convergence_check() {
        let config = OptiRSConfig {
            convergence_tolerance: 1e-6,
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        assert!(!optimizer.has_converged());

        for _ in 0..15 {
            let params = vec![1.0];
            let grads = vec![0.001];
            optimizer.optimize_step(&params, &grads, 1.0).unwrap();
        }

        assert!(optimizer.has_converged());
    }

    #[test]
    fn test_optirs_parameter_bounds() {
        let config = OptiRSConfig {
            parameter_bounds: Some((-1.0, 1.0)),
            learning_rate: 10.0,
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![0.9];
        let grads = vec![-1.0];
        let cost = 1.0;

        let new_params = optimizer.optimize_step(&params, &grads, cost).unwrap();
        assert!(new_params[0] <= 1.0);
        assert!(new_params[0] >= -1.0);
    }

    #[test]
    fn test_optirs_gradient_clipping() {
        let config = OptiRSConfig {
            gradient_clip_norm: Some(0.5),
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![1.0, 1.0];
        let large_grads = vec![10.0, 10.0];
        let cost = 1.0;

        let new_params = optimizer
            .optimize_step(&params, &large_grads, cost)
            .unwrap();
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_reset() {
        let config = OptiRSConfig::default();
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        for _ in 0..5 {
            let params = vec![1.0];
            let grads = vec![0.1];
            optimizer.optimize_step(&params, &grads, 1.0).unwrap();
        }

        assert_eq!(optimizer.iteration(), 5);

        optimizer.reset();

        assert_eq!(optimizer.iteration(), 0);
        assert_eq!(optimizer.cost_history().len(), 0);
    }

    #[test]
    fn test_all_optimizer_types() {
        let optimizers = vec![
            OptiRSOptimizerType::SGD { momentum: false },
            OptiRSOptimizerType::SGD { momentum: true },
            OptiRSOptimizerType::Adam,
            OptiRSOptimizerType::RMSprop,
            OptiRSOptimizerType::Adagrad,
        ];

        for opt_type in optimizers {
            let config = OptiRSConfig {
                optimizer_type: opt_type,
                ..Default::default()
            };
            let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

            let params = vec![1.0, 2.0];
            let grads = vec![0.1, 0.2];
            let cost = 1.0;

            let result = optimizer.optimize_step(&params, &grads, cost);
            assert!(result.is_ok(), "Failed for optimizer {opt_type:?}");
        }
    }
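
    // Illustrative sketch (not part of the original suite): drives the
    // step-wise API end to end on a simple quadratic cost f(p) = sum(p_i^2)
    // with analytic gradients. The cost function, starting point, and
    // learning rate are placeholder choices for demonstration only.
    #[test]
    fn test_optirs_quadratic_descent_sketch() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.1,
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let mut params = vec![1.0, -0.5];
        for _ in 0..200 {
            // Quadratic cost and its gradient, evaluated at the current params.
            let cost: f64 = params.iter().map(|p| p * p).sum();
            let grads: Vec<f64> = params.iter().map(|p| 2.0 * p).collect();
            params = optimizer.optimize_step(&params, &grads, cost).unwrap();
            if optimizer.has_converged() {
                break;
            }
        }

        // The initial cost is 1.25; the recorded best cost should not exceed it
        // and at least two steps must have been taken before convergence.
        assert_eq!(params.len(), 2);
        assert!(optimizer.iteration() >= 2);
        assert!(optimizer.best_cost() <= 1.25);
    }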
}