use crate::error::{Result, SimulatorError};
use scirs2_core::ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::time::Duration;

use optirs_core::optimizers::{Adagrad, Adam, Optimizer, RMSprop, SGD};

/// Optimizer backends available through OptiRS.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptiRSOptimizerType {
    /// Stochastic gradient descent, optionally with momentum.
    SGD { momentum: bool },
    /// Adam optimizer.
    Adam,
    /// RMSprop optimizer.
    RMSprop,
    /// Adagrad optimizer.
    Adagrad,
}

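/// Configuration for [`OptiRSQuantumOptimizer`].
///
/// Every field has a default; a typical override uses struct-update syntax.
/// The values below are illustrative only:
///
/// ```ignore
/// let config = OptiRSConfig {
///     optimizer_type: OptiRSOptimizerType::SGD { momentum: true },
///     learning_rate: 0.05,
///     ..Default::default()
/// };
/// ```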
#[derive(Debug, Clone)]
pub struct OptiRSConfig {
    /// Which optimizer backend to use.
    pub optimizer_type: OptiRSOptimizerType,
    /// Learning rate (step size).
    pub learning_rate: f64,
    /// Clip gradients to this L2 norm before the update, if set.
    pub gradient_clip_norm: Option<f64>,
    /// L2 regularization coefficient added to the gradients.
    pub l2_regularization: f64,
    /// Maximum number of optimization iterations.
    pub max_iterations: usize,
    /// Variance threshold on recent costs used to declare convergence.
    pub convergence_tolerance: f64,
    /// Optional (min, max) bounds applied to each parameter after every step.
    pub parameter_bounds: Option<(f64, f64)>,
    /// Momentum coefficient used when `SGD { momentum: true }` is selected.
    pub momentum: f64,
}

impl Default for OptiRSConfig {
    fn default() -> Self {
        Self {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.01,
            gradient_clip_norm: Some(1.0),
            l2_regularization: 0.0,
            max_iterations: 1000,
            convergence_tolerance: 1e-6,
            parameter_bounds: Some((-std::f64::consts::PI, std::f64::consts::PI)),
            momentum: 0.9,
        }
    }
}

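/// Gradient-based optimizer for quantum circuit parameters, built on the
/// OptiRS optimizer implementations.
///
/// A minimal usage sketch; `evaluate_cost` and `estimate_gradients` below are
/// hypothetical placeholders for the caller's circuit evaluation:
///
/// ```ignore
/// let mut opt = OptiRSQuantumOptimizer::new(OptiRSConfig::default())?;
/// let mut params = vec![0.5, -0.3];
/// for _ in 0..100 {
///     let cost = evaluate_cost(&params);        // hypothetical cost function
///     let grads = estimate_gradients(&params);  // hypothetical gradient estimate
///     params = opt.optimize_step(&params, &grads, cost)?;
///     if opt.has_converged() {
///         break;
///     }
/// }
/// ```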
pub struct OptiRSQuantumOptimizer {
    /// Optimizer configuration.
    config: OptiRSConfig,
    /// Underlying OptiRS optimizer instance.
    optimizer: OptiRSOptimizerImpl,
    /// Number of optimization steps taken so far.
    iteration: usize,
    /// Parameters at which the lowest cost was observed.
    best_parameters: Option<Vec<f64>>,
    /// Lowest cost observed so far.
    best_cost: f64,
    /// Cost reported at each step.
    cost_history: Vec<f64>,
    /// L2 norm of the processed gradients at each step.
    gradient_norms: Vec<f64>,
}

enum OptiRSOptimizerImpl {
    SGD(SGD<f64>),
    Adam(Adam<f64>),
    RMSprop(RMSprop<f64>),
    Adagrad(Adagrad<f64>),
}

impl OptiRSQuantumOptimizer {
    /// Create a new optimizer from the given configuration.
    pub fn new(config: OptiRSConfig) -> Result<Self> {
        let optimizer = Self::create_optimizer(&config)?;

        Ok(Self {
            config,
            optimizer,
            iteration: 0,
            best_parameters: None,
            best_cost: f64::INFINITY,
            cost_history: Vec::new(),
            gradient_norms: Vec::new(),
        })
    }

    fn create_optimizer(config: &OptiRSConfig) -> Result<OptiRSOptimizerImpl> {
        let optimizer = match config.optimizer_type {
            OptiRSOptimizerType::SGD { momentum } => {
                let sgd = SGD::new(config.learning_rate);
                if momentum {
                    OptiRSOptimizerImpl::SGD(sgd.with_momentum(config.momentum))
                } else {
                    OptiRSOptimizerImpl::SGD(sgd)
                }
            }
            OptiRSOptimizerType::Adam => OptiRSOptimizerImpl::Adam(Adam::new(config.learning_rate)),
            OptiRSOptimizerType::RMSprop => {
                OptiRSOptimizerImpl::RMSprop(RMSprop::new(config.learning_rate))
            }
            OptiRSOptimizerType::Adagrad => {
                OptiRSOptimizerImpl::Adagrad(Adagrad::new(config.learning_rate))
            }
        };

        Ok(optimizer)
    }

    /// Perform one optimization step.
    ///
    /// `parameters` are the current parameter values, `gradients` their
    /// gradients, and `cost` the objective value evaluated at `parameters`.
    /// Returns the updated (clipped, regularized, and bounded) parameters.
    pub fn optimize_step(
        &mut self,
        parameters: &[f64],
        gradients: &[f64],
        cost: f64,
    ) -> Result<Vec<f64>> {
        let params_array = Array1::from_vec(parameters.to_vec());
        let mut grads_array = Array1::from_vec(gradients.to_vec());

        // Clip the gradient to the configured maximum L2 norm.
        if let Some(clip_norm) = self.config.gradient_clip_norm {
            let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
            if grad_norm > clip_norm {
                grads_array = &grads_array * (clip_norm / grad_norm);
            }
        }

        // Add the L2 regularization term to the gradient.
        if self.config.l2_regularization > 0.0 {
            grads_array = &grads_array + &(&params_array * self.config.l2_regularization);
        }

        let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
        self.gradient_norms.push(grad_norm);

        let new_params = match &mut self.optimizer {
            OptiRSOptimizerImpl::SGD(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("SGD step failed: {e}")))?,
            OptiRSOptimizerImpl::Adam(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("Adam step failed: {e}")))?,
            OptiRSOptimizerImpl::RMSprop(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("RMSprop step failed: {e}"))
                })?
            }
            OptiRSOptimizerImpl::Adagrad(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("Adagrad step failed: {e}"))
                })?
            }
        };

        // Clamp the updated parameters to the configured bounds, if any.
        let bounded_params = if let Some((min_val, max_val)) = self.config.parameter_bounds {
            new_params.mapv(|p| p.clamp(min_val, max_val))
        } else {
            new_params
        };

        // Record the best cost together with the parameters at which it was
        // evaluated (the input parameters; `cost` was computed before this step).
        if cost < self.best_cost {
            self.best_cost = cost;
            self.best_parameters = Some(parameters.to_vec());
        }

        self.cost_history.push(cost);
        self.iteration += 1;

        Ok(bounded_params.to_vec())
    }

    /// Convergence test: the variance of the last (up to) ten recorded costs
    /// must fall below `convergence_tolerance`.
    #[must_use]
    pub fn has_converged(&self) -> bool {
        if self.cost_history.len() < 2 {
            return false;
        }

        let recent_costs = &self.cost_history[self.cost_history.len().saturating_sub(10)..];
        if recent_costs.len() < 2 {
            return false;
        }

        let cost_variance = {
            let mean = recent_costs.iter().sum::<f64>() / recent_costs.len() as f64;
            recent_costs
                .iter()
                .map(|&c| (c - mean).powi(2))
                .sum::<f64>()
                / recent_costs.len() as f64
        };

        cost_variance < self.config.convergence_tolerance
    }

    /// Best parameters observed so far, if any step has been taken.
    #[must_use]
    pub fn best_parameters(&self) -> Option<&[f64]> {
        self.best_parameters.as_deref()
    }

    /// Lowest cost observed so far.
    #[must_use]
    pub const fn best_cost(&self) -> f64 {
        self.best_cost
    }

    /// Cost reported at each optimization step.
    #[must_use]
    pub fn cost_history(&self) -> &[f64] {
        &self.cost_history
    }

    /// Gradient L2 norms recorded at each optimization step.
    #[must_use]
    pub fn gradient_norms(&self) -> &[f64] {
        &self.gradient_norms
    }

    /// Number of optimization steps taken so far.
    #[must_use]
    pub const fn iteration(&self) -> usize {
        self.iteration
    }

    /// Reset all optimizer state, recreating the underlying OptiRS optimizer.
    pub fn reset(&mut self) -> Result<()> {
        self.optimizer = Self::create_optimizer(&self.config)?;
        self.iteration = 0;
        self.best_parameters = None;
        self.best_cost = f64::INFINITY;
        self.cost_history.clear();
        self.gradient_norms.clear();
        Ok(())
    }
}

/// Summary of a completed optimization run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptiRSOptimizationResult {
    /// Best parameters found.
    pub optimal_parameters: Vec<f64>,
    /// Lowest cost achieved.
    pub optimal_cost: f64,
    /// Cost recorded at each iteration.
    pub cost_history: Vec<f64>,
    /// Gradient norm recorded at each iteration.
    pub gradient_norms: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Whether the convergence criterion was met.
    pub converged: bool,
    /// Wall-clock time spent optimizing.
    pub optimization_time: Duration,
}

impl OptiRSOptimizationResult {
    /// Build a result summary from an optimizer's recorded state.
    #[must_use]
    pub fn from_optimizer(
        optimizer: &OptiRSQuantumOptimizer,
        converged: bool,
        optimization_time: Duration,
    ) -> Self {
        Self {
            optimal_parameters: optimizer.best_parameters().unwrap_or(&[]).to_vec(),
            optimal_cost: optimizer.best_cost(),
            cost_history: optimizer.cost_history().to_vec(),
            gradient_norms: optimizer.gradient_norms().to_vec(),
            iterations: optimizer.iteration(),
            converged,
            optimization_time,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optirs_optimizer_creation() {
        let config = OptiRSConfig::default();
        let optimizer = OptiRSQuantumOptimizer::new(config);
        assert!(optimizer.is_ok());
    }

    #[test]
    fn test_optirs_sgd_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::SGD { momentum: true },
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create SGD optimizer");

        let params = vec![1.0, 2.0, 3.0];
        let grads = vec![0.1, 0.2, 0.15];
        let cost = 1.5;

        let new_params = optimizer
            .optimize_step(&params, &grads, cost)
            .expect("Failed to perform optimization step");
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_adam_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create Adam optimizer");

        let params = vec![0.5, 1.5, 2.5];
        let grads = vec![0.05, 0.15, 0.1];
        let cost = 2.3;

        let new_params = optimizer
            .optimize_step(&params, &grads, cost)
            .expect("Failed to perform optimization step");
        assert_eq!(new_params.len(), params.len());
        assert_eq!(optimizer.iteration(), 1);
    }

    #[test]
    fn test_optirs_convergence_check() {
        let config = OptiRSConfig {
            convergence_tolerance: 1e-6,
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        // Not converged before any steps have been recorded.
        assert!(!optimizer.has_converged());

        // Feed a constant cost so the recent-cost variance drops to zero.
        for _ in 0..15 {
            let params = vec![1.0];
            let grads = vec![0.001];
            optimizer
                .optimize_step(&params, &grads, 1.0)
                .expect("Failed to perform optimization step");
        }

        assert!(optimizer.has_converged());
    }

    #[test]
    fn test_optirs_parameter_bounds() {
        let config = OptiRSConfig {
            parameter_bounds: Some((-1.0, 1.0)),
            // Large learning rate so a single update can overshoot the bounds.
            learning_rate: 10.0,
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        let params = vec![0.9];
        // Negative gradient pushes the parameter upward, past the upper bound.
        let grads = vec![-1.0];
        let cost = 1.0;

        let new_params = optimizer
            .optimize_step(&params, &grads, cost)
            .expect("Failed to perform optimization step");
        assert!(new_params[0] <= 1.0);
        assert!(new_params[0] >= -1.0);
    }

    #[test]
    fn test_optirs_gradient_clipping() {
        let config = OptiRSConfig {
            gradient_clip_norm: Some(0.5),
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        let params = vec![1.0, 1.0];
        // Gradients whose norm far exceeds the 0.5 clipping threshold.
        let large_grads = vec![10.0, 10.0];
        let cost = 1.0;

        let new_params = optimizer
            .optimize_step(&params, &large_grads, cost)
            .expect("Failed to perform optimization step");
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_reset() {
        let config = OptiRSConfig::default();
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        for _ in 0..5 {
            let params = vec![1.0];
            let grads = vec![0.1];
            optimizer
                .optimize_step(&params, &grads, 1.0)
                .expect("Failed to perform optimization step");
        }

        assert_eq!(optimizer.iteration(), 5);

        optimizer.reset().expect("Failed to reset optimizer");

        assert_eq!(optimizer.iteration(), 0);
        assert_eq!(optimizer.cost_history().len(), 0);
    }

    #[test]
    fn test_all_optimizer_types() {
        let optimizers = vec![
            OptiRSOptimizerType::SGD { momentum: false },
            OptiRSOptimizerType::SGD { momentum: true },
            OptiRSOptimizerType::Adam,
            OptiRSOptimizerType::RMSprop,
            OptiRSOptimizerType::Adagrad,
        ];

        for opt_type in optimizers {
            let config = OptiRSConfig {
                optimizer_type: opt_type,
                ..Default::default()
            };
            let mut optimizer =
                OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

            let params = vec![1.0, 2.0];
            let grads = vec![0.1, 0.2];
            let cost = 1.0;

            let result = optimizer.optimize_step(&params, &grads, cost);
            assert!(result.is_ok(), "Failed for optimizer {opt_type:?}");
        }
    }
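
    // Illustrative end-to-end sketch: minimize the quadratic cost f(x) = x^2
    // using its analytic gradient 2x with the default (Adam) configuration, and
    // check that the best recorded cost improves on the starting point. The cost
    // function and iteration count here are chosen purely for illustration.
    #[test]
    fn test_optirs_quadratic_descent_sketch() {
        let config = OptiRSConfig::default();
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        let mut params = vec![2.0];
        let initial_cost = params[0] * params[0];

        for _ in 0..50 {
            let cost = params[0] * params[0];
            let grads = vec![2.0 * params[0]];
            params = optimizer
                .optimize_step(&params, &grads, cost)
                .expect("Failed to perform optimization step");
        }

        assert_eq!(optimizer.iteration(), 50);
        assert_eq!(optimizer.cost_history().len(), 50);
        assert!(optimizer.best_cost() < initial_cost);
    }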
}