quantrs2_sim/optirs_integration.rs

//! `OptiRS` Integration for Quantum Variational Algorithms
//!
//! This module provides integration between `OptiRS` optimizers and `QuantRS2` variational
//! quantum algorithms (VQE, QAOA, etc.). It bridges `OptiRS`'s state-of-the-art ML
//! optimization algorithms with quantum circuit parameter optimization.
//!
//! # Features
//! - Production-ready optimizers from `OptiRS` (Adam, SGD, `RMSprop`, Adagrad)
//! - Gradient-based optimization for VQE/QAOA
//! - Learning rate scheduling
//! - Gradient clipping and regularization
//! - Hardware-aware parameter optimization
//! - Performance metrics and monitoring
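//!
//! # Example
//!
//! A minimal usage sketch. The cost and gradient values below are placeholders;
//! in a real VQE/QAOA loop they would come from evaluating the parameterized
//! circuit (e.g. via the parameter-shift rule). The module path is assumed here.
//!
//! ```ignore
//! use quantrs2_sim::optirs_integration::{OptiRSConfig, OptiRSQuantumOptimizer};
//!
//! let config = OptiRSConfig::default();
//! let max_iterations = config.max_iterations;
//! let mut optimizer = OptiRSQuantumOptimizer::new(config)?;
//! let mut params = vec![0.1, 0.2, 0.3];
//!
//! for _ in 0..max_iterations {
//!     // Placeholder objective: replace with a circuit expectation value and
//!     // parameter-shift gradients.
//!     let cost = params.iter().map(|p| p * p).sum::<f64>();
//!     let grads: Vec<f64> = params.iter().map(|p| 2.0 * p).collect();
//!
//!     params = optimizer.optimize_step(&params, &grads, cost)?;
//!     if optimizer.has_converged() {
//!         break;
//!     }
//! }
//! ```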

use crate::error::{Result, SimulatorError};
use scirs2_core::ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::time::Duration;

// Import OptiRS optimizers with proper type bounds
use optirs_core::optimizers::{Adagrad, Adam, Optimizer, RMSprop, SGD};

/// `OptiRS` optimizer types available for quantum optimization
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptiRSOptimizerType {
    /// Stochastic Gradient Descent, optionally with momentum
    SGD { momentum: bool },
    /// Adam optimizer (Adaptive Moment Estimation)
    Adam,
    /// `RMSprop` optimizer
    RMSprop,
    /// Adagrad optimizer
    Adagrad,
}

/// `OptiRS` optimizer configuration for quantum algorithms
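///
/// All fields can be overridden from [`Default`] with struct update syntax, for
/// example (the values here are illustrative):
///
/// ```ignore
/// let config = OptiRSConfig {
///     optimizer_type: OptiRSOptimizerType::Adam,
///     learning_rate: 0.005,
///     max_iterations: 500,
///     ..Default::default()
/// };
/// ```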
#[derive(Debug, Clone)]
pub struct OptiRSConfig {
    /// Optimizer type
    pub optimizer_type: OptiRSOptimizerType,
    /// Initial learning rate
    pub learning_rate: f64,
    /// Gradient clipping threshold
    pub gradient_clip_norm: Option<f64>,
    /// L2 regularization strength
    pub l2_regularization: f64,
    /// Maximum number of iterations
    pub max_iterations: usize,
    /// Convergence tolerance
    pub convergence_tolerance: f64,
    /// Optional (min, max) parameter bounds applied after each update
    pub parameter_bounds: Option<(f64, f64)>,
    /// Momentum (for SGD)
    pub momentum: f64,
}

impl Default for OptiRSConfig {
    fn default() -> Self {
        Self {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.01,
            gradient_clip_norm: Some(1.0),
            l2_regularization: 0.0,
            max_iterations: 1000,
            convergence_tolerance: 1e-6,
            parameter_bounds: Some((-std::f64::consts::PI, std::f64::consts::PI)),
            momentum: 0.9,
        }
    }
}

/// `OptiRS` quantum optimizer wrapper
pub struct OptiRSQuantumOptimizer {
    /// Configuration
    config: OptiRSConfig,
    /// Underlying `OptiRS` optimizer
    optimizer: OptiRSOptimizerImpl,
    /// Current iteration
    iteration: usize,
    /// Best parameters seen
    best_parameters: Option<Vec<f64>>,
    /// Best cost seen
    best_cost: f64,
    /// Optimization history
    cost_history: Vec<f64>,
    /// Gradient norms history
    gradient_norms: Vec<f64>,
}

/// Internal optimizer implementation enum
enum OptiRSOptimizerImpl {
    SGD(SGD<f64>),
    Adam(Adam<f64>),
    RMSprop(RMSprop<f64>),
    Adagrad(Adagrad<f64>),
}

impl OptiRSQuantumOptimizer {
    /// Create a new `OptiRS` quantum optimizer
    pub fn new(config: OptiRSConfig) -> Result<Self> {
        let optimizer = Self::create_optimizer(&config)?;

        Ok(Self {
            config,
            optimizer,
            iteration: 0,
            best_parameters: None,
            best_cost: f64::INFINITY,
            cost_history: Vec::new(),
            gradient_norms: Vec::new(),
        })
    }

    /// Create the underlying `OptiRS` optimizer
    fn create_optimizer(config: &OptiRSConfig) -> Result<OptiRSOptimizerImpl> {
        let optimizer = match config.optimizer_type {
            OptiRSOptimizerType::SGD { momentum } => {
                let sgd = SGD::new(config.learning_rate);
                if momentum {
                    OptiRSOptimizerImpl::SGD(sgd.with_momentum(config.momentum))
                } else {
                    OptiRSOptimizerImpl::SGD(sgd)
                }
            }
            OptiRSOptimizerType::Adam => OptiRSOptimizerImpl::Adam(Adam::new(config.learning_rate)),
            OptiRSOptimizerType::RMSprop => {
                OptiRSOptimizerImpl::RMSprop(RMSprop::new(config.learning_rate))
            }
            OptiRSOptimizerType::Adagrad => {
                OptiRSOptimizerImpl::Adagrad(Adagrad::new(config.learning_rate))
            }
        };

        Ok(optimizer)
    }

    /// Perform one optimization step
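    ///
    /// `parameters` and `gradients` are expected to have the same length, and
    /// `cost` is the objective value evaluated at `parameters`. The step applies,
    /// in order: gradient clipping, L2 regularization, the underlying `OptiRS`
    /// update, and optional clamping to the configured parameter bounds, then
    /// returns the updated parameters.
    ///
    /// Sketch of the expected call pattern (`evaluate_cost` and
    /// `evaluate_gradients` are hypothetical helpers):
    ///
    /// ```ignore
    /// let cost = evaluate_cost(&params);
    /// let grads = evaluate_gradients(&params);
    /// params = optimizer.optimize_step(&params, &grads, cost)?;
    /// ```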
    pub fn optimize_step(
        &mut self,
        parameters: &[f64],
        gradients: &[f64],
        cost: f64,
    ) -> Result<Vec<f64>> {
        // Convert to ndarray
        let params_array = Array1::from_vec(parameters.to_vec());
        let mut grads_array = Array1::from_vec(gradients.to_vec());

        // Apply gradient clipping
        if let Some(clip_norm) = self.config.gradient_clip_norm {
            let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
            if grad_norm > clip_norm {
                grads_array = &grads_array * (clip_norm / grad_norm);
            }
        }

        // Apply L2 regularization
        if self.config.l2_regularization > 0.0 {
            grads_array = &grads_array + &(&params_array * self.config.l2_regularization);
        }

        // Compute gradient norm for tracking
        let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
        self.gradient_norms.push(grad_norm);

        // Perform optimization step based on optimizer type
        let new_params = match &mut self.optimizer {
            OptiRSOptimizerImpl::SGD(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("SGD step failed: {e}")))?,
            OptiRSOptimizerImpl::Adam(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("Adam step failed: {e}")))?,
            OptiRSOptimizerImpl::RMSprop(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("RMSprop step failed: {e}"))
                })?
            }
            OptiRSOptimizerImpl::Adagrad(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("Adagrad step failed: {e}"))
                })?
            }
        };

        // Apply parameter bounds if specified
        let bounded_params = if let Some((min_val, max_val)) = self.config.parameter_bounds {
            new_params.mapv(|p| p.clamp(min_val, max_val))
        } else {
            new_params
        };

        // Update best parameters (the given cost was evaluated at the input parameters)
        if cost < self.best_cost {
            self.best_cost = cost;
            self.best_parameters = Some(parameters.to_vec());
        }

        // Update history
        self.cost_history.push(cost);
        self.iteration += 1;

        Ok(bounded_params.to_vec())
    }

    /// Check if optimization has converged
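    ///
    /// Convergence is declared when the variance of the most recently recorded
    /// costs (a window of up to 10 values) drops below `convergence_tolerance`.
    /// With fewer than two recorded costs this always returns `false`.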
    #[must_use]
    pub fn has_converged(&self) -> bool {
        if self.cost_history.len() < 2 {
            return false;
        }

        let recent_costs = &self.cost_history[self.cost_history.len().saturating_sub(10)..];
        if recent_costs.len() < 2 {
            return false;
        }

        let cost_variance = {
            let mean = recent_costs.iter().sum::<f64>() / recent_costs.len() as f64;
            recent_costs
                .iter()
                .map(|&c| (c - mean).powi(2))
                .sum::<f64>()
                / recent_costs.len() as f64
        };

        cost_variance < self.config.convergence_tolerance
    }

    /// Get the best parameters found
    #[must_use]
    pub fn best_parameters(&self) -> Option<&[f64]> {
        self.best_parameters.as_deref()
    }

    /// Get the best cost found
    #[must_use]
    pub const fn best_cost(&self) -> f64 {
        self.best_cost
    }

    /// Get the cost history
    #[must_use]
    pub fn cost_history(&self) -> &[f64] {
        &self.cost_history
    }

    /// Get the gradient norms history
    #[must_use]
    pub fn gradient_norms(&self) -> &[f64] {
        &self.gradient_norms
    }

    /// Get the current iteration
    #[must_use]
    pub const fn iteration(&self) -> usize {
        self.iteration
    }

    /// Reset the optimizer state by recreating it
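    ///
    /// Rebuilds the underlying `OptiRS` optimizer (discarding any internal state
    /// such as momentum or moment estimates) and clears the iteration counter,
    /// best parameters/cost, and the cost and gradient-norm histories.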
    pub fn reset(&mut self) -> Result<()> {
        // Recreate the optimizer to reset its state
        self.optimizer = Self::create_optimizer(&self.config)?;
        self.iteration = 0;
        self.best_parameters = None;
        self.best_cost = f64::INFINITY;
        self.cost_history.clear();
        self.gradient_norms.clear();
        Ok(())
    }
}

/// `OptiRS` optimization result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptiRSOptimizationResult {
    /// Optimal parameters found
    pub optimal_parameters: Vec<f64>,
    /// Best cost function value found
    pub optimal_cost: f64,
    /// Optimization history
    pub cost_history: Vec<f64>,
    /// Gradient norms history
    pub gradient_norms: Vec<f64>,
    /// Number of iterations performed
    pub iterations: usize,
    /// Convergence flag
    pub converged: bool,
    /// Total optimization time
    pub optimization_time: Duration,
}

impl OptiRSOptimizationResult {
    /// Create a new result from the optimizer
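    ///
    /// Typically called once the optimization loop finishes, e.g. (sketch, with
    /// `start` obtained from `std::time::Instant::now()` before the loop):
    ///
    /// ```ignore
    /// let result = OptiRSOptimizationResult::from_optimizer(
    ///     &optimizer,
    ///     optimizer.has_converged(),
    ///     start.elapsed(),
    /// );
    /// ```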
    #[must_use]
    pub fn from_optimizer(
        optimizer: &OptiRSQuantumOptimizer,
        converged: bool,
        optimization_time: Duration,
    ) -> Self {
        Self {
            optimal_parameters: optimizer.best_parameters().unwrap_or(&[]).to_vec(),
            optimal_cost: optimizer.best_cost(),
            cost_history: optimizer.cost_history().to_vec(),
            gradient_norms: optimizer.gradient_norms().to_vec(),
            iterations: optimizer.iteration(),
            converged,
            optimization_time,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optirs_optimizer_creation() {
        let config = OptiRSConfig::default();
        let optimizer = OptiRSQuantumOptimizer::new(config);
        assert!(optimizer.is_ok());
    }

    #[test]
    fn test_optirs_sgd_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::SGD { momentum: true },
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create SGD optimizer");

        let params = vec![1.0, 2.0, 3.0];
        let grads = vec![0.1, 0.2, 0.15];
        let cost = 1.5;

        let new_params = optimizer
            .optimize_step(&params, &grads, cost)
            .expect("Failed to perform optimization step");
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_adam_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create Adam optimizer");

        let params = vec![0.5, 1.5, 2.5];
        let grads = vec![0.05, 0.15, 0.1];
        let cost = 2.3;

        let new_params = optimizer
            .optimize_step(&params, &grads, cost)
            .expect("Failed to perform optimization step");
        assert_eq!(new_params.len(), params.len());
        assert_eq!(optimizer.iteration(), 1);
    }

    #[test]
    fn test_optirs_convergence_check() {
        let config = OptiRSConfig {
            convergence_tolerance: 1e-6,
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        // Should not converge initially
        assert!(!optimizer.has_converged());

        // Add stable cost history
        for _ in 0..15 {
            let params = vec![1.0];
            let grads = vec![0.001];
            optimizer
                .optimize_step(&params, &grads, 1.0)
                .expect("Failed to perform optimization step");
        }

        // Should converge with stable costs
        assert!(optimizer.has_converged());
    }

    #[test]
    fn test_optirs_parameter_bounds() {
        let config = OptiRSConfig {
            parameter_bounds: Some((-1.0, 1.0)),
            learning_rate: 10.0, // Large LR to test bounds
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        let params = vec![0.9];
        let grads = vec![-1.0]; // Would push parameter > 1.0 without bounds
        let cost = 1.0;

        let new_params = optimizer
            .optimize_step(&params, &grads, cost)
            .expect("Failed to perform optimization step");
        assert!(new_params[0] <= 1.0);
        assert!(new_params[0] >= -1.0);
    }

    #[test]
    fn test_optirs_gradient_clipping() {
        let config = OptiRSConfig {
            gradient_clip_norm: Some(0.5),
            ..Default::default()
        };
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        let params = vec![1.0, 1.0];
        let large_grads = vec![10.0, 10.0]; // Norm = sqrt(200) >> 0.5
        let cost = 1.0;

        // Should not crash with large gradients
        let new_params = optimizer
            .optimize_step(&params, &large_grads, cost)
            .expect("Failed to perform optimization step");
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_reset() {
        let config = OptiRSConfig::default();
        let mut optimizer =
            OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

        // Perform some steps
        for _ in 0..5 {
            let params = vec![1.0];
            let grads = vec![0.1];
            optimizer
                .optimize_step(&params, &grads, 1.0)
                .expect("Failed to perform optimization step");
        }

        assert_eq!(optimizer.iteration(), 5);

        // Reset
        optimizer.reset().expect("Failed to reset optimizer");

        assert_eq!(optimizer.iteration(), 0);
        assert_eq!(optimizer.cost_history().len(), 0);
    }

    #[test]
    fn test_all_optimizer_types() {
        let optimizers = vec![
            OptiRSOptimizerType::SGD { momentum: false },
            OptiRSOptimizerType::SGD { momentum: true },
            OptiRSOptimizerType::Adam,
            OptiRSOptimizerType::RMSprop,
            OptiRSOptimizerType::Adagrad,
        ];

        for opt_type in optimizers {
            let config = OptiRSConfig {
                optimizer_type: opt_type,
                ..Default::default()
            };
            let mut optimizer =
                OptiRSQuantumOptimizer::new(config).expect("Failed to create optimizer");

            let params = vec![1.0, 2.0];
            let grads = vec![0.1, 0.2];
            let cost = 1.0;

            let result = optimizer.optimize_step(&params, &grads, cost);
            assert!(result.is_ok(), "Failed for optimizer {opt_type:?}");
        }
    }
}