quantrs2_sim/optirs_integration.rs

//! OptiRS Integration for Quantum Variational Algorithms
//!
//! This module provides integration between OptiRS optimizers and QuantRS2 variational
//! quantum algorithms (VQE, QAOA, etc.). It bridges OptiRS's state-of-the-art ML
//! optimization algorithms with quantum circuit parameter optimization.
//!
//! # Features
//! - Production-ready optimizers from OptiRS (Adam, SGD, RMSprop, Adagrad)
//! - Gradient-based optimization for VQE/QAOA
//! - Learning rate scheduling
//! - Gradient clipping and regularization
//! - Hardware-aware parameter optimization
//! - Performance metrics and monitoring
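//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore`): the `evaluate_cost_and_gradient`
//! closure is a hypothetical stand-in for a VQE/QAOA cost and gradient
//! evaluation, and the import path is illustrative.
//!
//! ```ignore
//! use quantrs2_sim::optirs_integration::{OptiRSConfig, OptiRSQuantumOptimizer};
//!
//! let config = OptiRSConfig::default();
//! let max_iterations = config.max_iterations;
//! let mut optimizer = OptiRSQuantumOptimizer::new(config)?;
//!
//! let mut params = vec![0.1, 0.2, 0.3];
//! for _ in 0..max_iterations {
//!     // Hypothetical: evaluate the cost function and its gradient at `params`.
//!     let (cost, grads) = evaluate_cost_and_gradient(&params);
//!     params = optimizer.optimize_step(&params, &grads, cost)?;
//!     if optimizer.has_converged() {
//!         break;
//!     }
//! }
//! ```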

use crate::error::{Result, SimulatorError};
use scirs2_core::ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::time::Duration;

// Import OptiRS optimizers with proper type bounds
use optirs_core::optimizers::{Adagrad, Adam, Optimizer, RMSprop, SGD};

/// OptiRS optimizer types available for quantum optimization
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptiRSOptimizerType {
    /// Stochastic Gradient Descent with optional momentum
    SGD { momentum: bool },
    /// Adam optimizer (Adaptive Moment Estimation)
    Adam,
    /// RMSprop optimizer
    RMSprop,
    /// Adagrad optimizer
    Adagrad,
}

/// OptiRS optimizer configuration for quantum algorithms
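///
/// A sketch of overriding selected defaults via struct-update syntax (the field
/// values shown are illustrative, not tuned recommendations):
///
/// ```ignore
/// let config = OptiRSConfig {
///     optimizer_type: OptiRSOptimizerType::SGD { momentum: true },
///     learning_rate: 0.05,
///     gradient_clip_norm: None,
///     ..Default::default()
/// };
/// ```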
#[derive(Debug, Clone)]
pub struct OptiRSConfig {
    /// Optimizer type
    pub optimizer_type: OptiRSOptimizerType,
    /// Initial learning rate
    pub learning_rate: f64,
    /// Gradient clipping threshold
    pub gradient_clip_norm: Option<f64>,
    /// L2 regularization strength
    pub l2_regularization: f64,
    /// Maximum number of iterations
    pub max_iterations: usize,
    /// Convergence tolerance
    pub convergence_tolerance: f64,
    /// Optional parameter bounds as (min, max)
    pub parameter_bounds: Option<(f64, f64)>,
    /// Momentum (for SGD)
    pub momentum: f64,
}

impl Default for OptiRSConfig {
    fn default() -> Self {
        Self {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.01,
            gradient_clip_norm: Some(1.0),
            l2_regularization: 0.0,
            max_iterations: 1000,
            convergence_tolerance: 1e-6,
            parameter_bounds: Some((-std::f64::consts::PI, std::f64::consts::PI)),
            momentum: 0.9,
        }
    }
}

/// OptiRS quantum optimizer wrapper
pub struct OptiRSQuantumOptimizer {
    /// Configuration
    config: OptiRSConfig,
    /// Underlying OptiRS optimizer
    optimizer: OptiRSOptimizerImpl,
    /// Current iteration
    iteration: usize,
    /// Best parameters seen
    best_parameters: Option<Vec<f64>>,
    /// Best cost seen
    best_cost: f64,
    /// Optimization history
    cost_history: Vec<f64>,
    /// Gradient norms history
    gradient_norms: Vec<f64>,
}

/// Internal optimizer implementation enum
enum OptiRSOptimizerImpl {
    SGD(SGD<f64>),
    Adam(Adam<f64>),
    RMSprop(RMSprop<f64>),
    Adagrad(Adagrad<f64>),
}

impl OptiRSQuantumOptimizer {
    /// Create a new OptiRS quantum optimizer
    pub fn new(config: OptiRSConfig) -> Result<Self> {
        let optimizer = Self::create_optimizer(&config)?;

        Ok(Self {
            config,
            optimizer,
            iteration: 0,
            best_parameters: None,
            best_cost: f64::INFINITY,
            cost_history: Vec::new(),
            gradient_norms: Vec::new(),
        })
    }

    /// Create the underlying OptiRS optimizer
    fn create_optimizer(config: &OptiRSConfig) -> Result<OptiRSOptimizerImpl> {
        let optimizer = match config.optimizer_type {
            OptiRSOptimizerType::SGD { momentum } => {
                let sgd = SGD::new(config.learning_rate);
                if momentum {
                    OptiRSOptimizerImpl::SGD(sgd.with_momentum(config.momentum))
                } else {
                    OptiRSOptimizerImpl::SGD(sgd)
                }
            }
            OptiRSOptimizerType::Adam => OptiRSOptimizerImpl::Adam(Adam::new(config.learning_rate)),
            OptiRSOptimizerType::RMSprop => {
                OptiRSOptimizerImpl::RMSprop(RMSprop::new(config.learning_rate))
            }
            OptiRSOptimizerType::Adagrad => {
                OptiRSOptimizerImpl::Adagrad(Adagrad::new(config.learning_rate))
            }
        };

        Ok(optimizer)
    }

    /// Perform one optimization step
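    ///
    /// `parameters` and `gradients` must have the same length. The returned
    /// vector holds the updated parameters after optional gradient clipping,
    /// L2 regularization, and parameter-bound clamping. Each call also records
    /// the cost and gradient norm in the optimizer's history and updates the
    /// best-seen parameters.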
    pub fn optimize_step(
        &mut self,
        parameters: &[f64],
        gradients: &[f64],
        cost: f64,
    ) -> Result<Vec<f64>> {
        // Convert to ndarray
        let params_array = Array1::from_vec(parameters.to_vec());
        let mut grads_array = Array1::from_vec(gradients.to_vec());

        // Apply gradient clipping
        if let Some(clip_norm) = self.config.gradient_clip_norm {
            let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
            if grad_norm > clip_norm {
                grads_array = &grads_array * (clip_norm / grad_norm);
            }
        }

        // Apply L2 regularization
        if self.config.l2_regularization > 0.0 {
            grads_array = &grads_array + &(&params_array * self.config.l2_regularization);
        }

        // Compute gradient norm for tracking
        let grad_norm = grads_array.iter().map(|g| g * g).sum::<f64>().sqrt();
        self.gradient_norms.push(grad_norm);

        // Perform optimization step based on optimizer type
        let new_params = match &mut self.optimizer {
            OptiRSOptimizerImpl::SGD(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("SGD step failed: {e}")))?,
            OptiRSOptimizerImpl::Adam(opt) => opt
                .step(&params_array, &grads_array)
                .map_err(|e| SimulatorError::ComputationError(format!("Adam step failed: {e}")))?,
            OptiRSOptimizerImpl::RMSprop(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("RMSprop step failed: {e}"))
                })?
            }
            OptiRSOptimizerImpl::Adagrad(opt) => {
                opt.step(&params_array, &grads_array).map_err(|e| {
                    SimulatorError::ComputationError(format!("Adagrad step failed: {e}"))
                })?
            }
        };

        // Apply parameter bounds if specified
        let bounded_params = if let Some((min_val, max_val)) = self.config.parameter_bounds {
            new_params.mapv(|p| p.clamp(min_val, max_val))
        } else {
            new_params
        };

        // Update best parameters
        if cost < self.best_cost {
            self.best_cost = cost;
            self.best_parameters = Some(bounded_params.to_vec());
        }

        // Update history
        self.cost_history.push(cost);
        self.iteration += 1;

        Ok(bounded_params.to_vec())
    }

    /// Check if optimization has converged
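    ///
    /// Convergence is declared once the variance of the most recent (up to 10)
    /// recorded costs drops below `convergence_tolerance`; at least two cost
    /// evaluations are required.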
    pub fn has_converged(&self) -> bool {
        if self.cost_history.len() < 2 {
            return false;
        }

        let recent_costs = &self.cost_history[self.cost_history.len().saturating_sub(10)..];
        if recent_costs.len() < 2 {
            return false;
        }

        let cost_variance = {
            let mean = recent_costs.iter().sum::<f64>() / recent_costs.len() as f64;
            recent_costs
                .iter()
                .map(|&c| (c - mean).powi(2))
                .sum::<f64>()
                / recent_costs.len() as f64
        };

        cost_variance < self.config.convergence_tolerance
    }

    /// Get the best parameters found
    pub fn best_parameters(&self) -> Option<&[f64]> {
        self.best_parameters.as_deref()
    }

    /// Get the best cost found
    pub const fn best_cost(&self) -> f64 {
        self.best_cost
    }

    /// Get the cost history
    pub fn cost_history(&self) -> &[f64] {
        &self.cost_history
    }

    /// Get the gradient norms history
    pub fn gradient_norms(&self) -> &[f64] {
        &self.gradient_norms
    }

    /// Get the current iteration
    pub const fn iteration(&self) -> usize {
        self.iteration
    }

    /// Reset the optimizer state by recreating it
    pub fn reset(&mut self) {
        // Recreate the optimizer to reset its state
        self.optimizer = Self::create_optimizer(&self.config)
            .expect("recreating the optimizer from an existing config cannot fail");
        self.iteration = 0;
        self.best_parameters = None;
        self.best_cost = f64::INFINITY;
        self.cost_history.clear();
        self.gradient_norms.clear();
    }
}

/// OptiRS optimization result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptiRSOptimizationResult {
    /// Optimal parameters found
    pub optimal_parameters: Vec<f64>,
    /// Best cost function value found
    pub optimal_cost: f64,
    /// Optimization history
    pub cost_history: Vec<f64>,
    /// Gradient norms history
    pub gradient_norms: Vec<f64>,
    /// Number of iterations performed
    pub iterations: usize,
    /// Convergence flag
    pub converged: bool,
    /// Total optimization time
    pub optimization_time: Duration,
}

impl OptiRSOptimizationResult {
    /// Create a new result from the optimizer
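    ///
    /// A sketch of packaging results after an optimization loop (the timing via
    /// `std::time::Instant` is illustrative):
    ///
    /// ```ignore
    /// let start = std::time::Instant::now();
    /// // ... drive optimizer.optimize_step(...) in a loop ...
    /// let converged = optimizer.has_converged();
    /// let result = OptiRSOptimizationResult::from_optimizer(&optimizer, converged, start.elapsed());
    /// ```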
    pub fn from_optimizer(
        optimizer: &OptiRSQuantumOptimizer,
        converged: bool,
        optimization_time: Duration,
    ) -> Self {
        Self {
            optimal_parameters: optimizer.best_parameters().unwrap_or(&[]).to_vec(),
            optimal_cost: optimizer.best_cost(),
            cost_history: optimizer.cost_history().to_vec(),
            gradient_norms: optimizer.gradient_norms().to_vec(),
            iterations: optimizer.iteration(),
            converged,
            optimization_time,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optirs_optimizer_creation() {
        let config = OptiRSConfig::default();
        let optimizer = OptiRSQuantumOptimizer::new(config);
        assert!(optimizer.is_ok());
    }

    #[test]
    fn test_optirs_sgd_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::SGD { momentum: true },
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![1.0, 2.0, 3.0];
        let grads = vec![0.1, 0.2, 0.15];
        let cost = 1.5;

        let new_params = optimizer.optimize_step(&params, &grads, cost).unwrap();
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_adam_optimizer() {
        let config = OptiRSConfig {
            optimizer_type: OptiRSOptimizerType::Adam,
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![0.5, 1.5, 2.5];
        let grads = vec![0.05, 0.15, 0.1];
        let cost = 2.3;

        let new_params = optimizer.optimize_step(&params, &grads, cost).unwrap();
        assert_eq!(new_params.len(), params.len());
        assert_eq!(optimizer.iteration(), 1);
    }

    #[test]
    fn test_optirs_convergence_check() {
        let config = OptiRSConfig {
            convergence_tolerance: 1e-6,
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        // Should not converge initially
        assert!(!optimizer.has_converged());

        // Add stable cost history
        for _ in 0..15 {
            let params = vec![1.0];
            let grads = vec![0.001];
            optimizer.optimize_step(&params, &grads, 1.0).unwrap();
        }

        // Should converge with stable costs
        assert!(optimizer.has_converged());
    }

    #[test]
    fn test_optirs_parameter_bounds() {
        let config = OptiRSConfig {
            parameter_bounds: Some((-1.0, 1.0)),
            learning_rate: 10.0, // Large LR to test bounds
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![0.9];
        let grads = vec![-1.0]; // Would push parameter > 1.0 without bounds
        let cost = 1.0;

        let new_params = optimizer.optimize_step(&params, &grads, cost).unwrap();
        assert!(new_params[0] <= 1.0);
        assert!(new_params[0] >= -1.0);
    }

    #[test]
    fn test_optirs_gradient_clipping() {
        let config = OptiRSConfig {
            gradient_clip_norm: Some(0.5),
            ..Default::default()
        };
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        let params = vec![1.0, 1.0];
        let large_grads = vec![10.0, 10.0]; // Norm = sqrt(200) >> 0.5
        let cost = 1.0;

        // Should not crash with large gradients
        let new_params = optimizer
            .optimize_step(&params, &large_grads, cost)
            .unwrap();
        assert_eq!(new_params.len(), params.len());
    }

    #[test]
    fn test_optirs_reset() {
        let config = OptiRSConfig::default();
        let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

        // Perform some steps
        for _ in 0..5 {
            let params = vec![1.0];
            let grads = vec![0.1];
            optimizer.optimize_step(&params, &grads, 1.0).unwrap();
        }

        assert_eq!(optimizer.iteration(), 5);

        // Reset
        optimizer.reset();

        assert_eq!(optimizer.iteration(), 0);
        assert_eq!(optimizer.cost_history().len(), 0);
    }

    #[test]
    fn test_all_optimizer_types() {
        let optimizers = vec![
            OptiRSOptimizerType::SGD { momentum: false },
            OptiRSOptimizerType::SGD { momentum: true },
            OptiRSOptimizerType::Adam,
            OptiRSOptimizerType::RMSprop,
            OptiRSOptimizerType::Adagrad,
        ];

        for opt_type in optimizers {
            let config = OptiRSConfig {
                optimizer_type: opt_type,
                ..Default::default()
            };
            let mut optimizer = OptiRSQuantumOptimizer::new(config).unwrap();

            let params = vec![1.0, 2.0];
            let grads = vec![0.1, 0.2];
            let cost = 1.0;

            let result = optimizer.optimize_step(&params, &grads, cost);
            assert!(result.is_ok(), "Failed for optimizer {opt_type:?}");
        }
    }
}