quantrs2_sim/qml/config.rs

//! Configuration structures and enums for quantum machine learning algorithms.
//!
//! This module provides configuration types for hardware architectures,
//! algorithm types, optimization methods, and training parameters.
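//!
//! # Example
//!
//! A minimal usage sketch. The import path assumes this module is exposed as
//! `quantrs2_sim::qml::config`; adjust to the actual crate layout (the block is
//! marked `ignore` so it is not compiled as a doctest):
//!
//! ```ignore
//! use quantrs2_sim::qml::config::{QMLAlgorithmType, QMLConfig};
//!
//! // Start from the VQE-oriented defaults and override the training setup.
//! let config = QMLConfig {
//!     learning_rate: 0.05,
//!     max_epochs: 200,
//!     ..QMLConfig::for_algorithm(QMLAlgorithmType::VQE)
//! };
//! assert!(config.validate().is_ok());
//! ```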

use serde::{Deserialize, Serialize};

/// Hardware architecture types for optimization
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum HardwareArchitecture {
    /// Noisy Intermediate-Scale Quantum devices
    NISQ,
    /// Fault-tolerant quantum computers
    FaultTolerant,
    /// Superconducting quantum processors
    Superconducting,
    /// Trapped ion systems
    TrappedIon,
    /// Photonic quantum computers
    Photonic,
    /// Neutral atom systems
    NeutralAtom,
    /// Classical simulation
    ClassicalSimulation,
}

/// Quantum machine learning algorithm types
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum QMLAlgorithmType {
    /// Variational Quantum Eigensolver
    VQE,
    /// Quantum Approximate Optimization Algorithm
    QAOA,
    /// Quantum Convolutional Neural Network
    QCNN,
    /// Quantum Support Vector Machine
    QSVM,
    /// Quantum Reinforcement Learning
    QRL,
    /// Quantum Generative Adversarial Network
    QGAN,
    /// Quantum Boltzmann Machine
    QBM,
}

/// Gradient estimation methods
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GradientMethod {
    /// Parameter shift rule
    ParameterShift,
    /// Finite differences
    FiniteDifferences,
    /// Automatic differentiation
    AutomaticDifferentiation,
    /// Natural gradients
    NaturalGradients,
    /// Stochastic parameter shift
    StochasticParameterShift,
}

/// Optimization algorithms
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptimizerType {
    /// Adam optimizer
    Adam,
    /// Stochastic gradient descent
    SGD,
    /// RMSprop
    RMSprop,
    /// L-BFGS
    LBFGS,
    /// Quantum natural gradient
    QuantumNaturalGradient,
    /// SPSA (Simultaneous Perturbation Stochastic Approximation)
    SPSA,
}

/// QML configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QMLConfig {
    /// Target hardware architecture
    pub hardware_architecture: HardwareArchitecture,
    /// Algorithm type
    pub algorithm_type: QMLAlgorithmType,
    /// Number of qubits
    pub num_qubits: usize,
    /// Circuit depth
    pub circuit_depth: usize,
    /// Number of parameters
    pub num_parameters: usize,
    /// Gradient estimation method
    pub gradient_method: GradientMethod,
    /// Optimizer type
    pub optimizer_type: OptimizerType,
    /// Learning rate
    pub learning_rate: f64,
    /// Batch size
    pub batch_size: usize,
    /// Maximum epochs
    pub max_epochs: usize,
    /// Convergence tolerance
    pub convergence_tolerance: f64,
    /// Enable hardware-aware optimization
    pub hardware_aware_optimization: bool,
    /// Enable noise adaptation
    pub noise_adaptive_training: bool,
    /// Shot budget for expectation value estimation
    pub shot_budget: usize,
}

impl Default for QMLConfig {
    fn default() -> Self {
        Self {
            hardware_architecture: HardwareArchitecture::NISQ,
            algorithm_type: QMLAlgorithmType::VQE,
            num_qubits: 4,
            circuit_depth: 3,
            num_parameters: 12,
            gradient_method: GradientMethod::ParameterShift,
            optimizer_type: OptimizerType::Adam,
            learning_rate: 0.01,
            batch_size: 32,
            max_epochs: 100,
            convergence_tolerance: 1e-6,
            hardware_aware_optimization: true,
            noise_adaptive_training: true,
            shot_budget: 8192,
        }
    }
}

impl QMLConfig {
    /// Create a new QML configuration for a specific algorithm type
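    ///
    /// A minimal usage sketch, mirroring the match arms below (marked `ignore`
    /// so it is not run as a doctest):
    ///
    /// ```ignore
    /// let qaoa = QMLConfig::for_algorithm(QMLAlgorithmType::QAOA);
    /// assert_eq!(qaoa.num_qubits, 6);
    /// assert_eq!(qaoa.num_parameters, 4);
    /// ```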
    pub fn for_algorithm(algorithm_type: QMLAlgorithmType) -> Self {
        let mut config = Self {
            algorithm_type,
            ..Self::default()
        };

        // Adjust default parameters based on algorithm
        match algorithm_type {
            QMLAlgorithmType::VQE => {
                config.num_qubits = 4;
                config.circuit_depth = 3;
                config.num_parameters = 12;
                config.gradient_method = GradientMethod::ParameterShift;
            }
            QMLAlgorithmType::QAOA => {
                config.num_qubits = 6;
                config.circuit_depth = 2;
                config.num_parameters = 4;
                config.gradient_method = GradientMethod::ParameterShift;
            }
            QMLAlgorithmType::QCNN => {
                config.num_qubits = 8;
                config.circuit_depth = 4;
                config.num_parameters = 24;
                config.gradient_method = GradientMethod::AutomaticDifferentiation;
            }
            QMLAlgorithmType::QSVM => {
                config.num_qubits = 6;
                config.circuit_depth = 2;
                config.num_parameters = 8;
                config.gradient_method = GradientMethod::FiniteDifferences;
            }
            QMLAlgorithmType::QRL => {
                config.num_qubits = 4;
                config.circuit_depth = 5;
                config.num_parameters = 20;
                config.gradient_method = GradientMethod::NaturalGradients;
            }
            QMLAlgorithmType::QGAN => {
                config.num_qubits = 8;
                config.circuit_depth = 6;
                config.num_parameters = 32;
                config.gradient_method = GradientMethod::AutomaticDifferentiation;
            }
            QMLAlgorithmType::QBM => {
                config.num_qubits = 10;
                config.circuit_depth = 3;
                config.num_parameters = 15;
                config.gradient_method = GradientMethod::StochasticParameterShift;
            }
        }

        config
    }

    /// Create a configuration optimized for a specific hardware architecture
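    ///
    /// A minimal usage sketch, mirroring the match arms below (marked `ignore`
    /// so it is not compiled as a doctest):
    ///
    /// ```ignore
    /// let ft = QMLConfig::for_hardware(HardwareArchitecture::FaultTolerant);
    /// assert_eq!(ft.circuit_depth, 10);
    /// assert!(!ft.noise_adaptive_training);
    /// ```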
    pub fn for_hardware(hardware: HardwareArchitecture) -> Self {
        let mut config = Self {
            hardware_architecture: hardware,
            ..Self::default()
        };

        // Adjust parameters based on hardware constraints
        match hardware {
            HardwareArchitecture::NISQ => {
                config.circuit_depth = 3;
                config.shot_budget = 8192;
                config.noise_adaptive_training = true;
            }
            HardwareArchitecture::FaultTolerant => {
                config.circuit_depth = 10;
                config.shot_budget = 1_000_000;
                config.noise_adaptive_training = false;
            }
            HardwareArchitecture::Superconducting => {
                config.circuit_depth = 5;
                config.shot_budget = 16384;
                config.noise_adaptive_training = true;
            }
            HardwareArchitecture::TrappedIon => {
                config.circuit_depth = 8;
                config.shot_budget = 32768;
                config.noise_adaptive_training = true;
            }
            HardwareArchitecture::Photonic => {
                config.circuit_depth = 4;
                config.shot_budget = 4096;
                config.noise_adaptive_training = true;
            }
            HardwareArchitecture::NeutralAtom => {
                config.circuit_depth = 6;
                config.shot_budget = 16384;
                config.noise_adaptive_training = true;
            }
            HardwareArchitecture::ClassicalSimulation => {
                config.circuit_depth = 15;
                config.shot_budget = 1_000_000;
                config.noise_adaptive_training = false;
            }
        }

        config
    }

    /// Validate the configuration
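    ///
    /// Returns `Err` with a message describing the first constraint that is
    /// violated. A minimal sketch (marked `ignore` so it is not run as a
    /// doctest):
    ///
    /// ```ignore
    /// let mut config = QMLConfig::default();
    /// config.learning_rate = 0.0;
    /// assert!(config.validate().is_err());
    /// ```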
    pub fn validate(&self) -> Result<(), String> {
        if self.num_qubits == 0 {
            return Err("Number of qubits must be positive".to_string());
        }
        if self.circuit_depth == 0 {
            return Err("Circuit depth must be positive".to_string());
        }
        if self.num_parameters == 0 {
            return Err("Number of parameters must be positive".to_string());
        }
        if self.learning_rate <= 0.0 {
            return Err("Learning rate must be positive".to_string());
        }
        if self.batch_size == 0 {
            return Err("Batch size must be positive".to_string());
        }
        if self.max_epochs == 0 {
            return Err("Maximum epochs must be positive".to_string());
        }
        if self.convergence_tolerance <= 0.0 {
            return Err("Convergence tolerance must be positive".to_string());
        }
        if self.shot_budget == 0 {
            return Err("Shot budget must be positive".to_string());
        }

        Ok(())
    }
}
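
// Illustrative unit tests (a sketch added for this edit; they only exercise
// behaviour visible in this file and can be trimmed or extended as needed).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn default_config_is_valid() {
        assert!(QMLConfig::default().validate().is_ok());
    }

    #[test]
    fn algorithm_presets_override_circuit_shape() {
        let qcnn = QMLConfig::for_algorithm(QMLAlgorithmType::QCNN);
        assert_eq!(qcnn.algorithm_type, QMLAlgorithmType::QCNN);
        assert_eq!(qcnn.num_qubits, 8);
        assert_eq!(qcnn.gradient_method, GradientMethod::AutomaticDifferentiation);
    }

    #[test]
    fn hardware_presets_adjust_depth_and_shots() {
        let sim = QMLConfig::for_hardware(HardwareArchitecture::ClassicalSimulation);
        assert_eq!(sim.hardware_architecture, HardwareArchitecture::ClassicalSimulation);
        assert_eq!(sim.circuit_depth, 15);
        assert!(!sim.noise_adaptive_training);
    }

    #[test]
    fn zero_shot_budget_is_rejected() {
        let mut config = QMLConfig::default();
        config.shot_budget = 0;
        assert_eq!(
            config.validate(),
            Err("Shot budget must be positive".to_string())
        );
    }
}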