quantrs2_sim/qml/config.rs

//! Configuration structures and enums for quantum machine learning algorithms.
//!
//! This module provides configuration types for hardware architectures,
//! algorithm types, optimization methods, and training parameters.

use serde::{Deserialize, Serialize};

/// Hardware architecture types for optimization.
///
/// Selects the target platform so that per-hardware defaults (circuit depth,
/// shot budget, noise adaptation) can be chosen, e.g. by
/// `QMLConfig::for_hardware`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum HardwareArchitecture {
    /// Noisy Intermediate-Scale Quantum devices
    NISQ,
    /// Fault-tolerant quantum computers
    FaultTolerant,
    /// Superconducting quantum processors
    Superconducting,
    /// Trapped ion systems
    TrappedIon,
    /// Photonic quantum computers
    Photonic,
    /// Neutral atom systems
    NeutralAtom,
    /// Classical simulation (no physical-device constraints)
    ClassicalSimulation,
}
26
/// Quantum machine learning algorithm types.
///
/// Determines per-algorithm defaults (qubit count, circuit depth, parameter
/// count, gradient method), e.g. via `QMLConfig::for_algorithm`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum QMLAlgorithmType {
    /// Variational Quantum Eigensolver
    VQE,
    /// Quantum Approximate Optimization Algorithm
    QAOA,
    /// Quantum Convolutional Neural Network
    QCNN,
    /// Quantum Support Vector Machine
    QSVM,
    /// Quantum Reinforcement Learning
    QRL,
    /// Quantum Generative Adversarial Network
    QGAN,
    /// Quantum Boltzmann Machine
    QBM,
}
45
/// Gradient estimation methods for training variational circuits.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GradientMethod {
    /// Parameter shift rule
    ParameterShift,
    /// Finite differences
    FiniteDifferences,
    /// Automatic differentiation
    AutomaticDifferentiation,
    /// Natural gradients
    NaturalGradients,
    /// Stochastic parameter shift
    StochasticParameterShift,
}
60
/// Optimization algorithms for updating circuit parameters.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OptimizerType {
    /// Adam optimizer
    Adam,
    /// Stochastic gradient descent
    SGD,
    /// Root-mean-square propagation (`RMSprop`)
    RMSprop,
    /// Limited-memory BFGS (L-BFGS)
    LBFGS,
    /// Quantum natural gradient
    QuantumNaturalGradient,
    /// SPSA (Simultaneous Perturbation Stochastic Approximation)
    SPSA,
}
77
/// QML configuration.
///
/// Bundles the hardware target, algorithm choice, circuit dimensions, and
/// training hyperparameters. Construct via `Default`, `for_algorithm`, or
/// `for_hardware`, and check invariants with `validate` before use.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QMLConfig {
    /// Target hardware architecture
    pub hardware_architecture: HardwareArchitecture,
    /// Algorithm type
    pub algorithm_type: QMLAlgorithmType,
    /// Number of qubits (must be > 0)
    pub num_qubits: usize,
    /// Circuit depth (must be > 0)
    pub circuit_depth: usize,
    /// Number of parameters (must be > 0)
    pub num_parameters: usize,
    /// Gradient estimation method
    pub gradient_method: GradientMethod,
    /// Optimizer type
    pub optimizer_type: OptimizerType,
    /// Learning rate (must be > 0)
    pub learning_rate: f64,
    /// Batch size (must be > 0)
    pub batch_size: usize,
    /// Maximum epochs (must be > 0)
    pub max_epochs: usize,
    /// Convergence tolerance (must be > 0)
    pub convergence_tolerance: f64,
    /// Enable hardware-aware optimization
    pub hardware_aware_optimization: bool,
    /// Enable noise adaptation
    pub noise_adaptive_training: bool,
    /// Shot budget for expectation value estimation (must be > 0)
    pub shot_budget: usize,
}
110
111impl Default for QMLConfig {
112    fn default() -> Self {
113        Self {
114            hardware_architecture: HardwareArchitecture::NISQ,
115            algorithm_type: QMLAlgorithmType::VQE,
116            num_qubits: 4,
117            circuit_depth: 3,
118            num_parameters: 12,
119            gradient_method: GradientMethod::ParameterShift,
120            optimizer_type: OptimizerType::Adam,
121            learning_rate: 0.01,
122            batch_size: 32,
123            max_epochs: 100,
124            convergence_tolerance: 1e-6,
125            hardware_aware_optimization: true,
126            noise_adaptive_training: true,
127            shot_budget: 8192,
128        }
129    }
130}
131
132impl QMLConfig {
133    /// Create a new QML configuration for a specific algorithm type
134    #[must_use]
135    pub fn for_algorithm(algorithm_type: QMLAlgorithmType) -> Self {
136        let mut config = Self {
137            algorithm_type,
138            ..Self::default()
139        };
140
141        // Adjust default parameters based on algorithm
142        match algorithm_type {
143            QMLAlgorithmType::VQE => {
144                config.num_qubits = 4;
145                config.circuit_depth = 3;
146                config.num_parameters = 12;
147                config.gradient_method = GradientMethod::ParameterShift;
148            }
149            QMLAlgorithmType::QAOA => {
150                config.num_qubits = 6;
151                config.circuit_depth = 2;
152                config.num_parameters = 4;
153                config.gradient_method = GradientMethod::ParameterShift;
154            }
155            QMLAlgorithmType::QCNN => {
156                config.num_qubits = 8;
157                config.circuit_depth = 4;
158                config.num_parameters = 24;
159                config.gradient_method = GradientMethod::AutomaticDifferentiation;
160            }
161            QMLAlgorithmType::QSVM => {
162                config.num_qubits = 6;
163                config.circuit_depth = 2;
164                config.num_parameters = 8;
165                config.gradient_method = GradientMethod::FiniteDifferences;
166            }
167            QMLAlgorithmType::QRL => {
168                config.num_qubits = 4;
169                config.circuit_depth = 5;
170                config.num_parameters = 20;
171                config.gradient_method = GradientMethod::NaturalGradients;
172            }
173            QMLAlgorithmType::QGAN => {
174                config.num_qubits = 8;
175                config.circuit_depth = 6;
176                config.num_parameters = 32;
177                config.gradient_method = GradientMethod::AutomaticDifferentiation;
178            }
179            QMLAlgorithmType::QBM => {
180                config.num_qubits = 10;
181                config.circuit_depth = 3;
182                config.num_parameters = 15;
183                config.gradient_method = GradientMethod::StochasticParameterShift;
184            }
185        }
186
187        config
188    }
189
190    /// Create a configuration optimized for a specific hardware architecture
191    #[must_use]
192    pub fn for_hardware(hardware: HardwareArchitecture) -> Self {
193        let mut config = Self {
194            hardware_architecture: hardware,
195            ..Self::default()
196        };
197
198        // Adjust parameters based on hardware constraints
199        match hardware {
200            HardwareArchitecture::NISQ => {
201                config.circuit_depth = 3;
202                config.shot_budget = 8192;
203                config.noise_adaptive_training = true;
204            }
205            HardwareArchitecture::FaultTolerant => {
206                config.circuit_depth = 10;
207                config.shot_budget = 1_000_000;
208                config.noise_adaptive_training = false;
209            }
210            HardwareArchitecture::Superconducting => {
211                config.circuit_depth = 5;
212                config.shot_budget = 16_384;
213                config.noise_adaptive_training = true;
214            }
215            HardwareArchitecture::TrappedIon => {
216                config.circuit_depth = 8;
217                config.shot_budget = 32_768;
218                config.noise_adaptive_training = true;
219            }
220            HardwareArchitecture::Photonic => {
221                config.circuit_depth = 4;
222                config.shot_budget = 4096;
223                config.noise_adaptive_training = true;
224            }
225            HardwareArchitecture::NeutralAtom => {
226                config.circuit_depth = 6;
227                config.shot_budget = 16_384;
228                config.noise_adaptive_training = true;
229            }
230            HardwareArchitecture::ClassicalSimulation => {
231                config.circuit_depth = 15;
232                config.shot_budget = 1_000_000;
233                config.noise_adaptive_training = false;
234            }
235        }
236
237        config
238    }
239
240    /// Validate the configuration
241    pub fn validate(&self) -> Result<(), String> {
242        if self.num_qubits == 0 {
243            return Err("Number of qubits must be positive".to_string());
244        }
245        if self.circuit_depth == 0 {
246            return Err("Circuit depth must be positive".to_string());
247        }
248        if self.num_parameters == 0 {
249            return Err("Number of parameters must be positive".to_string());
250        }
251        if self.learning_rate <= 0.0 {
252            return Err("Learning rate must be positive".to_string());
253        }
254        if self.batch_size == 0 {
255            return Err("Batch size must be positive".to_string());
256        }
257        if self.max_epochs == 0 {
258            return Err("Maximum epochs must be positive".to_string());
259        }
260        if self.convergence_tolerance <= 0.0 {
261            return Err("Convergence tolerance must be positive".to_string());
262        }
263        if self.shot_budget == 0 {
264            return Err("Shot budget must be positive".to_string());
265        }
266
267        Ok(())
268    }
269}