use crate::prelude::{InterfaceGate, InterfaceGateType, SimulatorError};
use scirs2_core::ndarray::Array1;
use scirs2_core::parallel_ops::{IndexedParallelIterator, ParallelIterator};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use crate::autodiff_vqe::AutoDiffContext;
use crate::circuit_interfaces::{CircuitInterface, InterfaceCircuit};
use crate::error::Result;
use crate::scirs2_integration::SciRS2Backend;

/// Machine learning frameworks that the QML layer can interoperate with.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum QMLFramework {
    PyTorch,
    TensorFlow,
    JAX,
    SciRS2,
    Custom,
}

/// Configuration for the QML integration layer.
#[derive(Debug, Clone)]
pub struct QMLIntegrationConfig {
    pub framework: QMLFramework,
    pub enable_autodiff: bool,
    pub enable_gradient_optimization: bool,
    pub batch_size: usize,
    pub enable_parameter_sharing: bool,
    pub hardware_aware_optimization: bool,
    pub gradient_memory_limit: usize,
    pub enable_distributed_training: bool,
    pub enable_mixed_precision: bool,
}

impl Default for QMLIntegrationConfig {
    fn default() -> Self {
        Self {
            framework: QMLFramework::SciRS2,
            enable_autodiff: true,
            enable_gradient_optimization: true,
            batch_size: 32,
            enable_parameter_sharing: true,
            hardware_aware_optimization: true,
            gradient_memory_limit: 8_000_000_000, // 8 GB
            enable_distributed_training: false,
            enable_mixed_precision: false,
        }
    }
}
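
// A minimal usage sketch (illustrative, not from the original docs): defaults can be
// overridden per experiment with struct-update syntax, e.g.
//
//     let config = QMLIntegrationConfig {
//         batch_size: 8,
//         enable_autodiff: false,
//         ..Default::default()
//     };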

/// Types of layers that can appear in a quantum neural network.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum QMLLayerType {
    VariationalCircuit,
    QuantumConvolutional,
    QuantumRecurrent,
    QuantumAttention,
    DataEncoding,
    Measurement,
    Classical,
}

/// A single layer of a quantum neural network.
#[derive(Debug, Clone)]
pub struct QMLLayer {
    pub layer_type: QMLLayerType,
    pub name: String,
    pub num_qubits: usize,
    pub parameters: Vec<f64>,
    pub parameter_names: Vec<String>,
    pub circuit_template: Option<InterfaceCircuit>,
    pub classical_function: Option<String>,
    pub config: LayerConfig,
}

/// Per-layer configuration options.
#[derive(Debug, Clone, Default)]
pub struct LayerConfig {
    pub repetitions: usize,
    pub entangling_pattern: Vec<(usize, usize)>,
    pub activation: Option<String>,
    pub regularization: Option<RegularizationConfig>,
    pub hardware_mapping: Option<Vec<usize>>,
}

/// Regularization settings for a layer.
#[derive(Debug, Clone)]
pub struct RegularizationConfig {
    pub l1_strength: f64,
    pub l2_strength: f64,
    pub dropout_prob: f64,
}

/// A quantum neural network built from stacked QML layers.
#[derive(Debug, Clone)]
pub struct QuantumNeuralNetwork {
    pub layers: Vec<QMLLayer>,
    pub global_parameters: HashMap<String, f64>,
    pub metadata: QNNMetadata,
    pub training_config: TrainingConfig,
}

/// Descriptive metadata for a quantum neural network.
#[derive(Debug, Clone, Default)]
pub struct QNNMetadata {
    pub name: Option<String>,
    pub description: Option<String>,
    pub created_at: Option<std::time::SystemTime>,
    pub total_parameters: usize,
    pub trainable_parameters: usize,
    pub complexity_score: f64,
}

/// Hyperparameters controlling QNN training.
#[derive(Debug, Clone)]
pub struct TrainingConfig {
    pub learning_rate: f64,
    pub optimizer: OptimizerType,
    pub loss_function: LossFunction,
    pub epochs: usize,
    pub batch_size: usize,
    pub validation_split: f64,
    pub early_stopping_patience: Option<usize>,
    pub lr_scheduler: Option<LRScheduler>,
}

impl Default for TrainingConfig {
    fn default() -> Self {
        Self {
            learning_rate: 0.01,
            optimizer: OptimizerType::Adam,
            loss_function: LossFunction::MeanSquaredError,
            epochs: 100,
            batch_size: 32,
            validation_split: 0.2,
            early_stopping_patience: Some(10),
            lr_scheduler: None,
        }
    }
}

/// Supported parameter optimizers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizerType {
    SGD,
    Adam,
    AdamW,
    RMSprop,
    LBFGS,
    NaturalGradient,
    QuantumNaturalGradient,
}

/// Supported loss functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LossFunction {
    MeanSquaredError,
    MeanAbsoluteError,
    CrossEntropy,
    BinaryCrossEntropy,
    Hinge,
    CustomQuantum,
}

/// Learning-rate schedules.
#[derive(Debug, Clone)]
pub enum LRScheduler {
    StepLR { step_size: usize, gamma: f64 },
    ExponentialLR { gamma: f64 },
    CosineAnnealingLR { t_max: usize },
    ReduceLROnPlateau { patience: usize, factor: f64 },
}

/// Main QML integration engine: builds, evaluates, and trains quantum neural networks.
pub struct QMLIntegration {
    config: QMLIntegrationConfig,
    circuit_interface: CircuitInterface,
    backend: Option<SciRS2Backend>,
    autodiff_context: Option<AutoDiffContext>,
    parameter_cache: Arc<Mutex<HashMap<String, Vec<f64>>>>,
    gradient_cache: Arc<Mutex<HashMap<String, Vec<f64>>>>,
    stats: QMLTrainingStats,
}

/// Statistics collected during training.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct QMLTrainingStats {
    pub total_training_time_ms: f64,
    pub parameter_updates: usize,
    pub gradient_computations: usize,
    pub avg_gradient_time_ms: f64,
    pub circuit_evaluations: usize,
    pub avg_circuit_time_ms: f64,
    pub loss_history: Vec<f64>,
    pub validation_loss_history: Vec<f64>,
    pub parameter_norm_history: Vec<f64>,
    pub gradient_norm_history: Vec<f64>,
}

impl QMLIntegration {
    /// Create a new QML integration with the given configuration.
    pub fn new(config: QMLIntegrationConfig) -> Result<Self> {
        let circuit_interface = CircuitInterface::new(Default::default())?;

        Ok(Self {
            config,
            circuit_interface,
            backend: None,
            autodiff_context: None,
            parameter_cache: Arc::new(Mutex::new(HashMap::new())),
            gradient_cache: Arc::new(Mutex::new(HashMap::new())),
            stats: QMLTrainingStats::default(),
        })
    }

    /// Attach the SciRS2 backend and, if autodiff is enabled, an autodiff context.
    pub fn with_backend(mut self) -> Result<Self> {
        self.backend = Some(SciRS2Backend::new());
        self.circuit_interface = self.circuit_interface.with_backend()?;

        if self.config.enable_autodiff {
            self.autodiff_context = Some(AutoDiffContext::new(
                Vec::new(),
                crate::autodiff_vqe::GradientMethod::ParameterShift,
            ));
        }

        Ok(self)
    }

    /// Train a quantum neural network on the given data, optionally validating each epoch.
    pub fn train_qnn(
        &mut self,
        mut qnn: QuantumNeuralNetwork,
        training_data: &[TrainingExample],
        validation_data: Option<&[TrainingExample]>,
    ) -> Result<TrainingResult> {
        let start_time = std::time::Instant::now();

        let mut optimizer = self.create_optimizer(&qnn.training_config)?;
        let mut lr_scheduler = qnn.training_config.lr_scheduler.clone();

        let mut best_loss = f64::INFINITY;
        let mut patience_counter = 0;

        for epoch in 0..qnn.training_config.epochs {
            let epoch_start = std::time::Instant::now();

            let train_loss = self.train_epoch(&mut qnn, training_data, &mut optimizer)?;
            self.stats.loss_history.push(train_loss);

            // Without a validation set, the training loss doubles as the validation loss.
            let val_loss = if let Some(val_data) = validation_data {
                self.validate_epoch(&qnn, val_data)?
            } else {
                train_loss
            };
            self.stats.validation_loss_history.push(val_loss);

            if let Some(ref mut scheduler) = lr_scheduler {
                self.update_lr_scheduler(scheduler, val_loss, &mut optimizer)?;
            }

            // Early stopping on stalled validation loss.
            if let Some(patience) = qnn.training_config.early_stopping_patience {
                if val_loss < best_loss {
                    best_loss = val_loss;
                    patience_counter = 0;
                } else {
                    patience_counter += 1;
                    if patience_counter >= patience {
                        println!("Early stopping at epoch {epoch} due to no improvement");
                        break;
                    }
                }
            }

            let param_norm = self.compute_parameter_norm(&qnn)?;
            let grad_norm = self.compute_last_gradient_norm()?;
            self.stats.parameter_norm_history.push(param_norm);
            self.stats.gradient_norm_history.push(grad_norm);

            println!(
                "Epoch {}: train_loss={:.6}, val_loss={:.6}, time={:.2}ms",
                epoch,
                train_loss,
                val_loss,
                epoch_start.elapsed().as_secs_f64() * 1000.0
            );
        }

        let total_time = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.total_training_time_ms += total_time;

        Ok(TrainingResult {
            trained_qnn: qnn.clone(),
            final_loss: *self.stats.loss_history.last().unwrap_or(&0.0),
            final_validation_loss: *self.stats.validation_loss_history.last().unwrap_or(&0.0),
            epochs_completed: self.stats.loss_history.len(),
            total_time_ms: total_time,
            converged: patience_counter
                < qnn
                    .training_config
                    .early_stopping_patience
                    .unwrap_or(usize::MAX),
        })
    }

    fn train_epoch(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        training_data: &[TrainingExample],
        optimizer: &mut Box<dyn QMLOptimizer>,
    ) -> Result<f64> {
        let mut total_loss = 0.0;
        let batch_size = qnn.training_config.batch_size;
        let num_batches = training_data.len().div_ceil(batch_size);

        for batch_idx in 0..num_batches {
            let start_idx = batch_idx * batch_size;
            let end_idx = (start_idx + batch_size).min(training_data.len());
            let batch = &training_data[start_idx..end_idx];

            let (predictions, loss) = self.forward_pass(qnn, batch)?;
            total_loss += loss;

            let gradients = self.backward_pass(qnn, batch, &predictions)?;

            optimizer.update_parameters(qnn, &gradients)?;

            self.stats.parameter_updates += 1;
        }

        Ok(total_loss / num_batches as f64)
    }
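
    // Note on the batching above (added comment): `div_ceil` rounds up, so the final batch
    // may be smaller than `batch_size`; the epoch loss is the mean of the per-batch mean
    // losses.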

    fn validate_epoch(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        validation_data: &[TrainingExample],
    ) -> Result<f64> {
        let mut total_loss = 0.0;
        let batch_size = qnn.training_config.batch_size;
        let num_batches = validation_data.len().div_ceil(batch_size);

        for batch_idx in 0..num_batches {
            let start_idx = batch_idx * batch_size;
            let end_idx = (start_idx + batch_size).min(validation_data.len());
            let batch = &validation_data[start_idx..end_idx];

            let (_, loss) = self.forward_pass(qnn, batch)?;
            total_loss += loss;
        }

        Ok(total_loss / num_batches as f64)
    }

    fn forward_pass(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
    ) -> Result<(Vec<Array1<f64>>, f64)> {
        let start_time = std::time::Instant::now();

        let mut predictions = Vec::new();
        let mut total_loss = 0.0;

        for example in batch {
            let prediction = self.evaluate_qnn(qnn, &example.input)?;

            let loss = self.compute_loss(
                &prediction,
                &example.target,
                &qnn.training_config.loss_function,
            )?;

            predictions.push(prediction);
            total_loss += loss;
        }

        let eval_time = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.avg_circuit_time_ms = self
            .stats
            .avg_circuit_time_ms
            .mul_add(self.stats.circuit_evaluations as f64, eval_time)
            / (self.stats.circuit_evaluations + batch.len()) as f64;
        self.stats.circuit_evaluations += batch.len();

        Ok((predictions, total_loss / batch.len() as f64))
    }

    fn backward_pass(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
        predictions: &[Array1<f64>],
    ) -> Result<HashMap<String, Vec<f64>>> {
        let start_time = std::time::Instant::now();

        let gradients = if self.config.enable_autodiff {
            self.compute_gradients_autodiff(qnn, batch, predictions)?
        } else {
            self.compute_gradients_parameter_shift(qnn, batch)?
        };

        let grad_time = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.avg_gradient_time_ms = self
            .stats
            .avg_gradient_time_ms
            .mul_add(self.stats.gradient_computations as f64, grad_time)
            / (self.stats.gradient_computations + 1) as f64;
        self.stats.gradient_computations += 1;

        // Cache the latest gradients so the gradient norm can be reported per epoch.
        {
            let mut cache = self.gradient_cache.lock().map_err(|e| {
                SimulatorError::InvalidOperation(format!("Gradient cache lock poisoned: {e}"))
            })?;
            for (param_name, grad) in &gradients {
                cache.insert(param_name.clone(), grad.clone());
            }
        }

        Ok(gradients)
    }

    fn evaluate_qnn(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let total_qubits = qnn.layers.iter().map(|l| l.num_qubits).max().unwrap_or(1);

        // Initialize the quantum register in |0...0>.
        let mut state = Array1::zeros(1 << total_qubits);
        state[0] = Complex64::new(1.0, 0.0);

        // Feed the classical data through the layer stack.
        let mut current_output = input.clone();
        for layer in &qnn.layers {
            current_output = self.evaluate_layer(layer, &current_output, &mut state)?;
        }

        Ok(current_output)
    }

    fn evaluate_layer(
        &mut self,
        layer: &QMLLayer,
        input: &Array1<f64>,
        state: &mut Array1<Complex64>,
    ) -> Result<Array1<f64>> {
        match layer.layer_type {
            QMLLayerType::DataEncoding => {
                self.apply_data_encoding(layer, input, state)?;
                Ok(input.clone())
            }
            QMLLayerType::VariationalCircuit => {
                self.apply_variational_circuit(layer, state)?;
                self.measure_qubits(layer, state)
            }
            QMLLayerType::Measurement => self.measure_qubits(layer, state),
            QMLLayerType::Classical => self.apply_classical_processing(layer, input),
            // Remaining layer types currently act as passthroughs.
            _ => Ok(input.clone()),
        }
    }

    fn apply_data_encoding(
        &self,
        layer: &QMLLayer,
        input: &Array1<f64>,
        state: &mut Array1<Complex64>,
    ) -> Result<()> {
        for (i, &value) in input.iter().enumerate() {
            if i < layer.num_qubits {
                let angle = value * std::f64::consts::PI;
                self.apply_ry_rotation(i, angle, state)?;
            }
        }
        Ok(())
    }
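
    // Angle (rotation) encoding, as implemented above: feature x_i is written into the
    // register by rotating qubit i with RY(pi * x_i), so features in [0, 1] map onto
    // rotation angles in [0, pi]. Inputs with more features than qubits are truncated.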

    fn apply_variational_circuit(
        &mut self,
        layer: &QMLLayer,
        state: &mut Array1<Complex64>,
    ) -> Result<()> {
        if let Some(circuit_template) = &layer.circuit_template {
            let mut circuit = circuit_template.clone();
            self.parameterize_circuit(&mut circuit, &layer.parameters)?;

            let compiled = self.circuit_interface.compile_circuit(
                &circuit,
                crate::circuit_interfaces::SimulationBackend::StateVector,
            )?;
            let result = self
                .circuit_interface
                .execute_circuit(&compiled, Some(state.clone()))?;

            if let Some(final_state) = result.final_state {
                *state = final_state;
            }
        }
        Ok(())
    }

    fn measure_qubits(&self, layer: &QMLLayer, state: &Array1<Complex64>) -> Result<Array1<f64>> {
        let mut measurements = Array1::zeros(layer.num_qubits);

        for qubit in 0..layer.num_qubits {
            let prob = self.compute_measurement_probability(qubit, state)?;
            measurements[qubit] = prob;
        }

        Ok(measurements)
    }

    fn apply_classical_processing(
        &self,
        _layer: &QMLLayer,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        // Classical post-processing is currently an identity passthrough.
        Ok(input.clone())
    }

    fn apply_ry_rotation(
        &self,
        qubit: usize,
        angle: f64,
        state: &mut Array1<Complex64>,
    ) -> Result<()> {
        let qubit_mask = 1 << qubit;
        let cos_half = (angle / 2.0).cos();
        let sin_half = (angle / 2.0).sin();

        for i in 0..state.len() {
            if i & qubit_mask == 0 {
                let j = i | qubit_mask;
                if j < state.len() {
                    let amp_0 = state[i];
                    let amp_1 = state[j];

                    state[i] = cos_half * amp_0 - sin_half * amp_1;
                    state[j] = sin_half * amp_0 + cos_half * amp_1;
                }
            }
        }

        Ok(())
    }
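
    // The single-qubit RY rotation applied above, written out for reference:
    //
    //     RY(theta) = [[cos(theta/2), -sin(theta/2)],
    //                  [sin(theta/2),  cos(theta/2)]]
    //
    // The loop pairs the basis states that differ only in the target qubit and mixes their
    // amplitudes with exactly these matrix entries.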

    fn parameterize_circuit(
        &self,
        circuit: &mut InterfaceCircuit,
        parameters: &[f64],
    ) -> Result<()> {
        let mut param_idx = 0;

        for gate in &mut circuit.gates {
            match &mut gate.gate_type {
                InterfaceGateType::RX(ref mut angle)
                | InterfaceGateType::RY(ref mut angle)
                | InterfaceGateType::RZ(ref mut angle) => {
                    if param_idx < parameters.len() {
                        *angle = parameters[param_idx];
                        param_idx += 1;
                    }
                }
                InterfaceGateType::Phase(ref mut angle) => {
                    if param_idx < parameters.len() {
                        *angle = parameters[param_idx];
                        param_idx += 1;
                    }
                }
                _ => {}
            }
        }

        Ok(())
    }

    fn compute_measurement_probability(
        &self,
        qubit: usize,
        state: &Array1<Complex64>,
    ) -> Result<f64> {
        let qubit_mask = 1 << qubit;
        let mut prob_one = 0.0;

        for (i, &amplitude) in state.iter().enumerate() {
            if i & qubit_mask != 0 {
                prob_one += amplitude.norm_sqr();
            }
        }

        Ok(prob_one)
    }
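
    // Worked example for the loss branches below (matches `test_loss_computation`):
    // prediction = [0.8, 0.2], target = [1.0, 0.0]
    //   MSE = ((-0.2)^2 + 0.2^2) / 2 = 0.04
    //   MAE = (0.2 + 0.2) / 2 = 0.2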

    fn compute_loss(
        &self,
        prediction: &Array1<f64>,
        target: &Array1<f64>,
        loss_fn: &LossFunction,
    ) -> Result<f64> {
        match loss_fn {
            LossFunction::MeanSquaredError => {
                let diff = prediction - target;
                Ok(diff.mapv(|x| x * x).mean().unwrap_or(0.0))
            }
            LossFunction::MeanAbsoluteError => {
                let diff = prediction - target;
                Ok(diff.mapv(f64::abs).mean().unwrap_or(0.0))
            }
            LossFunction::CrossEntropy => {
                let mut loss = 0.0;
                for (&pred, &targ) in prediction.iter().zip(target.iter()) {
                    if targ > 0.0 {
                        loss -= targ * pred.ln();
                    }
                }
                Ok(loss)
            }
            // Remaining loss functions are not implemented yet and contribute zero loss.
            _ => Ok(0.0),
        }
    }

    fn compute_gradients_autodiff(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
        _predictions: &[Array1<f64>],
    ) -> Result<HashMap<String, Vec<f64>>> {
        // Autodiff-based gradients are not wired up yet; fall back to parameter shift.
        self.compute_gradients_parameter_shift(qnn, batch)
    }
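
    // Parameter-shift rule used below: for a loss L(theta) that depends on an angle theta
    // through standard rotation gates,
    //
    //     dL/dtheta = ( L(theta + pi/2) - L(theta - pi/2) ) / 2
    //
    // so each parameter costs two extra circuit evaluations per training example.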

    fn compute_gradients_parameter_shift(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
    ) -> Result<HashMap<String, Vec<f64>>> {
        let mut gradients = HashMap::new();
        let shift = std::f64::consts::PI / 2.0;

        let mut all_params = Vec::new();
        let mut param_names = Vec::new();

        for layer in &qnn.layers {
            for (i, &param) in layer.parameters.iter().enumerate() {
                all_params.push(param);
                param_names.push(format!("{}_{}", layer.name, i));
            }
        }

        for (param_idx, param_name) in param_names.iter().enumerate() {
            let mut param_grad = 0.0;

            for example in batch {
                let mut qnn_plus = qnn.clone();
                self.shift_parameter(&mut qnn_plus, param_idx, shift)?;
                let pred_plus = self.evaluate_qnn(&qnn_plus, &example.input)?;
                let loss_plus = self.compute_loss(
                    &pred_plus,
                    &example.target,
                    &qnn.training_config.loss_function,
                )?;

                let mut qnn_minus = qnn.clone();
                self.shift_parameter(&mut qnn_minus, param_idx, -shift)?;
                let pred_minus = self.evaluate_qnn(&qnn_minus, &example.input)?;
                let loss_minus = self.compute_loss(
                    &pred_minus,
                    &example.target,
                    &qnn.training_config.loss_function,
                )?;

                param_grad += (loss_plus - loss_minus) / 2.0;
            }

            param_grad /= batch.len() as f64;
            gradients.insert(param_name.clone(), vec![param_grad]);
        }

        Ok(gradients)
    }

    fn shift_parameter(
        &self,
        qnn: &mut QuantumNeuralNetwork,
        param_idx: usize,
        shift: f64,
    ) -> Result<()> {
        let mut current_idx = 0;

        for layer in &mut qnn.layers {
            if current_idx + layer.parameters.len() > param_idx {
                let local_idx = param_idx - current_idx;
                layer.parameters[local_idx] += shift;
                return Ok(());
            }
            current_idx += layer.parameters.len();
        }

        Err(SimulatorError::InvalidInput(format!(
            "Parameter index {param_idx} out of bounds"
        )))
    }

    fn create_optimizer(&self, config: &TrainingConfig) -> Result<Box<dyn QMLOptimizer>> {
        match config.optimizer {
            OptimizerType::Adam => Ok(Box::new(AdamOptimizer::new(config.learning_rate))),
            OptimizerType::SGD => Ok(Box::new(SGDOptimizer::new(config.learning_rate))),
            // Other optimizers are not implemented here yet; fall back to Adam.
            _ => Ok(Box::new(AdamOptimizer::new(config.learning_rate))),
        }
    }

    fn update_lr_scheduler(
        &self,
        scheduler: &mut LRScheduler,
        _current_loss: f64,
        optimizer: &mut Box<dyn QMLOptimizer>,
    ) -> Result<()> {
        // Simplified schedules: the decay factor is applied once per epoch, without
        // tracking step counts or plateau patience.
        match scheduler {
            LRScheduler::StepLR {
                step_size: _,
                gamma,
            } => {
                optimizer.update_learning_rate(*gamma);
            }
            LRScheduler::ExponentialLR { gamma } => {
                optimizer.update_learning_rate(*gamma);
            }
            LRScheduler::ReduceLROnPlateau {
                patience: _,
                factor,
            } => {
                optimizer.update_learning_rate(*factor);
            }
            LRScheduler::CosineAnnealingLR { .. } => {}
        }
        Ok(())
    }

    fn compute_parameter_norm(&self, qnn: &QuantumNeuralNetwork) -> Result<f64> {
        let mut norm_squared = 0.0;

        for layer in &qnn.layers {
            for &param in &layer.parameters {
                norm_squared += param * param;
            }
        }

        Ok(norm_squared.sqrt())
    }

    fn compute_last_gradient_norm(&self) -> Result<f64> {
        let cache = self.gradient_cache.lock().map_err(|e| {
            SimulatorError::InvalidOperation(format!("Gradient cache lock poisoned: {e}"))
        })?;
        let mut norm_squared = 0.0;

        for grads in cache.values() {
            for &grad in grads {
                norm_squared += grad * grad;
            }
        }

        Ok(norm_squared.sqrt())
    }

    /// Get the accumulated training statistics.
    #[must_use]
    pub const fn get_stats(&self) -> &QMLTrainingStats {
        &self.stats
    }

    /// Reset the accumulated training statistics.
    pub fn reset_stats(&mut self) {
        self.stats = QMLTrainingStats::default();
    }
}
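
// A minimal end-to-end usage sketch (illustrative, not part of the original docs):
//
//     let mut integration = QMLIntegration::new(QMLIntegrationConfig::default())?;
//     let qnn = QMLUtils::create_vqc(2, 2);            // 2 qubits, 2 variational layers
//     let data = QMLUtils::create_xor_training_data();
//     let result = integration.train_qnn(qnn, &data, None)?;
//     println!("final loss: {}", result.final_loss);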

/// A single (input, target) training pair.
#[derive(Debug, Clone)]
pub struct TrainingExample {
    pub input: Array1<f64>,
    pub target: Array1<f64>,
}

/// Outcome of a completed training run.
#[derive(Debug, Clone)]
pub struct TrainingResult {
    pub trained_qnn: QuantumNeuralNetwork,
    pub final_loss: f64,
    pub final_validation_loss: f64,
    pub epochs_completed: usize,
    pub total_time_ms: f64,
    pub converged: bool,
}

/// Interface implemented by all QML parameter optimizers.
pub trait QMLOptimizer {
    /// Apply one optimization step using the given per-parameter gradients.
    fn update_parameters(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        gradients: &HashMap<String, Vec<f64>>,
    ) -> Result<()>;

    /// Multiply the current learning rate by `factor`.
    fn update_learning_rate(&mut self, factor: f64);

    /// Current learning rate.
    fn get_learning_rate(&self) -> f64;
}

/// Adam optimizer with bias-corrected first and second moment estimates.
pub struct AdamOptimizer {
    learning_rate: f64,
    beta1: f64,
    beta2: f64,
    epsilon: f64,
    step: usize,
    m: HashMap<String, Vec<f64>>, // First moment estimates
    v: HashMap<String, Vec<f64>>, // Second moment estimates
}

impl AdamOptimizer {
    #[must_use]
    pub fn new(learning_rate: f64) -> Self {
        Self {
            learning_rate,
            beta1: 0.9,
            beta2: 0.999,
            epsilon: 1e-8,
            step: 0,
            m: HashMap::new(),
            v: HashMap::new(),
        }
    }
}
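
// Adam update implemented below, in the usual notation (g = gradient, t = step count):
//
//     m_t = beta1 * m_{t-1} + (1 - beta1) * g
//     v_t = beta2 * v_{t-1} + (1 - beta2) * g^2
//     m_hat = m_t / (1 - beta1^t),  v_hat = v_t / (1 - beta2^t)
//     theta <- theta - lr * m_hat / (sqrt(v_hat) + epsilon)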

impl QMLOptimizer for AdamOptimizer {
    fn update_parameters(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        gradients: &HashMap<String, Vec<f64>>,
    ) -> Result<()> {
        self.step += 1;

        for (param_name, grads) in gradients {
            if !self.m.contains_key(param_name) {
                self.m.insert(param_name.clone(), vec![0.0; grads.len()]);
                self.v.insert(param_name.clone(), vec![0.0; grads.len()]);
            }

            let mut updates = Vec::new();

            {
                let m = self.m.get_mut(param_name).ok_or_else(|| {
                    SimulatorError::InvalidOperation(format!(
                        "Parameter {param_name} not found in first moment estimates"
                    ))
                })?;
                let v = self.v.get_mut(param_name).ok_or_else(|| {
                    SimulatorError::InvalidOperation(format!(
                        "Parameter {param_name} not found in second moment estimates"
                    ))
                })?;

                for (i, &grad) in grads.iter().enumerate() {
                    m[i] = self.beta1.mul_add(m[i], (1.0 - self.beta1) * grad);
                    v[i] = self.beta2.mul_add(v[i], (1.0 - self.beta2) * grad * grad);

                    let m_hat = m[i] / (1.0 - self.beta1.powi(self.step as i32));
                    let v_hat = v[i] / (1.0 - self.beta2.powi(self.step as i32));

                    let update = self.learning_rate * m_hat / (v_hat.sqrt() + self.epsilon);
                    updates.push((i, -update));
                }
            }

            for (i, update) in updates {
                self.update_qnn_parameter(qnn, param_name, i, update)?;
            }
        }

        Ok(())
    }

    fn update_learning_rate(&mut self, factor: f64) {
        self.learning_rate *= factor;
    }

    fn get_learning_rate(&self) -> f64 {
        self.learning_rate
    }
}

impl AdamOptimizer {
    fn update_qnn_parameter(
        &self,
        qnn: &mut QuantumNeuralNetwork,
        param_name: &str,
        _grad_idx: usize,
        update: f64,
    ) -> Result<()> {
        // Parameter names are built as "<layer_name>_<index>", and layer names may themselves
        // contain underscores, so split at the last underscore to recover both parts.
        if let Some((layer_name, idx_str)) = param_name.rsplit_once('_') {
            if let Ok(param_idx) = idx_str.parse::<usize>() {
                for layer in &mut qnn.layers {
                    if layer.name == layer_name && param_idx < layer.parameters.len() {
                        layer.parameters[param_idx] += update;
                        return Ok(());
                    }
                }
            }
        }

        Err(SimulatorError::InvalidInput(format!(
            "Parameter {param_name} not found"
        )))
    }
}

/// Stochastic gradient descent with momentum.
pub struct SGDOptimizer {
    learning_rate: f64,
    momentum: f64,
    velocity: HashMap<String, Vec<f64>>,
}

impl SGDOptimizer {
    #[must_use]
    pub fn new(learning_rate: f64) -> Self {
        Self {
            learning_rate,
            momentum: 0.9,
            velocity: HashMap::new(),
        }
    }
}

impl QMLOptimizer for SGDOptimizer {
    fn update_parameters(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        gradients: &HashMap<String, Vec<f64>>,
    ) -> Result<()> {
        for (param_name, grads) in gradients {
            if !self.velocity.contains_key(param_name) {
                self.velocity
                    .insert(param_name.clone(), vec![0.0; grads.len()]);
            }

            let mut updates = Vec::new();

            {
                let velocity = self.velocity.get_mut(param_name).ok_or_else(|| {
                    SimulatorError::InvalidOperation(format!(
                        "Parameter {param_name} not found in velocity cache"
                    ))
                })?;

                for (i, &grad) in grads.iter().enumerate() {
                    velocity[i] = self
                        .momentum
                        .mul_add(velocity[i], -(self.learning_rate * grad));
                    updates.push((i, velocity[i]));
                }
            }

            for (i, update) in updates {
                self.update_qnn_parameter(qnn, param_name, i, update)?;
            }
        }

        Ok(())
    }

    fn update_learning_rate(&mut self, factor: f64) {
        self.learning_rate *= factor;
    }

    fn get_learning_rate(&self) -> f64 {
        self.learning_rate
    }
}

impl SGDOptimizer {
    fn update_qnn_parameter(
        &self,
        qnn: &mut QuantumNeuralNetwork,
        param_name: &str,
        _grad_idx: usize,
        update: f64,
    ) -> Result<()> {
        // Parameter names are built as "<layer_name>_<index>", and layer names may themselves
        // contain underscores, so split at the last underscore to recover both parts.
        if let Some((layer_name, idx_str)) = param_name.rsplit_once('_') {
            if let Ok(param_idx) = idx_str.parse::<usize>() {
                for layer in &mut qnn.layers {
                    if layer.name == layer_name && param_idx < layer.parameters.len() {
                        layer.parameters[param_idx] += update;
                        return Ok(());
                    }
                }
            }
        }

        Err(SimulatorError::InvalidInput(format!(
            "Parameter {param_name} not found"
        )))
    }
}

/// Helper constructors and benchmarks for common QML workloads.
pub struct QMLUtils;

impl QMLUtils {
    /// Create a variational quantum classifier (VQC) with the given size.
    #[must_use]
    pub fn create_vqc(num_qubits: usize, num_layers: usize) -> QuantumNeuralNetwork {
        let mut layers = Vec::new();

        // Data encoding layer.
        layers.push(QMLLayer {
            layer_type: QMLLayerType::DataEncoding,
            name: "encoding".to_string(),
            num_qubits,
            parameters: Vec::new(),
            parameter_names: Vec::new(),
            circuit_template: None,
            classical_function: None,
            config: LayerConfig::default(),
        });

        // Variational layers with randomly initialized rotation angles.
        for layer_idx in 0..num_layers {
            let num_params = num_qubits * 3; // RX, RY, RZ per qubit
            let parameters = (0..num_params)
                .map(|_| fastrand::f64() * 2.0 * std::f64::consts::PI)
                .collect();
            let parameter_names = (0..num_params).map(|i| format!("param_{i}")).collect();

            layers.push(QMLLayer {
                layer_type: QMLLayerType::VariationalCircuit,
                name: format!("var_layer_{layer_idx}"),
                num_qubits,
                parameters,
                parameter_names,
                circuit_template: Some(Self::create_variational_circuit_template(num_qubits)),
                classical_function: None,
                config: LayerConfig {
                    repetitions: 1,
                    entangling_pattern: (0..num_qubits - 1).map(|i| (i, i + 1)).collect(),
                    ..Default::default()
                },
            });
        }

        // Measurement layer.
        layers.push(QMLLayer {
            layer_type: QMLLayerType::Measurement,
            name: "measurement".to_string(),
            num_qubits,
            parameters: Vec::new(),
            parameter_names: Vec::new(),
            circuit_template: None,
            classical_function: None,
            config: LayerConfig::default(),
        });

        QuantumNeuralNetwork {
            layers,
            global_parameters: HashMap::new(),
            metadata: QNNMetadata {
                name: Some("VQC".to_string()),
                total_parameters: num_layers * num_qubits * 3,
                trainable_parameters: num_layers * num_qubits * 3,
                ..Default::default()
            },
            training_config: TrainingConfig::default(),
        }
    }

    fn create_variational_circuit_template(num_qubits: usize) -> InterfaceCircuit {
        let mut circuit = InterfaceCircuit::new(num_qubits, 0);

        // Parameterized single-qubit rotations on every qubit (angles filled in later).
        for qubit in 0..num_qubits {
            circuit.add_gate(InterfaceGate::new(InterfaceGateType::RX(0.0), vec![qubit]));
            circuit.add_gate(InterfaceGate::new(InterfaceGateType::RY(0.0), vec![qubit]));
            circuit.add_gate(InterfaceGate::new(InterfaceGateType::RZ(0.0), vec![qubit]));
        }

        // Linear chain of CNOTs for entanglement.
        for qubit in 0..num_qubits - 1 {
            circuit.add_gate(InterfaceGate::new(
                InterfaceGateType::CNOT,
                vec![qubit, qubit + 1],
            ));
        }

        circuit
    }
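
    // Gate count of the template above: 3 * num_qubits parameterized rotations plus
    // (num_qubits - 1) CNOTs, e.g. 3 qubits -> 9 + 2 = 11 gates, which is what
    // `test_circuit_template_creation` checks.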

    /// The XOR truth table as a tiny four-example training set.
    #[must_use]
    pub fn create_xor_training_data() -> Vec<TrainingExample> {
        vec![
            TrainingExample {
                input: Array1::from(vec![0.0, 0.0]),
                target: Array1::from(vec![0.0]),
            },
            TrainingExample {
                input: Array1::from(vec![0.0, 1.0]),
                target: Array1::from(vec![1.0]),
            },
            TrainingExample {
                input: Array1::from(vec![1.0, 0.0]),
                target: Array1::from(vec![1.0]),
            },
            TrainingExample {
                input: Array1::from(vec![1.0, 1.0]),
                target: Array1::from(vec![0.0]),
            },
        ]
    }

    /// Benchmark training with and without autodiff enabled on a tiny XOR problem.
    pub fn benchmark_qml_integration() -> Result<QMLBenchmarkResults> {
        let mut results = QMLBenchmarkResults::default();

        let configs = vec![
            QMLIntegrationConfig {
                framework: QMLFramework::SciRS2,
                enable_autodiff: false,
                batch_size: 4,
                ..Default::default()
            },
            QMLIntegrationConfig {
                framework: QMLFramework::SciRS2,
                enable_autodiff: true,
                batch_size: 4,
                ..Default::default()
            },
        ];

        for (i, config) in configs.into_iter().enumerate() {
            let mut integration = QMLIntegration::new(config)?;
            let mut qnn = Self::create_vqc(2, 2);
            qnn.training_config.epochs = 10;

            let training_data = Self::create_xor_training_data();

            let start = std::time::Instant::now();
            let _result = integration.train_qnn(qnn, &training_data, None)?;
            let time = start.elapsed().as_secs_f64() * 1000.0;

            results.training_times.push((format!("config_{i}"), time));
        }

        Ok(results)
    }
}

/// Timing results from `benchmark_qml_integration`.
#[derive(Debug, Clone, Default)]
pub struct QMLBenchmarkResults {
    pub training_times: Vec<(String, f64)>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_qml_integration_creation() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config);
        assert!(integration.is_ok());
    }

    #[test]
    fn test_quantum_neural_network_creation() {
        let qnn = QMLUtils::create_vqc(2, 2);
        assert_eq!(qnn.layers.len(), 4); // encoding + 2 variational + measurement
        assert_eq!(qnn.metadata.total_parameters, 12); // 2 layers * 2 qubits * 3 rotations
    }

    #[test]
    fn test_training_data_creation() {
        let data = QMLUtils::create_xor_training_data();
        assert_eq!(data.len(), 4);
        assert_eq!(data[0].input, Array1::from(vec![0.0, 0.0]));
        assert_eq!(data[0].target, Array1::from(vec![0.0]));
    }

    #[test]
    fn test_adam_optimizer() {
        let mut optimizer = AdamOptimizer::new(0.01);
        assert_eq!(optimizer.get_learning_rate(), 0.01);

        optimizer.update_learning_rate(0.5);
        assert_abs_diff_eq!(optimizer.get_learning_rate(), 0.005, epsilon = 1e-10);
    }

    #[test]
    fn test_sgd_optimizer() {
        let mut optimizer = SGDOptimizer::new(0.1);
        assert_eq!(optimizer.get_learning_rate(), 0.1);

        optimizer.update_learning_rate(0.9);
        assert_abs_diff_eq!(optimizer.get_learning_rate(), 0.09, epsilon = 1e-10);
    }

    #[test]
    fn test_qml_layer_types() {
        let layer_types = [
            QMLLayerType::VariationalCircuit,
            QMLLayerType::DataEncoding,
            QMLLayerType::Measurement,
            QMLLayerType::Classical,
        ];
        assert_eq!(layer_types.len(), 4);
    }

    #[test]
    fn test_training_config_default() {
        let config = TrainingConfig::default();
        assert_eq!(config.learning_rate, 0.01);
        assert_eq!(config.optimizer, OptimizerType::Adam);
        assert_eq!(config.loss_function, LossFunction::MeanSquaredError);
    }

    #[test]
    fn test_measurement_probability_computation() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config).expect("Failed to create QML integration");

        // Prepare the two-qubit basis state with index 1 (qubit 0 set, qubit 1 clear).
        let mut state = Array1::zeros(4);
        state[1] = Complex64::new(1.0, 0.0);

        let prob0 = integration
            .compute_measurement_probability(0, &state)
            .expect("Failed to compute measurement probability for qubit 0");
        let prob1 = integration
            .compute_measurement_probability(1, &state)
            .expect("Failed to compute measurement probability for qubit 1");

        assert_abs_diff_eq!(prob0, 1.0, epsilon = 1e-10); // qubit 0 is |1>
        assert_abs_diff_eq!(prob1, 0.0, epsilon = 1e-10); // qubit 1 is |0>
    }

    #[test]
    fn test_loss_computation() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config).expect("Failed to create QML integration");

        let prediction = Array1::from(vec![0.8, 0.2]);
        let target = Array1::from(vec![1.0, 0.0]);

        let mse = integration
            .compute_loss(&prediction, &target, &LossFunction::MeanSquaredError)
            .expect("Failed to compute MSE loss");
        let mae = integration
            .compute_loss(&prediction, &target, &LossFunction::MeanAbsoluteError)
            .expect("Failed to compute MAE loss");

        assert_abs_diff_eq!(mse, 0.04, epsilon = 1e-10); // ((-0.2)^2 + 0.2^2) / 2
        assert_abs_diff_eq!(mae, 0.2, epsilon = 1e-10); // (0.2 + 0.2) / 2
    }

    #[test]
    fn test_circuit_template_creation() {
        let circuit = QMLUtils::create_variational_circuit_template(3);
        assert_eq!(circuit.num_qubits, 3);
        assert_eq!(circuit.gates.len(), 11); // 3 qubits * 3 rotations + 2 CNOTs
    }
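
    // Additional test (added here as a sketch): parameter-shift index bookkeeping across
    // layers, using only APIs defined in this module.
    #[test]
    fn test_shift_parameter_bounds() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config).expect("Failed to create QML integration");

        let mut qnn = QMLUtils::create_vqc(2, 1);
        let total_params: usize = qnn.layers.iter().map(|l| l.parameters.len()).sum();

        // Shifting a valid parameter index succeeds; an out-of-bounds index is rejected.
        assert!(integration.shift_parameter(&mut qnn, 0, 0.1).is_ok());
        assert!(integration
            .shift_parameter(&mut qnn, total_params, 0.1)
            .is_err());
    }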
}