use crate::prelude::{InterfaceGate, InterfaceGateType, SimulatorError};
use scirs2_core::ndarray::Array1;
use scirs2_core::parallel_ops::*;
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use crate::autodiff_vqe::AutoDiffContext;
use crate::circuit_interfaces::{CircuitInterface, InterfaceCircuit};
use crate::error::Result;
use crate::scirs2_integration::SciRS2Backend;

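/// Quantum machine learning frameworks this integration layer can target.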
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum QMLFramework {
    PyTorch,
    TensorFlow,
    JAX,
    SciRS2,
    Custom,
}

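/// Configuration options for the QML integration layer.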
#[derive(Debug, Clone)]
pub struct QMLIntegrationConfig {
    pub framework: QMLFramework,
    pub enable_autodiff: bool,
    pub enable_gradient_optimization: bool,
    pub batch_size: usize,
    pub enable_parameter_sharing: bool,
    pub hardware_aware_optimization: bool,
    pub gradient_memory_limit: usize,
    pub enable_distributed_training: bool,
    pub enable_mixed_precision: bool,
}

impl Default for QMLIntegrationConfig {
    fn default() -> Self {
        Self {
            framework: QMLFramework::SciRS2,
            enable_autodiff: true,
            enable_gradient_optimization: true,
            batch_size: 32,
            enable_parameter_sharing: true,
            hardware_aware_optimization: true,
            gradient_memory_limit: 8_000_000_000,
            enable_distributed_training: false,
            enable_mixed_precision: false,
        }
    }
}

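/// Layer types available when composing a hybrid quantum-classical network.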
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum QMLLayerType {
    VariationalCircuit,
    QuantumConvolutional,
    QuantumRecurrent,
    QuantumAttention,
    DataEncoding,
    Measurement,
    Classical,
}

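/// A single layer of a quantum neural network.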
#[derive(Debug, Clone)]
pub struct QMLLayer {
    pub layer_type: QMLLayerType,
    pub name: String,
    pub num_qubits: usize,
    pub parameters: Vec<f64>,
    pub parameter_names: Vec<String>,
    pub circuit_template: Option<InterfaceCircuit>,
    pub classical_function: Option<String>,
    pub config: LayerConfig,
}

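/// Per-layer structural options (repetitions, entangling pattern, etc.).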
#[derive(Debug, Clone, Default)]
pub struct LayerConfig {
    pub repetitions: usize,
    pub entangling_pattern: Vec<(usize, usize)>,
    pub activation: Option<String>,
    pub regularization: Option<RegularizationConfig>,
    pub hardware_mapping: Option<Vec<usize>>,
}

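/// L1/L2 penalty strengths and dropout probability for a layer.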
#[derive(Debug, Clone)]
pub struct RegularizationConfig {
    pub l1_strength: f64,
    pub l2_strength: f64,
    pub dropout_prob: f64,
}

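/// A trainable quantum neural network: an ordered stack of layers plus
/// shared parameters, metadata, and its training configuration.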
#[derive(Debug, Clone)]
pub struct QuantumNeuralNetwork {
    pub layers: Vec<QMLLayer>,
    pub global_parameters: HashMap<String, f64>,
    pub metadata: QNNMetadata,
    pub training_config: TrainingConfig,
}

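/// Descriptive metadata for a quantum neural network.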
#[derive(Debug, Clone, Default)]
pub struct QNNMetadata {
    pub name: Option<String>,
    pub description: Option<String>,
    pub created_at: Option<std::time::SystemTime>,
    pub total_parameters: usize,
    pub trainable_parameters: usize,
    pub complexity_score: f64,
}

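/// Hyperparameters controlling the training loop.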
#[derive(Debug, Clone)]
pub struct TrainingConfig {
    pub learning_rate: f64,
    pub optimizer: OptimizerType,
    pub loss_function: LossFunction,
    pub epochs: usize,
    pub batch_size: usize,
    pub validation_split: f64,
    pub early_stopping_patience: Option<usize>,
    pub lr_scheduler: Option<LRScheduler>,
}

impl Default for TrainingConfig {
    fn default() -> Self {
        Self {
            learning_rate: 0.01,
            optimizer: OptimizerType::Adam,
            loss_function: LossFunction::MeanSquaredError,
            epochs: 100,
            batch_size: 32,
            validation_split: 0.2,
            early_stopping_patience: Some(10),
            lr_scheduler: None,
        }
    }
}

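/// Supported optimizers. Only `Adam` and `SGD` currently have concrete
/// implementations; the remaining variants fall back to Adam (see
/// `create_optimizer`).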
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizerType {
    SGD,
    Adam,
    AdamW,
    RMSprop,
    LBFGS,
    NaturalGradient,
    QuantumNaturalGradient,
}

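/// Supported loss functions.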
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LossFunction {
    MeanSquaredError,
    MeanAbsoluteError,
    CrossEntropy,
    BinaryCrossEntropy,
    Hinge,
    CustomQuantum,
}

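/// Learning-rate schedules, mirroring common PyTorch-style schedulers.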
#[derive(Debug, Clone)]
pub enum LRScheduler {
    StepLR { step_size: usize, gamma: f64 },
    ExponentialLR { gamma: f64 },
    CosineAnnealingLR { t_max: usize },
    ReduceLROnPlateau { patience: usize, factor: f64 },
}

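/// Main entry point: couples a circuit interface, an optional SciRS2
/// backend, and parameter/gradient caches to train quantum neural networks.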
pub struct QMLIntegration {
    config: QMLIntegrationConfig,
    circuit_interface: CircuitInterface,
    backend: Option<SciRS2Backend>,
    autodiff_context: Option<AutoDiffContext>,
    parameter_cache: Arc<Mutex<HashMap<String, Vec<f64>>>>,
    gradient_cache: Arc<Mutex<HashMap<String, Vec<f64>>>>,
    stats: QMLTrainingStats,
}

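/// Running statistics collected during training.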
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct QMLTrainingStats {
    pub total_training_time_ms: f64,
    pub parameter_updates: usize,
    pub gradient_computations: usize,
    pub avg_gradient_time_ms: f64,
    pub circuit_evaluations: usize,
    pub avg_circuit_time_ms: f64,
    pub loss_history: Vec<f64>,
    pub validation_loss_history: Vec<f64>,
    pub parameter_norm_history: Vec<f64>,
    pub gradient_norm_history: Vec<f64>,
}

impl QMLIntegration {
    pub fn new(config: QMLIntegrationConfig) -> Result<Self> {
        let circuit_interface = CircuitInterface::new(Default::default())?;

        Ok(Self {
            config,
            circuit_interface,
            backend: None,
            autodiff_context: None,
            parameter_cache: Arc::new(Mutex::new(HashMap::new())),
            gradient_cache: Arc::new(Mutex::new(HashMap::new())),
            stats: QMLTrainingStats::default(),
        })
    }

    pub fn with_backend(mut self) -> Result<Self> {
        self.backend = Some(SciRS2Backend::new());
        self.circuit_interface = self.circuit_interface.with_backend()?;

        if self.config.enable_autodiff {
            self.autodiff_context = Some(AutoDiffContext::new(
                Vec::new(),
                crate::autodiff_vqe::GradientMethod::ParameterShift,
            ));
        }

        Ok(self)
    }

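    /// Train a quantum neural network on `training_data`, optionally
    /// validating against `validation_data` after each epoch.
    ///
    /// A minimal usage sketch, wired up with this module's own helpers:
    ///
    /// ```ignore
    /// let mut integration = QMLIntegration::new(QMLIntegrationConfig::default())?;
    /// let qnn = QMLUtils::create_vqc(2, 2);
    /// let data = QMLUtils::create_xor_training_data();
    /// let result = integration.train_qnn(qnn, &data, None)?;
    /// println!("final loss: {:.6}", result.final_loss);
    /// ```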
    pub fn train_qnn(
        &mut self,
        mut qnn: QuantumNeuralNetwork,
        training_data: &[TrainingExample],
        validation_data: Option<&[TrainingExample]>,
    ) -> Result<TrainingResult> {
        let start_time = std::time::Instant::now();

        let mut optimizer = self.create_optimizer(&qnn.training_config)?;

        let mut lr_scheduler = qnn.training_config.lr_scheduler.clone();

        let mut best_loss = f64::INFINITY;
        let mut patience_counter = 0;

        for epoch in 0..qnn.training_config.epochs {
            let epoch_start = std::time::Instant::now();

            let train_loss = self.train_epoch(&mut qnn, training_data, &mut optimizer)?;
            self.stats.loss_history.push(train_loss);

            let val_loss = if let Some(val_data) = validation_data {
                self.validate_epoch(&qnn, val_data)?
            } else {
                train_loss
            };
            self.stats.validation_loss_history.push(val_loss);

            if let Some(ref mut scheduler) = lr_scheduler {
                self.update_lr_scheduler(scheduler, val_loss, &mut optimizer)?;
            }

            if let Some(patience) = qnn.training_config.early_stopping_patience {
                if val_loss < best_loss {
                    best_loss = val_loss;
                    patience_counter = 0;
                } else {
                    patience_counter += 1;
                    if patience_counter >= patience {
                        println!("Early stopping at epoch {epoch} due to no improvement");
                        break;
                    }
                }
            }

            let param_norm = self.compute_parameter_norm(&qnn)?;
            let grad_norm = self.compute_last_gradient_norm()?;
            self.stats.parameter_norm_history.push(param_norm);
            self.stats.gradient_norm_history.push(grad_norm);

            println!(
                "Epoch {}: train_loss={:.6}, val_loss={:.6}, time={:.2}ms",
                epoch,
                train_loss,
                val_loss,
                epoch_start.elapsed().as_secs_f64() * 1000.0
            );
        }

        let total_time = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.total_training_time_ms += total_time;

        Ok(TrainingResult {
            trained_qnn: qnn.clone(),
            final_loss: *self.stats.loss_history.last().unwrap_or(&0.0),
            final_validation_loss: *self.stats.validation_loss_history.last().unwrap_or(&0.0),
            epochs_completed: self.stats.loss_history.len(),
            total_time_ms: total_time,
            converged: patience_counter
                < qnn
                    .training_config
                    .early_stopping_patience
                    .unwrap_or(usize::MAX),
        })
    }

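    /// Run one pass over `training_data` in mini-batches, returning the
    /// mean batch loss.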
    fn train_epoch(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        training_data: &[TrainingExample],
        optimizer: &mut Box<dyn QMLOptimizer>,
    ) -> Result<f64> {
        let mut total_loss = 0.0;
        let batch_size = qnn.training_config.batch_size;
        let num_batches = training_data.len().div_ceil(batch_size);

        for batch_idx in 0..num_batches {
            let start_idx = batch_idx * batch_size;
            let end_idx = (start_idx + batch_size).min(training_data.len());
            let batch = &training_data[start_idx..end_idx];

            let (predictions, loss) = self.forward_pass(qnn, batch)?;
            total_loss += loss;

            let gradients = self.backward_pass(qnn, batch, &predictions)?;

            optimizer.update_parameters(qnn, &gradients)?;

            self.stats.parameter_updates += 1;
        }

        Ok(total_loss / num_batches as f64)
    }

    fn validate_epoch(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        validation_data: &[TrainingExample],
    ) -> Result<f64> {
        let mut total_loss = 0.0;
        let batch_size = qnn.training_config.batch_size;
        let num_batches = validation_data.len().div_ceil(batch_size);

        for batch_idx in 0..num_batches {
            let start_idx = batch_idx * batch_size;
            let end_idx = (start_idx + batch_size).min(validation_data.len());
            let batch = &validation_data[start_idx..end_idx];

            let (_, loss) = self.forward_pass(qnn, batch)?;
            total_loss += loss;
        }

        Ok(total_loss / num_batches as f64)
    }

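    /// Evaluate the network on a batch, returning per-example predictions
    /// and the mean loss. Also updates the rolling circuit-timing stats.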
    fn forward_pass(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
    ) -> Result<(Vec<Array1<f64>>, f64)> {
        let start_time = std::time::Instant::now();

        let mut predictions = Vec::new();
        let mut total_loss = 0.0;

        for example in batch {
            let prediction = self.evaluate_qnn(qnn, &example.input)?;

            let loss = self.compute_loss(
                &prediction,
                &example.target,
                &qnn.training_config.loss_function,
            )?;

            predictions.push(prediction);
            total_loss += loss;
        }

        let eval_time = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.avg_circuit_time_ms = self
            .stats
            .avg_circuit_time_ms
            .mul_add(self.stats.circuit_evaluations as f64, eval_time)
            / (self.stats.circuit_evaluations + batch.len()) as f64;
        self.stats.circuit_evaluations += batch.len();

        Ok((predictions, total_loss / batch.len() as f64))
    }

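    /// Compute gradients for the current batch, caching them for the
    /// gradient-norm statistics.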
    fn backward_pass(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
        predictions: &[Array1<f64>],
    ) -> Result<HashMap<String, Vec<f64>>> {
        let start_time = std::time::Instant::now();

        let gradients = if self.config.enable_autodiff {
            self.compute_gradients_autodiff(qnn, batch, predictions)?
        } else {
            self.compute_gradients_parameter_shift(qnn, batch)?
        };

        let grad_time = start_time.elapsed().as_secs_f64() * 1000.0;
        self.stats.avg_gradient_time_ms = self
            .stats
            .avg_gradient_time_ms
            .mul_add(self.stats.gradient_computations as f64, grad_time)
            / (self.stats.gradient_computations + 1) as f64;
        self.stats.gradient_computations += 1;

        {
            let mut cache = self.gradient_cache.lock().unwrap();
            for (param_name, grad) in &gradients {
                cache.insert(param_name.clone(), grad.clone());
            }
        }

        Ok(gradients)
    }

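    /// Run a single input through every layer, threading a state vector
    /// (initialized to |0...0>) through the quantum layers.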
    fn evaluate_qnn(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let total_qubits = qnn.layers.iter().map(|l| l.num_qubits).max().unwrap_or(1);
        let mut state = Array1::zeros(1 << total_qubits);
        state[0] = Complex64::new(1.0, 0.0);

        let mut current_output = input.clone();

        for layer in &qnn.layers {
            current_output = self.evaluate_layer(layer, &current_output, &mut state)?;
        }

        Ok(current_output)
    }

    fn evaluate_layer(
        &mut self,
        layer: &QMLLayer,
        input: &Array1<f64>,
        state: &mut Array1<Complex64>,
    ) -> Result<Array1<f64>> {
        match layer.layer_type {
            QMLLayerType::DataEncoding => {
                self.apply_data_encoding(layer, input, state)?;
                Ok(input.clone())
            }
            QMLLayerType::VariationalCircuit => {
                self.apply_variational_circuit(layer, state)?;
                self.measure_qubits(layer, state)
            }
            QMLLayerType::Measurement => self.measure_qubits(layer, state),
            QMLLayerType::Classical => self.apply_classical_processing(layer, input),
            _ => {
                // Unimplemented layer types pass the input through unchanged.
                Ok(input.clone())
            }
        }
    }

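    /// Angle-encode classical inputs: feature x_i drives an RY(pi * x_i)
    /// rotation on qubit i (features beyond `num_qubits` are ignored).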
    fn apply_data_encoding(
        &mut self,
        layer: &QMLLayer,
        input: &Array1<f64>,
        state: &mut Array1<Complex64>,
    ) -> Result<()> {
        for (i, &value) in input.iter().enumerate() {
            if i < layer.num_qubits {
                let angle = value * std::f64::consts::PI;
                self.apply_ry_rotation(i, angle, state)?;
            }
        }
        Ok(())
    }

    fn apply_variational_circuit(
        &mut self,
        layer: &QMLLayer,
        state: &mut Array1<Complex64>,
    ) -> Result<()> {
        if let Some(circuit_template) = &layer.circuit_template {
            let mut circuit = circuit_template.clone();
            self.parameterize_circuit(&mut circuit, &layer.parameters)?;

            let compiled = self.circuit_interface.compile_circuit(
                &circuit,
                crate::circuit_interfaces::SimulationBackend::StateVector,
            )?;
            let result = self
                .circuit_interface
                .execute_circuit(&compiled, Some(state.clone()))?;

            if let Some(final_state) = result.final_state {
                *state = final_state;
            }
        }
        Ok(())
    }

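    /// Measure each of the layer's qubits, returning P(|1>) per qubit.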
    fn measure_qubits(&self, layer: &QMLLayer, state: &Array1<Complex64>) -> Result<Array1<f64>> {
        let mut measurements = Array1::zeros(layer.num_qubits);

        for qubit in 0..layer.num_qubits {
            let prob = self.compute_measurement_probability(qubit, state)?;
            measurements[qubit] = prob;
        }

        Ok(measurements)
    }

    fn apply_classical_processing(
        &self,
        _layer: &QMLLayer,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        // Placeholder: classical post-processing is currently an identity map.
        Ok(input.clone())
    }

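    /// Apply RY(theta) in place on the state vector. For each amplitude
    /// pair (a0, a1) differing only in `qubit`:
    ///   a0' = cos(theta/2) * a0 - sin(theta/2) * a1
    ///   a1' = sin(theta/2) * a0 + cos(theta/2) * a1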
    fn apply_ry_rotation(
        &self,
        qubit: usize,
        angle: f64,
        state: &mut Array1<Complex64>,
    ) -> Result<()> {
        let qubit_mask = 1 << qubit;
        let cos_half = (angle / 2.0).cos();
        let sin_half = (angle / 2.0).sin();

        for i in 0..state.len() {
            if i & qubit_mask == 0 {
                let j = i | qubit_mask;
                if j < state.len() {
                    let amp_0 = state[i];
                    let amp_1 = state[j];

                    state[i] = cos_half * amp_0 - sin_half * amp_1;
                    state[j] = sin_half * amp_0 + cos_half * amp_1;
                }
            }
        }

        Ok(())
    }

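    /// Bind `parameters` to the circuit's rotation and phase gates in
    /// program order, leaving any surplus gates at their template angles.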
    fn parameterize_circuit(
        &self,
        circuit: &mut InterfaceCircuit,
        parameters: &[f64],
    ) -> Result<()> {
        let mut param_idx = 0;

        for gate in &mut circuit.gates {
            match &mut gate.gate_type {
                InterfaceGateType::RX(ref mut angle)
                | InterfaceGateType::RY(ref mut angle)
                | InterfaceGateType::RZ(ref mut angle)
                | InterfaceGateType::Phase(ref mut angle) => {
                    if param_idx < parameters.len() {
                        *angle = parameters[param_idx];
                        param_idx += 1;
                    }
                }
                _ => {}
            }
        }

        Ok(())
    }

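    /// Probability of measuring |1> on `qubit`: the summed |amplitude|^2
    /// over all basis states with that bit set.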
    fn compute_measurement_probability(
        &self,
        qubit: usize,
        state: &Array1<Complex64>,
    ) -> Result<f64> {
        let qubit_mask = 1 << qubit;
        let mut prob_one = 0.0;

        for (i, &amplitude) in state.iter().enumerate() {
            if i & qubit_mask != 0 {
                prob_one += amplitude.norm_sqr();
            }
        }

        Ok(prob_one)
    }

    fn compute_loss(
        &self,
        prediction: &Array1<f64>,
        target: &Array1<f64>,
        loss_fn: &LossFunction,
    ) -> Result<f64> {
        match loss_fn {
            LossFunction::MeanSquaredError => {
                let diff = prediction - target;
                Ok(diff.mapv(|x| x * x).mean().unwrap_or(0.0))
            }
            LossFunction::MeanAbsoluteError => {
                let diff = prediction - target;
                Ok(diff.mapv(|x| x.abs()).mean().unwrap_or(0.0))
            }
            LossFunction::CrossEntropy => {
                let mut loss = 0.0;
                for (&pred, &targ) in prediction.iter().zip(target.iter()) {
                    if targ > 0.0 {
                        // Clamp to avoid ln(0) when a predicted probability is zero.
                        loss -= targ * pred.max(1e-12).ln();
                    }
                }
                Ok(loss)
            }
            // Remaining loss functions are not yet implemented.
            _ => Ok(0.0),
        }
    }

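    /// Autodiff-based gradients. Currently delegates to the parameter-shift
    /// implementation; `predictions` is unused until a reverse-mode path
    /// through `AutoDiffContext` is wired in.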
    fn compute_gradients_autodiff(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
        _predictions: &[Array1<f64>],
    ) -> Result<HashMap<String, Vec<f64>>> {
        self.compute_gradients_parameter_shift(qnn, batch)
    }

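    /// Parameter-shift gradients: for each parameter theta feeding a
    /// rotation gate, dL/dtheta = [L(theta + pi/2) - L(theta - pi/2)] / 2,
    /// averaged over the batch. Costs two extra circuit evaluations per
    /// parameter per example.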
    fn compute_gradients_parameter_shift(
        &mut self,
        qnn: &QuantumNeuralNetwork,
        batch: &[TrainingExample],
    ) -> Result<HashMap<String, Vec<f64>>> {
        let mut gradients = HashMap::new();
        let shift = std::f64::consts::PI / 2.0;

        let mut param_names = Vec::new();
        for layer in &qnn.layers {
            for i in 0..layer.parameters.len() {
                param_names.push(format!("{}_{}", layer.name, i));
            }
        }

        for (param_idx, param_name) in param_names.iter().enumerate() {
            let mut param_grad = 0.0;

            for example in batch {
                let mut qnn_plus = qnn.clone();
                self.shift_parameter(&mut qnn_plus, param_idx, shift)?;
                let pred_plus = self.evaluate_qnn(&qnn_plus, &example.input)?;
                let loss_plus = self.compute_loss(
                    &pred_plus,
                    &example.target,
                    &qnn.training_config.loss_function,
                )?;

                let mut qnn_minus = qnn.clone();
                self.shift_parameter(&mut qnn_minus, param_idx, -shift)?;
                let pred_minus = self.evaluate_qnn(&qnn_minus, &example.input)?;
                let loss_minus = self.compute_loss(
                    &pred_minus,
                    &example.target,
                    &qnn.training_config.loss_function,
                )?;

                param_grad += (loss_plus - loss_minus) / 2.0;
            }

            param_grad /= batch.len() as f64;
            gradients.insert(param_name.clone(), vec![param_grad]);
        }

        Ok(gradients)
    }

    fn shift_parameter(
        &self,
        qnn: &mut QuantumNeuralNetwork,
        param_idx: usize,
        shift: f64,
    ) -> Result<()> {
        let mut current_idx = 0;

        for layer in &mut qnn.layers {
            if current_idx + layer.parameters.len() > param_idx {
                let local_idx = param_idx - current_idx;
                layer.parameters[local_idx] += shift;
                return Ok(());
            }
            current_idx += layer.parameters.len();
        }

        Err(SimulatorError::InvalidInput(format!(
            "Parameter index {param_idx} out of bounds"
        )))
    }

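    /// Instantiate the configured optimizer. Only Adam and SGD are
    /// implemented; other variants currently fall back to Adam.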
    fn create_optimizer(&self, config: &TrainingConfig) -> Result<Box<dyn QMLOptimizer>> {
        match config.optimizer {
            OptimizerType::Adam => Ok(Box::new(AdamOptimizer::new(config.learning_rate))),
            OptimizerType::SGD => Ok(Box::new(SGDOptimizer::new(config.learning_rate))),
            _ => Ok(Box::new(AdamOptimizer::new(config.learning_rate))),
        }
    }

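    /// Apply the scheduler's decay factor. Simplified: the factor is applied
    /// on every call rather than tracking step counts, plateau patience, or
    /// the loss value.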
    fn update_lr_scheduler(
        &self,
        scheduler: &mut LRScheduler,
        _current_loss: f64,
        optimizer: &mut Box<dyn QMLOptimizer>,
    ) -> Result<()> {
        match scheduler {
            LRScheduler::StepLR {
                step_size: _,
                gamma,
            } => {
                optimizer.update_learning_rate(*gamma);
            }
            LRScheduler::ExponentialLR { gamma } => {
                optimizer.update_learning_rate(*gamma);
            }
            LRScheduler::ReduceLROnPlateau {
                patience: _,
                factor,
            } => {
                optimizer.update_learning_rate(*factor);
            }
            _ => {}
        }
        Ok(())
    }

    fn compute_parameter_norm(&self, qnn: &QuantumNeuralNetwork) -> Result<f64> {
        let mut norm_squared = 0.0;

        for layer in &qnn.layers {
            for &param in &layer.parameters {
                norm_squared += param * param;
            }
        }

        Ok(norm_squared.sqrt())
    }

    fn compute_last_gradient_norm(&self) -> Result<f64> {
        let cache = self.gradient_cache.lock().unwrap();
        let mut norm_squared = 0.0;

        for grads in cache.values() {
            for &grad in grads {
                norm_squared += grad * grad;
            }
        }

        Ok(norm_squared.sqrt())
    }

    pub const fn get_stats(&self) -> &QMLTrainingStats {
        &self.stats
    }

    pub fn reset_stats(&mut self) {
        self.stats = QMLTrainingStats::default();
    }
}

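/// A single supervised (input, target) training pair.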
#[derive(Debug, Clone)]
pub struct TrainingExample {
    pub input: Array1<f64>,
    pub target: Array1<f64>,
}

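/// Summary of a completed training run.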
#[derive(Debug, Clone)]
pub struct TrainingResult {
    pub trained_qnn: QuantumNeuralNetwork,
    pub final_loss: f64,
    pub final_validation_loss: f64,
    pub epochs_completed: usize,
    pub total_time_ms: f64,
    pub converged: bool,
}

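/// Interface implemented by all parameter-update strategies.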
pub trait QMLOptimizer {
    fn update_parameters(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        gradients: &HashMap<String, Vec<f64>>,
    ) -> Result<()>;

    fn update_learning_rate(&mut self, factor: f64);

    fn get_learning_rate(&self) -> f64;
}

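/// Adam optimizer. Per parameter with gradient g at step t:
///   m = b1*m + (1 - b1)*g,    v = b2*v + (1 - b2)*g^2,
///   m_hat = m / (1 - b1^t),   v_hat = v / (1 - b2^t),
///   theta -= lr * m_hat / (sqrt(v_hat) + eps).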
pub struct AdamOptimizer {
    learning_rate: f64,
    beta1: f64,
    beta2: f64,
    epsilon: f64,
    step: usize,
    m: HashMap<String, Vec<f64>>,
    v: HashMap<String, Vec<f64>>,
}

impl AdamOptimizer {
    pub fn new(learning_rate: f64) -> Self {
        Self {
            learning_rate,
            beta1: 0.9,
            beta2: 0.999,
            epsilon: 1e-8,
            step: 0,
            m: HashMap::new(),
            v: HashMap::new(),
        }
    }
}

impl QMLOptimizer for AdamOptimizer {
    fn update_parameters(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        gradients: &HashMap<String, Vec<f64>>,
    ) -> Result<()> {
        self.step += 1;

        for (param_name, grads) in gradients {
            if !self.m.contains_key(param_name) {
                self.m.insert(param_name.clone(), vec![0.0; grads.len()]);
                self.v.insert(param_name.clone(), vec![0.0; grads.len()]);
            }

            let mut updates = Vec::new();

            {
                let m = self.m.get_mut(param_name).unwrap();
                let v = self.v.get_mut(param_name).unwrap();

                for (i, &grad) in grads.iter().enumerate() {
                    // Exponential moving averages of the gradient and its square.
                    m[i] = self.beta1.mul_add(m[i], (1.0 - self.beta1) * grad);
                    v[i] = self.beta2.mul_add(v[i], (1.0 - self.beta2) * grad * grad);

                    // Bias-corrected first and second moment estimates.
                    let m_hat = m[i] / (1.0 - self.beta1.powi(self.step as i32));
                    let v_hat = v[i] / (1.0 - self.beta2.powi(self.step as i32));

                    let update = self.learning_rate * m_hat / (v_hat.sqrt() + self.epsilon);
                    updates.push((i, -update));
                }
            }

            for (i, update) in updates {
                self.update_qnn_parameter(qnn, param_name, i, update)?;
            }
        }

        Ok(())
    }

    fn update_learning_rate(&mut self, factor: f64) {
        self.learning_rate *= factor;
    }

    fn get_learning_rate(&self) -> f64 {
        self.learning_rate
    }
}

impl AdamOptimizer {
    fn update_qnn_parameter(
        &self,
        qnn: &mut QuantumNeuralNetwork,
        param_name: &str,
        param_idx: usize,
        update: f64,
    ) -> Result<()> {
        // Gradient keys have the form "{layer_name}_{index}". Layer names may
        // themselves contain underscores (e.g. "var_layer_0"), so split on the
        // last underscore only.
        if let Some((layer_name, idx_str)) = param_name.rsplit_once('_') {
            if let Ok(base_idx) = idx_str.parse::<usize>() {
                let local_idx = base_idx + param_idx;
                for layer in &mut qnn.layers {
                    if layer.name == layer_name && local_idx < layer.parameters.len() {
                        layer.parameters[local_idx] += update;
                        return Ok(());
                    }
                }
            }
        }

        Err(SimulatorError::InvalidInput(format!(
            "Parameter {param_name} not found"
        )))
    }
}

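/// SGD with momentum: v = mu*v - lr*g, then theta += v.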
pub struct SGDOptimizer {
    learning_rate: f64,
    momentum: f64,
    velocity: HashMap<String, Vec<f64>>,
}

impl SGDOptimizer {
    pub fn new(learning_rate: f64) -> Self {
        Self {
            learning_rate,
            momentum: 0.9,
            velocity: HashMap::new(),
        }
    }
}

impl QMLOptimizer for SGDOptimizer {
    fn update_parameters(
        &mut self,
        qnn: &mut QuantumNeuralNetwork,
        gradients: &HashMap<String, Vec<f64>>,
    ) -> Result<()> {
        for (param_name, grads) in gradients {
            if !self.velocity.contains_key(param_name) {
                self.velocity
                    .insert(param_name.clone(), vec![0.0; grads.len()]);
            }

            let mut updates = Vec::new();

            {
                let velocity = self.velocity.get_mut(param_name).unwrap();

                for (i, &grad) in grads.iter().enumerate() {
                    // v = momentum * v - lr * grad
                    velocity[i] = self
                        .momentum
                        .mul_add(velocity[i], -(self.learning_rate * grad));
                    updates.push((i, velocity[i]));
                }
            }

            for (i, update) in updates {
                self.update_qnn_parameter(qnn, param_name, i, update)?;
            }
        }

        Ok(())
    }

    fn update_learning_rate(&mut self, factor: f64) {
        self.learning_rate *= factor;
    }

    fn get_learning_rate(&self) -> f64 {
        self.learning_rate
    }
}

impl SGDOptimizer {
    fn update_qnn_parameter(
        &self,
        qnn: &mut QuantumNeuralNetwork,
        param_name: &str,
        param_idx: usize,
        update: f64,
    ) -> Result<()> {
        // As in `AdamOptimizer`: split on the last underscore, since layer
        // names can themselves contain underscores.
        if let Some((layer_name, idx_str)) = param_name.rsplit_once('_') {
            if let Ok(base_idx) = idx_str.parse::<usize>() {
                let local_idx = base_idx + param_idx;
                for layer in &mut qnn.layers {
                    if layer.name == layer_name && local_idx < layer.parameters.len() {
                        layer.parameters[local_idx] += update;
                        return Ok(());
                    }
                }
            }
        }

        Err(SimulatorError::InvalidInput(format!(
            "Parameter {param_name} not found"
        )))
    }
}

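/// Convenience constructors, toy datasets, and benchmarks for QML.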
pub struct QMLUtils;

impl QMLUtils {
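    /// Build a variational quantum classifier: an angle-encoding layer,
    /// `num_layers` variational blocks (three randomly initialized rotation
    /// parameters per qubit, linear CNOT entanglement), and a Z-basis
    /// measurement layer.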
    pub fn create_vqc(num_qubits: usize, num_layers: usize) -> QuantumNeuralNetwork {
        let mut layers = Vec::new();

        layers.push(QMLLayer {
            layer_type: QMLLayerType::DataEncoding,
            name: "encoding".to_string(),
            num_qubits,
            parameters: Vec::new(),
            parameter_names: Vec::new(),
            circuit_template: None,
            classical_function: None,
            config: LayerConfig::default(),
        });

        for layer_idx in 0..num_layers {
            // Three rotation parameters (RX, RY, RZ) per qubit.
            let num_params = num_qubits * 3;
            let parameters = (0..num_params)
                .map(|_| fastrand::f64() * 2.0 * std::f64::consts::PI)
                .collect();
            let parameter_names = (0..num_params).map(|i| format!("param_{i}")).collect();

            layers.push(QMLLayer {
                layer_type: QMLLayerType::VariationalCircuit,
                name: format!("var_layer_{layer_idx}"),
                num_qubits,
                parameters,
                parameter_names,
                circuit_template: Some(Self::create_variational_circuit_template(num_qubits)),
                classical_function: None,
                config: LayerConfig {
                    repetitions: 1,
                    entangling_pattern: (0..num_qubits - 1).map(|i| (i, i + 1)).collect(),
                    ..Default::default()
                },
            });
        }

        layers.push(QMLLayer {
            layer_type: QMLLayerType::Measurement,
            name: "measurement".to_string(),
            num_qubits,
            parameters: Vec::new(),
            parameter_names: Vec::new(),
            circuit_template: None,
            classical_function: None,
            config: LayerConfig::default(),
        });

        QuantumNeuralNetwork {
            layers,
            global_parameters: HashMap::new(),
            metadata: QNNMetadata {
                name: Some("VQC".to_string()),
                total_parameters: num_layers * num_qubits * 3,
                trainable_parameters: num_layers * num_qubits * 3,
                ..Default::default()
            },
            training_config: TrainingConfig::default(),
        }
    }

    fn create_variational_circuit_template(num_qubits: usize) -> InterfaceCircuit {
        let mut circuit = InterfaceCircuit::new(num_qubits, 0);

        // Placeholder angles (0.0) are rebound later by `parameterize_circuit`.
        for qubit in 0..num_qubits {
            circuit.add_gate(InterfaceGate::new(InterfaceGateType::RX(0.0), vec![qubit]));
            circuit.add_gate(InterfaceGate::new(InterfaceGateType::RY(0.0), vec![qubit]));
            circuit.add_gate(InterfaceGate::new(InterfaceGateType::RZ(0.0), vec![qubit]));
        }

        for qubit in 0..num_qubits - 1 {
            circuit.add_gate(InterfaceGate::new(
                InterfaceGateType::CNOT,
                vec![qubit, qubit + 1],
            ));
        }

        circuit
    }

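    /// The four-point XOR dataset, a minimal nonlinearly-separable benchmark.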
    pub fn create_xor_training_data() -> Vec<TrainingExample> {
        vec![
            TrainingExample {
                input: Array1::from(vec![0.0, 0.0]),
                target: Array1::from(vec![0.0]),
            },
            TrainingExample {
                input: Array1::from(vec![0.0, 1.0]),
                target: Array1::from(vec![1.0]),
            },
            TrainingExample {
                input: Array1::from(vec![1.0, 0.0]),
                target: Array1::from(vec![1.0]),
            },
            TrainingExample {
                input: Array1::from(vec![1.0, 1.0]),
                target: Array1::from(vec![0.0]),
            },
        ]
    }

    pub fn benchmark_qml_integration() -> Result<QMLBenchmarkResults> {
        let mut results = QMLBenchmarkResults::default();

        let configs = vec![
            QMLIntegrationConfig {
                framework: QMLFramework::SciRS2,
                enable_autodiff: false,
                batch_size: 4,
                ..Default::default()
            },
            QMLIntegrationConfig {
                framework: QMLFramework::SciRS2,
                enable_autodiff: true,
                batch_size: 4,
                ..Default::default()
            },
        ];

        for (i, config) in configs.into_iter().enumerate() {
            let mut integration = QMLIntegration::new(config)?;
            let mut qnn = Self::create_vqc(2, 2);
            qnn.training_config.epochs = 10;

            let training_data = Self::create_xor_training_data();

            let start = std::time::Instant::now();
            let _result = integration.train_qnn(qnn, &training_data, None)?;
            let time = start.elapsed().as_secs_f64() * 1000.0;

            results.training_times.push((format!("config_{i}"), time));
        }

        Ok(results)
    }
}

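/// Timing results from `benchmark_qml_integration`.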
#[derive(Debug, Clone, Default)]
pub struct QMLBenchmarkResults {
    pub training_times: Vec<(String, f64)>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_qml_integration_creation() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config);
        assert!(integration.is_ok());
    }

    #[test]
    fn test_quantum_neural_network_creation() {
        let qnn = QMLUtils::create_vqc(2, 2);
        // Encoding + 2 variational layers + measurement.
        assert_eq!(qnn.layers.len(), 4);
        // 2 layers * 2 qubits * 3 rotation parameters.
        assert_eq!(qnn.metadata.total_parameters, 12);
    }

    #[test]
    fn test_training_data_creation() {
        let data = QMLUtils::create_xor_training_data();
        assert_eq!(data.len(), 4);
        assert_eq!(data[0].input, Array1::from(vec![0.0, 0.0]));
        assert_eq!(data[0].target, Array1::from(vec![0.0]));
    }

    #[test]
    fn test_adam_optimizer() {
        let mut optimizer = AdamOptimizer::new(0.01);
        assert_eq!(optimizer.get_learning_rate(), 0.01);

        optimizer.update_learning_rate(0.5);
        assert_abs_diff_eq!(optimizer.get_learning_rate(), 0.005, epsilon = 1e-10);
    }

    #[test]
    fn test_sgd_optimizer() {
        let mut optimizer = SGDOptimizer::new(0.1);
        assert_eq!(optimizer.get_learning_rate(), 0.1);

        optimizer.update_learning_rate(0.9);
        assert_abs_diff_eq!(optimizer.get_learning_rate(), 0.09, epsilon = 1e-10);
    }

    #[test]
    fn test_qml_layer_types() {
        let layer_types = [
            QMLLayerType::VariationalCircuit,
            QMLLayerType::DataEncoding,
            QMLLayerType::Measurement,
            QMLLayerType::Classical,
        ];
        assert_eq!(layer_types.len(), 4);
    }

    #[test]
    fn test_training_config_default() {
        let config = TrainingConfig::default();
        assert_eq!(config.learning_rate, 0.01);
        assert_eq!(config.optimizer, OptimizerType::Adam);
        assert_eq!(config.loss_function, LossFunction::MeanSquaredError);
    }

    #[test]
    fn test_measurement_probability_computation() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config).unwrap();

        // Prepare |01>: basis index 1, i.e. qubit 0 set, qubit 1 clear.
        let mut state = Array1::zeros(4);
        state[1] = Complex64::new(1.0, 0.0);

        let prob0 = integration
            .compute_measurement_probability(0, &state)
            .unwrap();
        let prob1 = integration
            .compute_measurement_probability(1, &state)
            .unwrap();

        assert_abs_diff_eq!(prob0, 1.0, epsilon = 1e-10);
        assert_abs_diff_eq!(prob1, 0.0, epsilon = 1e-10);
    }

    #[test]
    fn test_loss_computation() {
        let config = QMLIntegrationConfig::default();
        let integration = QMLIntegration::new(config).unwrap();

        let prediction = Array1::from(vec![0.8, 0.2]);
        let target = Array1::from(vec![1.0, 0.0]);

        let mse = integration
            .compute_loss(&prediction, &target, &LossFunction::MeanSquaredError)
            .unwrap();
        let mae = integration
            .compute_loss(&prediction, &target, &LossFunction::MeanAbsoluteError)
            .unwrap();

        // diff = [-0.2, 0.2] => MSE = 0.04, MAE = 0.2.
        assert_abs_diff_eq!(mse, 0.04, epsilon = 1e-10);
        assert_abs_diff_eq!(mae, 0.2, epsilon = 1e-10);
    }

    #[test]
    fn test_circuit_template_creation() {
        let circuit = QMLUtils::create_variational_circuit_template(3);
        assert_eq!(circuit.num_qubits, 3);
        // 3 qubits * 3 rotations + 2 CNOTs = 11 gates.
        assert_eq!(circuit.gates.len(), 11);
    }
}
1418}