use crate::{
    adaptive_precision::AdaptivePrecisionSimulator, error::QuantRS2Result,
    quantum_autodiff::QuantumAutoDiff,
};
use scirs2_core::ndarray::{Array1, Array2};
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
    time::{Duration, Instant},
};

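/// Configuration for a hybrid quantum-classical neural network.
///
/// A minimal usage sketch (the import path depends on the crate layout and the
/// field values are illustrative, not tuned):
///
/// ```ignore
/// let config = HybridLearningConfig {
///     num_qubits: 6,
///     interaction_type: InteractionType::Parallel,
///     ..Default::default()
/// };
/// let network = HybridNeuralNetwork::new(config)?;
/// ```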
#[derive(Debug, Clone)]
pub struct HybridLearningConfig {
    /// Depth (number of layers) of the parameterized quantum circuit.
    pub quantum_depth: usize,
    /// Number of qubits in the quantum circuit.
    pub num_qubits: usize,
    /// Sizes of the classical dense layers.
    pub classical_layers: Vec<usize>,
    /// Learning rate for the quantum circuit parameters.
    pub quantum_learning_rate: f64,
    /// Learning rate for the classical weights and biases.
    pub classical_learning_rate: f64,
    /// Mini-batch size used during training.
    pub batch_size: usize,
    /// Maximum number of training epochs.
    pub max_epochs: usize,
    /// Number of epochs without validation improvement before early stopping.
    pub early_stopping_patience: usize,
    /// How the classical and quantum components interact.
    pub interaction_type: InteractionType,
    /// Whether to run a quantum advantage analysis after training.
    pub enable_quantum_advantage_analysis: bool,
    /// Whether to use the adaptive precision simulator.
    pub use_adaptive_precision: bool,
}

impl Default for HybridLearningConfig {
    fn default() -> Self {
        Self {
            quantum_depth: 3,
            num_qubits: 4,
            classical_layers: vec![64, 32, 16],
            quantum_learning_rate: 0.01,
            classical_learning_rate: 0.001,
            batch_size: 32,
            max_epochs: 100,
            early_stopping_patience: 10,
            interaction_type: InteractionType::Sequential,
            enable_quantum_advantage_analysis: true,
            use_adaptive_precision: true,
        }
    }
}

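/// How the classical and quantum components exchange data in the forward pass.
///
/// As implemented in the `forward_*` methods below:
/// - `Sequential`: classical layers first, then the quantum circuit, then fusion.
/// - `Interleaved`: alternate a classical layer, the quantum circuit, and fusion at each stage.
/// - `Parallel`: classical and quantum branches both consume the raw input; outputs are fused.
/// - `Residual`: the quantum output is added element-wise onto the classical output.
/// - `Attention`: the classical output acts as the query, the quantum output as key and value.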
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InteractionType {
    Sequential,
    Interleaved,
    Parallel,
    Residual,
    Attention,
}

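/// A hybrid quantum-classical neural network combining dense classical layers,
/// a parameterized quantum circuit, and a fusion layer.
///
/// A minimal sketch of a forward pass (import paths depend on the crate layout):
///
/// ```ignore
/// use scirs2_core::ndarray::Array1;
///
/// let mut network = HybridNeuralNetwork::new(HybridLearningConfig::default())?;
/// let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
/// let output = network.forward(&input)?;
/// ```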
#[derive(Debug)]
pub struct HybridNeuralNetwork {
    config: HybridLearningConfig,
    classical_layers: Vec<DenseLayer>,
    quantum_circuit: ParameterizedQuantumCircuit,
    fusion_layer: FusionLayer,
    autodiff: Arc<RwLock<QuantumAutoDiff>>,
    adaptive_precision: Option<Arc<RwLock<AdaptivePrecisionSimulator>>>,
    training_history: TrainingHistory,
}

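/// A fully connected classical layer. Weights are initialized with a
/// Glorot-style uniform distribution in `[-limit, limit]`, where
/// `limit = sqrt(6 / (fan_in + fan_out))`; biases start at zero.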
#[derive(Debug, Clone)]
pub struct DenseLayer {
    weights: Array2<f64>,
    biases: Array1<f64>,
    activation: ActivationFunction,
}

#[derive(Debug, Clone, Copy)]
pub enum ActivationFunction {
    ReLU,
    Sigmoid,
    Tanh,
    Linear,
    Swish,
    GELU,
}

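/// A layered, hardware-efficient-style ansatz: each layer applies a
/// parameterized `RY` rotation to every qubit followed by a linear chain of
/// `CNOT` gates. Note that `forward` below uses a simplified classical
/// surrogate of the circuit rather than a full state-vector simulation.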
#[derive(Debug)]
pub struct ParameterizedQuantumCircuit {
    num_qubits: usize,
    depth: usize,
    parameters: Vec<f64>,
    gate_sequence: Vec<QuantumGateInfo>,
    parameter_map: HashMap<usize, Vec<usize>>,
}

#[derive(Debug, Clone)]
pub struct QuantumGateInfo {
    gate_type: String,
    qubits: Vec<usize>,
    is_parameterized: bool,
    parameter_index: Option<usize>,
}

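/// Combines the classical and quantum feature vectors. The output length
/// depends on the fusion type (see `fuse`): `Concatenation` yields
/// `classical.len() + quantum.len()`, `WeightedSum` yields the maximum of the
/// two lengths, and `ElementwiseProduct` yields the minimum.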
#[derive(Debug)]
pub struct FusionLayer {
    fusion_type: FusionType,
    fusion_weights: Array2<f64>,
    quantum_weight: f64,
    classical_weight: f64,
}

#[derive(Debug, Clone, Copy)]
pub enum FusionType {
    Concatenation,
    ElementwiseProduct,
    WeightedSum,
    Attention,
    BilinearPooling,
}

#[derive(Debug)]
pub struct TrainingHistory {
    losses: Vec<f64>,
    quantum_losses: Vec<f64>,
    classical_losses: Vec<f64>,
    accuracies: Vec<f64>,
    quantum_advantage_scores: Vec<f64>,
    training_times: Vec<Duration>,
    epoch_details: Vec<EpochDetails>,
}

#[derive(Debug, Clone)]
pub struct EpochDetails {
    epoch: usize,
    train_loss: f64,
    val_loss: Option<f64>,
    train_accuracy: f64,
    val_accuracy: Option<f64>,
    quantum_contribution: f64,
    classical_contribution: f64,
    learning_rates: (f64, f64),
}

#[derive(Debug)]
pub struct TrainingData {
    inputs: Array2<f64>,
    targets: Array2<f64>,
    validation_inputs: Option<Array2<f64>>,
    validation_targets: Option<Array2<f64>>,
}

#[derive(Debug, Clone)]
pub struct QuantumAdvantageAnalysis {
    quantum_only_performance: f64,
    classical_only_performance: f64,
    hybrid_performance: f64,
    quantum_advantage_ratio: f64,
    statistical_significance: f64,
    computational_speedup: f64,
}

impl HybridNeuralNetwork {
    pub fn new(config: HybridLearningConfig) -> QuantRS2Result<Self> {
        // Classical layers are created lazily in `initialize_layers` once the
        // input size is known.
        let classical_layers = Vec::new();

        let quantum_circuit =
            ParameterizedQuantumCircuit::new(config.num_qubits, config.quantum_depth)?;

        // Placeholder classical size; the fusion layer is rebuilt with the real
        // size in `initialize_layers`.
        let fusion_layer = FusionLayer::new(FusionType::WeightedSum, 4, config.num_qubits)?;

        let autodiff = Arc::new(RwLock::new(
            crate::quantum_autodiff::QuantumAutoDiffFactory::create_for_vqe(),
        ));

        let adaptive_precision = if config.use_adaptive_precision {
            Some(Arc::new(RwLock::new(
                crate::adaptive_precision::AdaptivePrecisionFactory::create_balanced(),
            )))
        } else {
            None
        };

        Ok(Self {
            config,
            classical_layers,
            quantum_circuit,
            fusion_layer,
            autodiff,
            adaptive_precision,
            training_history: TrainingHistory::new(),
        })
    }

    pub fn forward(&mut self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        if self.classical_layers.is_empty() {
            self.initialize_layers(input.len())?;
        }

        match self.config.interaction_type {
            InteractionType::Sequential => self.forward_sequential(input),
            InteractionType::Interleaved => self.forward_interleaved(input),
            InteractionType::Parallel => self.forward_parallel(input),
            InteractionType::Residual => self.forward_residual(input),
            InteractionType::Attention => self.forward_attention(input),
        }
    }

    fn initialize_layers(&mut self, input_size: usize) -> QuantRS2Result<()> {
        let mut current_size = input_size;

        for &layer_size in &self.config.classical_layers {
            let layer = DenseLayer::new(current_size, layer_size, ActivationFunction::ReLU)?;
            self.classical_layers.push(layer);
            current_size = layer_size;
        }

        // Rebuild the fusion layer now that the classical output size is known.
        self.fusion_layer = FusionLayer::new(
            FusionType::WeightedSum,
            current_size,
            self.config.num_qubits,
        )?;

        Ok(())
    }

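    /// Trains the network with mini-batch updates, optional validation-based
    /// early stopping, and (optionally) a quantum advantage analysis at the end.
    ///
    /// A minimal sketch; `TrainingData` has private fields, so the struct
    /// literal below only compiles inside this module (including its `tests`
    /// submodule):
    ///
    /// ```ignore
    /// let data = TrainingData {
    ///     inputs,
    ///     targets,
    ///     validation_inputs: None,
    ///     validation_targets: None,
    /// };
    /// network.train(&data)?;
    /// ```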
    pub fn train(&mut self, training_data: &TrainingData) -> QuantRS2Result<()> {
        let start_time = Instant::now();
        let mut best_val_loss = f64::INFINITY;
        let mut patience_counter = 0;

        for epoch in 0..self.config.max_epochs {
            let epoch_start = Instant::now();

            let (train_loss, train_accuracy) = self.train_epoch(training_data)?;

            let (val_loss, val_accuracy) = if let (Some(val_inputs), Some(val_targets)) = (
                &training_data.validation_inputs,
                &training_data.validation_targets,
            ) {
                let (loss, acc) = self.evaluate(val_inputs, val_targets)?;
                (Some(loss), Some(acc))
            } else {
                (None, None)
            };

            let quantum_contribution = self.compute_quantum_contribution()?;
            let classical_contribution = 1.0 - quantum_contribution;

            let epoch_details = EpochDetails {
                epoch,
                train_loss,
                val_loss,
                train_accuracy,
                val_accuracy,
                quantum_contribution,
                classical_contribution,
                learning_rates: (
                    self.config.quantum_learning_rate,
                    self.config.classical_learning_rate,
                ),
            };

            self.training_history.losses.push(train_loss);
            self.training_history.accuracies.push(train_accuracy);
            self.training_history
                .training_times
                .push(epoch_start.elapsed());
            self.training_history.epoch_details.push(epoch_details);

            if let Some(current_val_loss) = val_loss {
                if current_val_loss < best_val_loss {
                    best_val_loss = current_val_loss;
                    patience_counter = 0;
                } else {
                    patience_counter += 1;
                    if patience_counter >= self.config.early_stopping_patience {
                        println!("Early stopping at epoch {}", epoch);
                        break;
                    }
                }
            }

            if epoch % 10 == 0 {
                println!(
                    "Epoch {}: Train Loss = {:.4}, Train Acc = {:.4}, Quantum Contrib = {:.2}%",
                    epoch,
                    train_loss,
                    train_accuracy,
                    quantum_contribution * 100.0
                );
            }
        }

        if self.config.enable_quantum_advantage_analysis {
            let advantage_analysis = self.analyze_quantum_advantage(training_data)?;
            println!(
                "Quantum Advantage Analysis: {:.2}x speedup, {:.2}% performance improvement",
                advantage_analysis.computational_speedup,
                (advantage_analysis.quantum_advantage_ratio - 1.0) * 100.0
            );
        }

        println!("Training completed in {:?}", start_time.elapsed());
        Ok(())
    }

    pub fn evaluate(
        &mut self,
        inputs: &Array2<f64>,
        targets: &Array2<f64>,
    ) -> QuantRS2Result<(f64, f64)> {
        let mut total_loss = 0.0;
        let mut correct_predictions = 0;
        let num_samples = inputs.nrows();

        for i in 0..num_samples {
            let input = inputs.row(i).to_owned();
            let target = targets.row(i).to_owned();

            let mut prediction = self.forward(&input)?;

            // Truncate prediction and target to a common length if they disagree.
            if prediction.len() != target.len() {
                let min_len = prediction.len().min(target.len());
                prediction = prediction
                    .slice(scirs2_core::ndarray::s![..min_len])
                    .to_owned();
            }

            let adjusted_target = if target.len() > prediction.len() {
                target
                    .slice(scirs2_core::ndarray::s![..prediction.len()])
                    .to_owned()
            } else {
                target
            };

            let loss = self.compute_loss(&prediction, &adjusted_target)?;
            total_loss += loss;

            // Treat the index of the largest output/target entry as the class label.
            let pred_class = prediction
                .iter()
                .enumerate()
                .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
                .unwrap()
                .0;
            let true_class = adjusted_target
                .iter()
                .enumerate()
                .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
                .unwrap()
                .0;

            if pred_class == true_class {
                correct_predictions += 1;
            }
        }

        let avg_loss = total_loss / num_samples as f64;
        let accuracy = correct_predictions as f64 / num_samples as f64;

        Ok((avg_loss, accuracy))
    }

    fn forward_sequential(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut classical_output = input.clone();
        for layer in &self.classical_layers {
            classical_output = layer.forward(&classical_output)?;
        }

        let quantum_input = self.prepare_quantum_input(&classical_output)?;
        let quantum_output = self.quantum_circuit.forward(&quantum_input)?;

        let fused_output = self.fusion_layer.fuse(&classical_output, &quantum_output)?;

        Ok(fused_output)
    }

    fn forward_interleaved(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut current = input.clone();
        let layers_per_stage = self.classical_layers.len().max(1);

        for i in 0..layers_per_stage {
            if i < self.classical_layers.len() {
                current = self.classical_layers[i].forward(&current)?;
            }

            let quantum_input = self.prepare_quantum_input(&current)?;
            let quantum_output = self.quantum_circuit.forward(&quantum_input)?;

            current = self.fusion_layer.fuse(&current, &quantum_output)?;
        }

        Ok(current)
    }

    fn forward_parallel(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut classical_output = input.clone();
        for layer in &self.classical_layers {
            classical_output = layer.forward(&classical_output)?;
        }

        // The quantum branch sees the raw input, in parallel with the classical branch.
        let quantum_input = self.prepare_quantum_input(input)?;
        let quantum_output = self.quantum_circuit.forward(&quantum_input)?;

        let fused_output = self.fusion_layer.fuse(&classical_output, &quantum_output)?;

        Ok(fused_output)
    }

    fn forward_residual(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut classical_output = input.clone();
        for layer in &self.classical_layers {
            classical_output = layer.forward(&classical_output)?;
        }

        let quantum_input = self.prepare_quantum_input(&classical_output)?;
        let quantum_output = self.quantum_circuit.forward(&quantum_input)?;

        // Residual connection: add the quantum output onto the classical output.
        let mut residual_output = classical_output.clone();
        let min_len = residual_output.len().min(quantum_output.len());
        for i in 0..min_len {
            residual_output[i] += quantum_output[i];
        }

        Ok(residual_output)
    }

    fn forward_attention(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut query = input.clone();
        for layer in &self.classical_layers {
            query = layer.forward(&query)?;
        }

        let quantum_input = self.prepare_quantum_input(&query)?;
        let quantum_output = self.quantum_circuit.forward(&quantum_input)?;

        // Classical output is the query; the quantum output serves as key and value.
        let attention_output = self.compute_attention(&query, &quantum_output, &quantum_output)?;

        Ok(attention_output)
    }

    fn prepare_quantum_input(&self, classical_output: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut quantum_input = Array1::zeros(self.config.num_qubits);

        // L2-normalize the classical features before encoding them onto the qubits.
        let norm = classical_output.iter().map(|x| x * x).sum::<f64>().sqrt();
        let normalized = if norm > 1e-10 {
            classical_output / norm
        } else {
            classical_output.clone()
        };

        let input_size = normalized.len().min(quantum_input.len());
        for i in 0..input_size {
            quantum_input[i] = normalized[i];
        }

        Ok(quantum_input)
    }

    fn compute_attention(
        &self,
        query: &Array1<f64>,
        key: &Array1<f64>,
        value: &Array1<f64>,
    ) -> QuantRS2Result<Array1<f64>> {
        // Scaled dot product over the common prefix of query and key (their
        // lengths can differ when the classical output size != num_qubits),
        // squashed through a sigmoid to yield a single attention weight.
        let common_len = query.len().min(key.len()).max(1);
        let mut attention_score = 0.0;
        for i in 0..query.len().min(key.len()) {
            attention_score += query[i] * key[i];
        }
        attention_score /= (common_len as f64).sqrt();
        let attention_weight = 1.0 / (1.0 + (-attention_score).exp());

        let mut attention_output = Array1::zeros(value.len());
        for i in 0..value.len() {
            attention_output[i] = attention_weight * value[i];
        }

        Ok(attention_output)
    }

    fn train_epoch(&mut self, training_data: &TrainingData) -> QuantRS2Result<(f64, f64)> {
        let mut total_loss = 0.0;
        let mut correct_predictions = 0;
        let num_samples = training_data.inputs.nrows();
        let num_batches = (num_samples + self.config.batch_size - 1) / self.config.batch_size;

        for batch_idx in 0..num_batches {
            let start_idx = batch_idx * self.config.batch_size;
            let end_idx = ((batch_idx + 1) * self.config.batch_size).min(num_samples);

            let mut batch_loss = 0.0;
            let mut batch_correct = 0;

            for i in start_idx..end_idx {
                let input = training_data.inputs.row(i).to_owned();
                let target = training_data.targets.row(i).to_owned();

                let prediction = self.forward(&input)?;
                let loss = self.compute_loss(&prediction, &target)?;
                batch_loss += loss;

                let pred_class = prediction
                    .iter()
                    .enumerate()
                    .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
                    .unwrap()
                    .0;
                let true_class = target
                    .iter()
                    .enumerate()
                    .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
                    .unwrap()
                    .0;

                if pred_class == true_class {
                    batch_correct += 1;
                }

                self.backward(&prediction, &target)?;
            }

            total_loss += batch_loss;
            correct_predictions += batch_correct;
        }

        let avg_loss = total_loss / num_samples as f64;
        let accuracy = correct_predictions as f64 / num_samples as f64;

        Ok((avg_loss, accuracy))
    }

    fn compute_loss(&self, prediction: &Array1<f64>, target: &Array1<f64>) -> QuantRS2Result<f64> {
        // Mean squared error.
        let diff = prediction - target;
        Ok(diff.iter().map(|x| x * x).sum::<f64>() / prediction.len() as f64)
    }

    fn backward(&mut self, prediction: &Array1<f64>, target: &Array1<f64>) -> QuantRS2Result<()> {
        // Gradient of the MSE loss with respect to the prediction.
        let loss_gradient = 2.0 * (prediction - target) / prediction.len() as f64;

        self.update_quantum_parameters(&loss_gradient)?;

        self.update_classical_parameters(&loss_gradient)?;

        Ok(())
    }

    fn update_quantum_parameters(&mut self, _gradient: &Array1<f64>) -> QuantRS2Result<()> {
        // Simplified placeholder update: a random perturbation scaled by the
        // learning rate rather than a true gradient step.
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();
        for param in &mut self.quantum_circuit.parameters {
            *param += self.config.quantum_learning_rate * (rng.gen::<f64>() - 0.5) * 0.1;
        }
        Ok(())
    }

    fn update_classical_parameters(&mut self, _gradient: &Array1<f64>) -> QuantRS2Result<()> {
        // Simplified placeholder update, mirroring the quantum parameter update above.
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();
        for layer in &mut self.classical_layers {
            for weight in layer.weights.iter_mut() {
                *weight += self.config.classical_learning_rate * (rng.gen::<f64>() - 0.5) * 0.1;
            }
            for bias in layer.biases.iter_mut() {
                *bias += self.config.classical_learning_rate * (rng.gen::<f64>() - 0.5) * 0.1;
            }
        }
        Ok(())
    }

    fn compute_quantum_contribution(&self) -> QuantRS2Result<f64> {
        // Placeholder: assume a fixed 30% quantum contribution.
        Ok(0.3)
    }

    fn analyze_quantum_advantage(
        &self,
        _training_data: &TrainingData,
    ) -> QuantRS2Result<QuantumAdvantageAnalysis> {
        // Placeholder analysis with fixed illustrative values rather than measured baselines.
        let hybrid_performance = 0.85;
        let classical_only_performance = 0.80;
        let quantum_only_performance = 0.60;
        let quantum_advantage_ratio = hybrid_performance / classical_only_performance;
        let computational_speedup = 1.2;
        let statistical_significance = 0.95;

        Ok(QuantumAdvantageAnalysis {
            quantum_only_performance,
            classical_only_performance,
            hybrid_performance,
            quantum_advantage_ratio,
            statistical_significance,
            computational_speedup,
        })
    }

    pub fn get_training_history(&self) -> &TrainingHistory {
        &self.training_history
    }

    pub fn get_quantum_advantage(&self) -> Option<f64> {
        self.training_history
            .quantum_advantage_scores
            .last()
            .copied()
    }
}

impl DenseLayer {
    fn new(
        input_size: usize,
        output_size: usize,
        activation: ActivationFunction,
    ) -> QuantRS2Result<Self> {
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();
        // Glorot-style uniform initialization.
        let limit = (6.0 / (input_size + output_size) as f64).sqrt();
        let weights = Array2::from_shape_fn((output_size, input_size), |_| {
            (rng.gen::<f64>() - 0.5) * 2.0 * limit
        });
        let biases = Array1::zeros(output_size);

        Ok(Self {
            weights,
            biases,
            activation,
        })
    }

    fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let linear_output = self.weights.dot(input) + &self.biases;
        let activated_output = self.apply_activation(&linear_output)?;
        Ok(activated_output)
    }

    fn apply_activation(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let output = match self.activation {
            ActivationFunction::ReLU => input.mapv(|x| x.max(0.0)),
            ActivationFunction::Sigmoid => input.mapv(|x| 1.0 / (1.0 + (-x).exp())),
            ActivationFunction::Tanh => input.mapv(|x| x.tanh()),
            ActivationFunction::Linear => input.clone(),
            ActivationFunction::Swish => input.mapv(|x| x / (1.0 + (-x).exp())),
            // Tanh approximation of GELU.
            ActivationFunction::GELU => input.mapv(|x| {
                0.5 * x
                    * (1.0
                        + ((2.0 / std::f64::consts::PI).sqrt() * (x + 0.044_715 * x.powi(3)))
                            .tanh())
            }),
        };
        Ok(output)
    }
}

impl ParameterizedQuantumCircuit {
    fn new(num_qubits: usize, depth: usize) -> QuantRS2Result<Self> {
        // Two parameter slots per qubit per layer are allocated, although the
        // current gate sequence only consumes one (the RY angle) per qubit per layer.
        let num_parameters = num_qubits * depth * 2;
        let parameters = vec![0.0; num_parameters];

        let mut gate_sequence = Vec::new();
        let mut parameter_map = HashMap::new();
        let mut param_idx = 0;

        for _layer in 0..depth {
            // Parameterized single-qubit rotations.
            for qubit in 0..num_qubits {
                gate_sequence.push(QuantumGateInfo {
                    gate_type: "RY".to_string(),
                    qubits: vec![qubit],
                    is_parameterized: true,
                    parameter_index: Some(param_idx),
                });
                parameter_map.insert(gate_sequence.len() - 1, vec![param_idx]);
                param_idx += 1;
            }

            // Entangling CNOT chain.
            for qubit in 0..num_qubits - 1 {
                gate_sequence.push(QuantumGateInfo {
                    gate_type: "CNOT".to_string(),
                    qubits: vec![qubit, qubit + 1],
                    is_parameterized: false,
                    parameter_index: None,
                });
            }
        }

        Ok(Self {
            num_qubits,
            depth,
            parameters,
            gate_sequence,
            parameter_map,
        })
    }

    fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        // Start in the |0...0> state.
        let mut state: Array1<f64> = Array1::zeros(1usize << self.num_qubits);
        state[0] = 1.0;

        // Encode the input values as amplitudes on the single-excitation basis states.
        for i in 0..input.len().min(self.num_qubits) {
            if input[i].abs() > 1e-10 {
                state[1 << i] = input[i];
            }
        }

        // Renormalize the state vector.
        let norm = state.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm > 1e-10 {
            state = state / norm;
        }

        // Apply a simplified surrogate of each parameterized gate: scale the
        // state by cos(angle) instead of performing a true RY rotation.
        for (gate_idx, gate) in self.gate_sequence.iter().enumerate() {
            if gate.is_parameterized {
                if let Some(param_indices) = self.parameter_map.get(&gate_idx) {
                    if let Some(&param_idx) = param_indices.first() {
                        let angle = self.parameters[param_idx];
                        state = state.mapv(|x| x * angle.cos());
                    }
                }
            }
        }

        // Read out the probability of measuring |1> on each qubit.
        let mut output = Array1::zeros(self.num_qubits);
        for i in 0..self.num_qubits {
            output[i] = state
                .iter()
                .enumerate()
                .filter(|(idx, _)| (idx >> i) & 1 == 1)
                .map(|(_, val)| val * val)
                .sum::<f64>();
        }

        Ok(output)
    }
}

impl FusionLayer {
    fn new(
        fusion_type: FusionType,
        classical_size: usize,
        quantum_size: usize,
    ) -> QuantRS2Result<Self> {
        use scirs2_core::random::prelude::*;
        let mut rng = thread_rng();
        let fusion_weights = match fusion_type {
            FusionType::Concatenation => Array2::eye(classical_size + quantum_size),
            FusionType::WeightedSum => Array2::from_shape_fn(
                (
                    classical_size.max(quantum_size),
                    classical_size + quantum_size,
                ),
                |_| rng.gen::<f64>() - 0.5,
            ),
            _ => Array2::eye(classical_size.max(quantum_size)),
        };

        Ok(Self {
            fusion_type,
            fusion_weights,
            quantum_weight: 0.5,
            classical_weight: 0.5,
        })
    }

    fn fuse(&self, classical: &Array1<f64>, quantum: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        match self.fusion_type {
            FusionType::Concatenation => {
                let mut result = Array1::zeros(classical.len() + quantum.len());
                for (i, &val) in classical.iter().enumerate() {
                    result[i] = val;
                }
                for (i, &val) in quantum.iter().enumerate() {
                    result[classical.len() + i] = val;
                }
                Ok(result)
            }
            FusionType::WeightedSum => {
                let size = classical.len().max(quantum.len());
                let mut result = Array1::zeros(size);

                for i in 0..size {
                    let c_val = if i < classical.len() {
                        classical[i]
                    } else {
                        0.0
                    };
                    let q_val = if i < quantum.len() { quantum[i] } else { 0.0 };
                    result[i] = self.classical_weight * c_val + self.quantum_weight * q_val;
                }
                Ok(result)
            }
            FusionType::ElementwiseProduct => {
                let size = classical.len().min(quantum.len());
                let mut result = Array1::zeros(size);
                for i in 0..size {
                    result[i] = classical[i] * quantum[i];
                }
                Ok(result)
            }
            _ => {
                // Attention and bilinear pooling are not implemented yet; fall back
                // to a weighted sum over the common prefix instead of recursing.
                let size = classical.len().min(quantum.len());
                let mut result = Array1::zeros(size);
                for i in 0..size {
                    result[i] =
                        self.classical_weight * classical[i] + self.quantum_weight * quantum[i];
                }
                Ok(result)
            }
        }
    }
}

impl TrainingHistory {
    fn new() -> Self {
        Self {
            losses: Vec::new(),
            quantum_losses: Vec::new(),
            classical_losses: Vec::new(),
            accuracies: Vec::new(),
            quantum_advantage_scores: Vec::new(),
            training_times: Vec::new(),
            epoch_details: Vec::new(),
        }
    }
}

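/// Convenience constructors for common hybrid architectures.
///
/// A minimal usage sketch (import paths depend on the crate layout):
///
/// ```ignore
/// use scirs2_core::ndarray::Array1;
///
/// let mut vqc = HybridLearningFactory::create_vqc(4, 2)?;
/// let output = vqc.forward(&Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]))?;
/// ```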
pub struct HybridLearningFactory;

impl HybridLearningFactory {
    pub fn create_quantum_cnn(num_qubits: usize) -> QuantRS2Result<HybridNeuralNetwork> {
        let config = HybridLearningConfig {
            num_qubits,
            quantum_depth: 2,
            classical_layers: vec![128, 64, 32],
            interaction_type: InteractionType::Sequential,
            quantum_learning_rate: 0.005,
            classical_learning_rate: 0.001,
            ..Default::default()
        };
        HybridNeuralNetwork::new(config)
    }

    pub fn create_vqc(
        num_qubits: usize,
        num_classes: usize,
    ) -> QuantRS2Result<HybridNeuralNetwork> {
        let config = HybridLearningConfig {
            num_qubits,
            quantum_depth: 4,
            classical_layers: vec![num_qubits * 2, num_classes],
            interaction_type: InteractionType::Residual,
            quantum_learning_rate: 0.01,
            classical_learning_rate: 0.001,
            ..Default::default()
        };
        HybridNeuralNetwork::new(config)
    }

    pub fn create_quantum_attention(num_qubits: usize) -> QuantRS2Result<HybridNeuralNetwork> {
        let config = HybridLearningConfig {
            num_qubits,
            quantum_depth: 3,
            classical_layers: vec![256, 128, 64],
            interaction_type: InteractionType::Attention,
            quantum_learning_rate: 0.02,
            classical_learning_rate: 0.0005,
            ..Default::default()
        };
        HybridNeuralNetwork::new(config)
    }

    pub fn create_parallel_hybrid(
        num_qubits: usize,
        classical_depth: usize,
    ) -> QuantRS2Result<HybridNeuralNetwork> {
        // Shrink layer widths by 8 per level; saturate at zero (and filter it out)
        // so large depths cannot underflow.
        let classical_layers = (0..classical_depth)
            .map(|i| 64usize.saturating_sub(i * 8))
            .filter(|&x| x > 0)
            .collect();

        let config = HybridLearningConfig {
            num_qubits,
            quantum_depth: 2,
            classical_layers,
            interaction_type: InteractionType::Parallel,
            quantum_learning_rate: 0.008,
            classical_learning_rate: 0.002,
            ..Default::default()
        };
        HybridNeuralNetwork::new(config)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hybrid_neural_network_creation() {
        let config = HybridLearningConfig::default();
        let network = HybridNeuralNetwork::new(config);
        assert!(network.is_ok());
    }

    #[test]
    fn test_dense_layer() {
        let layer = DenseLayer::new(4, 2, ActivationFunction::ReLU).unwrap();
        let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
        let output = layer.forward(&input);

        assert!(output.is_ok());
        let result = output.unwrap();
        assert_eq!(result.len(), 2);
    }

    #[test]
    fn test_quantum_circuit() {
        let circuit = ParameterizedQuantumCircuit::new(3, 2).unwrap();
        let input = Array1::from_vec(vec![0.5, 0.3, 0.2]);
        let output = circuit.forward(&input);

        assert!(output.is_ok());
        let result = output.unwrap();
        assert_eq!(result.len(), 3);
    }

    #[test]
    fn test_fusion_layer() {
        let fusion = FusionLayer::new(FusionType::WeightedSum, 3, 2).unwrap();
        let classical = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let quantum = Array1::from_vec(vec![0.5, 1.5]);

        let result = fusion.fuse(&classical, &quantum);
        assert!(result.is_ok());
    }

    #[test]
    fn test_forward_pass() {
        let mut network = HybridNeuralNetwork::new(HybridLearningConfig::default()).unwrap();
        let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);

        let output = network.forward(&input);
        assert!(output.is_ok());
    }

    #[test]
    fn test_training_data_evaluation() {
        let mut config = HybridLearningConfig::default();
        // Use a small classical stack so evaluation stays cheap.
        config.classical_layers = vec![8, 4, 2];
        let mut network = HybridNeuralNetwork::new(config).unwrap();

        let inputs = Array2::from_shape_vec((10, 4), (0..40).map(|x| x as f64).collect()).unwrap();
        let targets =
            Array2::from_shape_vec((10, 2), (0..20).map(|x| x as f64 % 2.0).collect()).unwrap();

        let result = network.evaluate(&inputs, &targets);
        assert!(result.is_ok());

        let (loss, accuracy) = result.unwrap();
        assert!(loss >= 0.0);
        assert!(accuracy >= 0.0 && accuracy <= 1.0);
    }

    #[test]
    fn test_activation_functions() {
        let layer_relu = DenseLayer::new(2, 2, ActivationFunction::ReLU).unwrap();
        let layer_sigmoid = DenseLayer::new(2, 2, ActivationFunction::Sigmoid).unwrap();
        let layer_tanh = DenseLayer::new(2, 2, ActivationFunction::Tanh).unwrap();

        let input = Array1::from_vec(vec![-1.0, 1.0]);

        let _output_relu = layer_relu.forward(&input).unwrap();
        let output_sigmoid = layer_sigmoid.forward(&input).unwrap();
        let output_tanh = layer_tanh.forward(&input).unwrap();

        assert!(output_sigmoid.iter().all(|&x| x >= 0.0 && x <= 1.0));
        assert!(output_tanh.iter().all(|&x| x >= -1.0 && x <= 1.0));
    }

    #[test]
    fn test_factory_methods() {
        let quantum_cnn = HybridLearningFactory::create_quantum_cnn(4);
        let vqc = HybridLearningFactory::create_vqc(3, 2);
        let quantum_attention = HybridLearningFactory::create_quantum_attention(5);
        let parallel_hybrid = HybridLearningFactory::create_parallel_hybrid(4, 3);

        assert!(quantum_cnn.is_ok());
        assert!(vqc.is_ok());
        assert!(quantum_attention.is_ok());
        assert!(parallel_hybrid.is_ok());
    }

    #[test]
    fn test_different_interaction_types() {
        let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);

        let interaction_types = vec![
            InteractionType::Sequential,
            InteractionType::Interleaved,
            InteractionType::Parallel,
            InteractionType::Residual,
            InteractionType::Attention,
        ];

        for interaction_type in interaction_types {
            let mut config = HybridLearningConfig::default();
            config.interaction_type = interaction_type;
            // Keep the classical stack small so every interaction type stays cheap.
            config.classical_layers = vec![8, 4];
            let mut network = HybridNeuralNetwork::new(config).unwrap();
            let result = network.forward(&input);
            assert!(
                result.is_ok(),
                "Failed for interaction type: {:?}",
                interaction_type
            );
        }
    }

    #[test]
    fn test_fusion_types() {
        let classical = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let quantum = Array1::from_vec(vec![0.5, 1.5, 2.5]);

        let fusion_types = vec![
            FusionType::Concatenation,
            FusionType::WeightedSum,
            FusionType::ElementwiseProduct,
        ];

        for fusion_type in fusion_types {
            let fusion = FusionLayer::new(fusion_type, 3, 3).unwrap();
            let result = fusion.fuse(&classical, &quantum);
            assert!(result.is_ok(), "Failed for fusion type: {:?}", fusion_type);
        }
    }

    #[test]
    fn test_training_history() {
        let history = TrainingHistory::new();
        assert_eq!(history.losses.len(), 0);
        assert_eq!(history.accuracies.len(), 0);
        assert_eq!(history.epoch_details.len(), 0);
    }
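
    // A minimal smoke-test sketch, not part of the original suite: it runs two
    // epochs of `train` on tiny synthetic data. The target width (4) matches the
    // fused output length (max of the last classical layer size and num_qubits),
    // which `compute_loss` requires.
    #[test]
    fn test_train_smoke() {
        let config = HybridLearningConfig {
            classical_layers: vec![8, 4],
            max_epochs: 2,
            ..Default::default()
        };
        let mut network = HybridNeuralNetwork::new(config).unwrap();

        let inputs =
            Array2::from_shape_vec((8, 4), (0..32).map(|x| x as f64 / 32.0).collect()).unwrap();
        let targets =
            Array2::from_shape_vec((8, 4), (0..32).map(|x| (x % 2) as f64).collect()).unwrap();
        let data = TrainingData {
            inputs,
            targets,
            validation_inputs: None,
            validation_targets: None,
        };

        assert!(network.train(&data).is_ok());
        assert_eq!(network.get_training_history().losses.len(), 2);
    }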
}