use crate::{
adaptive_precision::AdaptivePrecisionSimulator, error::QuantRS2Result,
quantum_autodiff::QuantumAutoDiff,
};
use scirs2_core::ndarray::{Array1, Array2};
use std::{
collections::HashMap,
sync::{Arc, RwLock},
time::{Duration, Instant},
};
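/// Configuration for a hybrid quantum-classical neural network.
///
/// `classical_layers` gives the hidden-layer widths of the dense stack, while
/// quantum and classical parameters are trained with separate learning rates.
/// A minimal construction sketch (marked `ignore` because the public
/// re-export path depends on the crate layout):
///
/// ```ignore
/// let config = HybridLearningConfig {
///     num_qubits: 6,
///     interaction_type: InteractionType::Parallel,
///     ..Default::default()
/// };
/// let mut network = HybridNeuralNetwork::new(config)?;
/// ```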
#[derive(Debug, Clone)]
pub struct HybridLearningConfig {
pub quantum_depth: usize,
pub num_qubits: usize,
pub classical_layers: Vec<usize>,
pub quantum_learning_rate: f64,
pub classical_learning_rate: f64,
pub batch_size: usize,
pub max_epochs: usize,
pub early_stopping_patience: usize,
pub interaction_type: InteractionType,
pub enable_quantum_advantage_analysis: bool,
pub use_adaptive_precision: bool,
}
impl Default for HybridLearningConfig {
fn default() -> Self {
Self {
quantum_depth: 3,
num_qubits: 4,
classical_layers: vec![64, 32, 16],
quantum_learning_rate: 0.01,
classical_learning_rate: 0.001,
batch_size: 32,
max_epochs: 100,
early_stopping_patience: 10,
interaction_type: InteractionType::Sequential,
enable_quantum_advantage_analysis: true,
use_adaptive_precision: true,
}
}
}
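/// How the classical and quantum components are composed during a forward
/// pass: `Sequential` feeds the classical output into the circuit,
/// `Interleaved` alternates dense layers with quantum passes, `Parallel` runs
/// both branches on the raw input, `Residual` adds the quantum output onto
/// the classical activations, and `Attention` uses the classical output as a
/// query against the quantum output.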
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InteractionType {
Sequential,
Interleaved,
Parallel,
Residual,
Attention,
}
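/// A hybrid network combining a classical dense stack, a parameterized
/// quantum circuit, and a fusion layer. Layer shapes are fixed lazily on the
/// first `forward` call, once the input dimensionality is known.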
#[derive(Debug)]
pub struct HybridNeuralNetwork {
config: HybridLearningConfig,
classical_layers: Vec<DenseLayer>,
quantum_circuit: ParameterizedQuantumCircuit,
fusion_layer: FusionLayer,
autodiff: Arc<RwLock<QuantumAutoDiff>>,
adaptive_precision: Option<Arc<RwLock<AdaptivePrecisionSimulator>>>,
training_history: TrainingHistory,
}
#[derive(Debug, Clone)]
pub struct DenseLayer {
weights: Array2<f64>,
biases: Array1<f64>,
activation: ActivationFunction,
}
#[derive(Debug, Clone, Copy)]
pub enum ActivationFunction {
ReLU,
Sigmoid,
Tanh,
Linear,
Swish,
GELU,
}
#[derive(Debug)]
pub struct ParameterizedQuantumCircuit {
num_qubits: usize,
depth: usize,
parameters: Vec<f64>,
gate_sequence: Vec<QuantumGateInfo>,
    parameter_map: HashMap<usize, Vec<usize>>,
}
#[derive(Debug, Clone)]
pub struct QuantumGateInfo {
gate_type: String,
qubits: Vec<usize>,
is_parameterized: bool,
parameter_index: Option<usize>,
}
#[derive(Debug)]
pub struct FusionLayer {
fusion_type: FusionType,
fusion_weights: Array2<f64>,
quantum_weight: f64,
classical_weight: f64,
}
#[derive(Debug, Clone, Copy)]
pub enum FusionType {
Concatenation,
ElementwiseProduct,
WeightedSum,
Attention,
BilinearPooling,
}
#[derive(Debug)]
pub struct TrainingHistory {
losses: Vec<f64>,
quantum_losses: Vec<f64>,
classical_losses: Vec<f64>,
accuracies: Vec<f64>,
quantum_advantage_scores: Vec<f64>,
training_times: Vec<Duration>,
epoch_details: Vec<EpochDetails>,
}
#[derive(Debug, Clone)]
pub struct EpochDetails {
epoch: usize,
train_loss: f64,
val_loss: Option<f64>,
train_accuracy: f64,
val_accuracy: Option<f64>,
quantum_contribution: f64,
classical_contribution: f64,
    learning_rates: (f64, f64),
}
#[derive(Debug)]
pub struct TrainingData {
inputs: Array2<f64>,
targets: Array2<f64>,
validation_inputs: Option<Array2<f64>>,
validation_targets: Option<Array2<f64>>,
}
#[derive(Debug, Clone)]
pub struct QuantumAdvantageAnalysis {
quantum_only_performance: f64,
classical_only_performance: f64,
hybrid_performance: f64,
quantum_advantage_ratio: f64,
statistical_significance: f64,
computational_speedup: f64,
}
impl HybridNeuralNetwork {
pub fn new(config: HybridLearningConfig) -> QuantRS2Result<Self> {
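        // Built lazily on the first forward pass, once the input size is
        // known (see `initialize_layers`).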
let classical_layers = Vec::new();
let quantum_circuit =
ParameterizedQuantumCircuit::new(config.num_qubits, config.quantum_depth)?;
        // Initialized with a placeholder classical width of 4; rebuilt in
        // `initialize_layers` once the real classical output size is known.
        let fusion_layer = FusionLayer::new(FusionType::WeightedSum, 4, config.num_qubits)?;
let autodiff = Arc::new(RwLock::new(
crate::quantum_autodiff::QuantumAutoDiffFactory::create_for_vqe(),
));
let adaptive_precision = if config.use_adaptive_precision {
Some(Arc::new(RwLock::new(
crate::adaptive_precision::AdaptivePrecisionFactory::create_balanced(),
)))
} else {
None
};
Ok(Self {
config,
classical_layers,
quantum_circuit,
fusion_layer,
autodiff,
adaptive_precision,
training_history: TrainingHistory::new(),
})
}
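    /// Runs one forward pass, dispatching on the configured interaction type.
    /// The first call fixes the expected input length via `initialize_layers`.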
pub fn forward(&mut self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
if self.classical_layers.is_empty() {
self.initialize_layers(input.len())?;
}
match self.config.interaction_type {
InteractionType::Sequential => self.forward_sequential(input),
InteractionType::Interleaved => self.forward_interleaved(input),
InteractionType::Parallel => self.forward_parallel(input),
InteractionType::Residual => self.forward_residual(input),
InteractionType::Attention => self.forward_attention(input),
}
}
fn initialize_layers(&mut self, input_size: usize) -> QuantRS2Result<()> {
let mut current_size = input_size;
for &layer_size in &self.config.classical_layers {
let layer = DenseLayer::new(current_size, layer_size, ActivationFunction::ReLU)?;
self.classical_layers.push(layer);
current_size = layer_size;
}
self.fusion_layer = FusionLayer::new(
FusionType::WeightedSum,
current_size,
self.config.num_qubits,
)?;
Ok(())
}
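    /// Trains for up to `max_epochs`, stopping early once the validation loss
    /// fails to improve for `early_stopping_patience` consecutive epochs. A
    /// data-layout sketch (`ignore`d: the fields are private outside this
    /// module, and the shapes are illustrative):
    ///
    /// ```ignore
    /// let data = TrainingData {
    ///     inputs: Array2::zeros((100, 4)),  // 100 samples, 4 features
    ///     targets: Array2::zeros((100, 2)), // one-hot over 2 classes
    ///     validation_inputs: None,
    ///     validation_targets: None,
    /// };
    /// network.train(&data)?;
    /// ```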
pub fn train(&mut self, training_data: &TrainingData) -> QuantRS2Result<()> {
let start_time = Instant::now();
let mut best_val_loss = f64::INFINITY;
let mut patience_counter = 0;
for epoch in 0..self.config.max_epochs {
let epoch_start = Instant::now();
let (train_loss, train_accuracy) = self.train_epoch(training_data)?;
let (val_loss, val_accuracy) = if let (Some(val_inputs), Some(val_targets)) = (
&training_data.validation_inputs,
&training_data.validation_targets,
) {
let (loss, acc) = self.evaluate(val_inputs, val_targets)?;
(Some(loss), Some(acc))
} else {
(None, None)
};
let quantum_contribution = self.compute_quantum_contribution()?;
let classical_contribution = 1.0 - quantum_contribution;
let epoch_details = EpochDetails {
epoch,
train_loss,
val_loss,
train_accuracy,
val_accuracy,
quantum_contribution,
classical_contribution,
learning_rates: (
self.config.quantum_learning_rate,
self.config.classical_learning_rate,
),
};
self.training_history.losses.push(train_loss);
self.training_history.accuracies.push(train_accuracy);
self.training_history
.training_times
.push(epoch_start.elapsed());
self.training_history.epoch_details.push(epoch_details);
if let Some(current_val_loss) = val_loss {
if current_val_loss < best_val_loss {
best_val_loss = current_val_loss;
patience_counter = 0;
} else {
patience_counter += 1;
if patience_counter >= self.config.early_stopping_patience {
println!("Early stopping at epoch {epoch}");
break;
}
}
}
if epoch % 10 == 0 {
println!(
"Epoch {}: Train Loss = {:.4}, Train Acc = {:.4}, Quantum Contrib = {:.2}%",
epoch,
train_loss,
train_accuracy,
quantum_contribution * 100.0
);
}
}
if self.config.enable_quantum_advantage_analysis {
let advantage_analysis = self.analyze_quantum_advantage(training_data)?;
println!(
"Quantum Advantage Analysis: {:.2}x speedup, {:.2}% performance improvement",
advantage_analysis.computational_speedup,
(advantage_analysis.quantum_advantage_ratio - 1.0) * 100.0
);
}
println!("Training completed in {:?}", start_time.elapsed());
Ok(())
}
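    /// Returns (mean MSE loss, argmax accuracy) over the given samples,
    /// truncating each prediction/target pair to a common length when the
    /// fused output width differs from the target width.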
pub fn evaluate(
&mut self,
inputs: &Array2<f64>,
targets: &Array2<f64>,
) -> QuantRS2Result<(f64, f64)> {
let mut total_loss = 0.0;
let mut correct_predictions = 0;
let num_samples = inputs.nrows();
for i in 0..num_samples {
let input = inputs.row(i).to_owned();
let target = targets.row(i).to_owned();
let mut prediction = self.forward(&input)?;
if prediction.len() != target.len() {
let min_len = prediction.len().min(target.len());
prediction = prediction
.slice(scirs2_core::ndarray::s![..min_len])
.to_owned();
}
let adjusted_target = if target.len() > prediction.len() {
target
.slice(scirs2_core::ndarray::s![..prediction.len()])
.to_owned()
} else {
target
};
let loss = self.compute_loss(&prediction, &adjusted_target)?;
total_loss += loss;
let pred_class = prediction
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.map(|(idx, _)| idx)
.unwrap_or(0);
let true_class = adjusted_target
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.map(|(idx, _)| idx)
.unwrap_or(0);
if pred_class == true_class {
correct_predictions += 1;
}
}
let avg_loss = total_loss / num_samples as f64;
let accuracy = correct_predictions as f64 / num_samples as f64;
Ok((avg_loss, accuracy))
}
fn forward_sequential(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let mut classical_output = input.clone();
for layer in &self.classical_layers {
classical_output = layer.forward(&classical_output)?;
}
let quantum_input = self.prepare_quantum_input(&classical_output)?;
let quantum_output = self.quantum_circuit.forward(&quantum_input)?;
let fused_output = self.fusion_layer.fuse(&classical_output, &quantum_output)?;
Ok(fused_output)
}
fn forward_interleaved(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let mut current = input.clone();
let layers_per_stage = self.classical_layers.len().max(1);
for i in 0..layers_per_stage {
if i < self.classical_layers.len() {
current = self.classical_layers[i].forward(¤t)?;
}
let quantum_input = self.prepare_quantum_input(¤t)?;
let quantum_output = self.quantum_circuit.forward(&quantum_input)?;
current = self.fusion_layer.fuse(¤t, &quantum_output)?;
}
Ok(current)
}
fn forward_parallel(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let mut classical_output = input.clone();
for layer in &self.classical_layers {
classical_output = layer.forward(&classical_output)?;
}
let quantum_input = self.prepare_quantum_input(input)?;
let quantum_output = self.quantum_circuit.forward(&quantum_input)?;
let fused_output = self.fusion_layer.fuse(&classical_output, &quantum_output)?;
Ok(fused_output)
}
fn forward_residual(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let mut classical_output = input.clone();
for layer in &self.classical_layers {
classical_output = layer.forward(&classical_output)?;
}
let quantum_input = self.prepare_quantum_input(&classical_output)?;
let quantum_output = self.quantum_circuit.forward(&quantum_input)?;
let mut residual_output = classical_output;
let min_len = residual_output.len().min(quantum_output.len());
for i in 0..min_len {
residual_output[i] += quantum_output[i];
}
Ok(residual_output)
}
fn forward_attention(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let mut query = input.clone();
for layer in &self.classical_layers {
query = layer.forward(&query)?;
}
let quantum_input = self.prepare_quantum_input(&query)?;
let quantum_output = self.quantum_circuit.forward(&quantum_input)?;
let attention_output = self.compute_attention(&query, &quantum_output, &quantum_output)?;
Ok(attention_output)
}
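    /// L2-normalizes the classical activations and copies the first
    /// `num_qubits` components into the quantum input register; excess
    /// components are dropped.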
fn prepare_quantum_input(&self, classical_output: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let mut quantum_input = Array1::zeros(self.config.num_qubits);
let norm = classical_output.iter().map(|x| x * x).sum::<f64>().sqrt();
let normalized = if norm > 1e-10 {
classical_output / norm
} else {
classical_output.clone()
};
let input_size = normalized.len().min(quantum_input.len());
for i in 0..input_size {
quantum_input[i] = normalized[i];
}
Ok(quantum_input)
}
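    /// Scaled dot-product attention collapsed to a single scalar weight:
    /// sigmoid(q . k / sqrt(d)) applied elementwise to the value vector.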
fn compute_attention(
&self,
query: &Array1<f64>,
key: &Array1<f64>,
value: &Array1<f64>,
) -> QuantRS2Result<Array1<f64>> {
let attention_score = query.dot(key) / (query.len() as f64).sqrt();
let attention_weight = 1.0 / (1.0 + (-attention_score).exp());
let mut attention_output = Array1::zeros(value.len());
for i in 0..value.len() {
attention_output[i] = attention_weight * value[i];
}
Ok(attention_output)
}
fn train_epoch(&mut self, training_data: &TrainingData) -> QuantRS2Result<(f64, f64)> {
let mut total_loss = 0.0;
let mut correct_predictions = 0;
let num_samples = training_data.inputs.nrows();
let num_batches = (num_samples + self.config.batch_size - 1) / self.config.batch_size;
for batch_idx in 0..num_batches {
let start_idx = batch_idx * self.config.batch_size;
let end_idx = ((batch_idx + 1) * self.config.batch_size).min(num_samples);
let mut batch_loss = 0.0;
let mut batch_correct = 0;
for i in start_idx..end_idx {
let input = training_data.inputs.row(i).to_owned();
let target = training_data.targets.row(i).to_owned();
let prediction = self.forward(&input)?;
let loss = self.compute_loss(&prediction, &target)?;
batch_loss += loss;
let pred_class = prediction
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.map(|(idx, _)| idx)
.unwrap_or(0);
let true_class = target
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
.map(|(idx, _)| idx)
.unwrap_or(0);
if pred_class == true_class {
batch_correct += 1;
}
self.backward(&prediction, &target)?;
}
total_loss += batch_loss;
correct_predictions += batch_correct;
}
let avg_loss = total_loss / num_samples as f64;
let accuracy = correct_predictions as f64 / num_samples as f64;
Ok((avg_loss, accuracy))
}
fn compute_loss(&self, prediction: &Array1<f64>, target: &Array1<f64>) -> QuantRS2Result<f64> {
let diff = prediction - target;
Ok(diff.iter().map(|x| x * x).sum::<f64>() / prediction.len() as f64)
}
fn backward(&mut self, prediction: &Array1<f64>, target: &Array1<f64>) -> QuantRS2Result<()> {
let loss_gradient = 2.0 * (prediction - target) / prediction.len() as f64;
self.update_quantum_parameters(&loss_gradient)?;
self.update_classical_parameters(&loss_gradient)?;
Ok(())
}
fn update_quantum_parameters(&mut self, _gradient: &Array1<f64>) -> QuantRS2Result<()> {
use scirs2_core::random::prelude::*;
let mut rng = thread_rng();
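        // Placeholder update: random perturbations scaled by the learning
        // rate stand in for a proper gradient-based step; the gradient
        // argument is currently unused.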
for param in &mut self.quantum_circuit.parameters {
*param += self.config.quantum_learning_rate * (rng.random::<f64>() - 0.5) * 0.1;
}
Ok(())
}
fn update_classical_parameters(&mut self, _gradient: &Array1<f64>) -> QuantRS2Result<()> {
use scirs2_core::random::prelude::*;
let mut rng = thread_rng();
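        // Placeholder update: random perturbations in lieu of backpropagated
        // gradients; the loss gradient is currently unused.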
for layer in &mut self.classical_layers {
for weight in &mut layer.weights {
*weight += self.config.classical_learning_rate * (rng.random::<f64>() - 0.5) * 0.1;
}
for bias in &mut layer.biases {
*bias += self.config.classical_learning_rate * (rng.random::<f64>() - 0.5) * 0.1;
}
}
Ok(())
}
    const fn compute_quantum_contribution(&self) -> QuantRS2Result<f64> {
        // Placeholder: report a fixed 30% quantum contribution until a real
        // attribution metric (e.g. ablation of the quantum branch) exists.
        Ok(0.3)
    }
fn analyze_quantum_advantage(
&self,
_training_data: &TrainingData,
) -> QuantRS2Result<QuantumAdvantageAnalysis> {
        // Placeholder figures pending real ablation runs (quantum-only,
        // classical-only, hybrid); these values are not measured results.
        let hybrid_performance = 0.85;
        let classical_only_performance = 0.80;
        let quantum_only_performance = 0.60;
        let quantum_advantage_ratio = hybrid_performance / classical_only_performance;
        let computational_speedup = 1.2;
        let statistical_significance = 0.95;
Ok(QuantumAdvantageAnalysis {
quantum_only_performance,
classical_only_performance,
hybrid_performance,
quantum_advantage_ratio,
statistical_significance,
computational_speedup,
})
}
pub const fn get_training_history(&self) -> &TrainingHistory {
&self.training_history
}
pub fn get_quantum_advantage(&self) -> Option<f64> {
self.training_history
.quantum_advantage_scores
.last()
.copied()
}
}
impl DenseLayer {
fn new(
input_size: usize,
output_size: usize,
activation: ActivationFunction,
) -> QuantRS2Result<Self> {
use scirs2_core::random::prelude::*;
let mut rng = thread_rng();
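        // Xavier/Glorot uniform initialization: limit = sqrt(6 / (fan_in + fan_out)).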
let limit = (6.0 / (input_size + output_size) as f64).sqrt();
let weights = Array2::from_shape_fn((output_size, input_size), |_| {
(rng.random::<f64>() - 0.5) * 2.0 * limit
});
let biases = Array1::zeros(output_size);
Ok(Self {
weights,
biases,
activation,
})
}
fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let linear_output = self.weights.dot(input) + &self.biases;
let activated_output = self.apply_activation(&linear_output)?;
Ok(activated_output)
}
fn apply_activation(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
let output = match self.activation {
ActivationFunction::ReLU => input.mapv(|x| x.max(0.0)),
ActivationFunction::Sigmoid => input.mapv(|x| 1.0 / (1.0 + (-x).exp())),
ActivationFunction::Tanh => input.mapv(|x| x.tanh()),
ActivationFunction::Linear => input.clone(),
ActivationFunction::Swish => input.mapv(|x| x / (1.0 + (-x).exp())),
ActivationFunction::GELU => input.mapv(|x| {
0.5 * x
* (1.0
+ ((2.0 / std::f64::consts::PI).sqrt()
* 0.044_715f64.mul_add(x.powi(3), x))
.tanh())
}),
};
Ok(output)
}
}
impl ParameterizedQuantumCircuit {
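    /// Builds a hardware-efficient-style ansatz: per layer, one parameterized
    /// RY rotation on each qubit followed by a linear chain of CNOTs.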
fn new(num_qubits: usize, depth: usize) -> QuantRS2Result<Self> {
        // One parameterized RY per qubit per layer (the CNOT entanglers are
        // fixed), so the circuit needs exactly num_qubits * depth parameters.
        let num_parameters = num_qubits * depth;
        let parameters = vec![0.0; num_parameters];
let mut gate_sequence = Vec::new();
let mut parameter_map = HashMap::new();
let mut param_idx = 0;
for _layer in 0..depth {
for qubit in 0..num_qubits {
gate_sequence.push(QuantumGateInfo {
gate_type: "RY".to_string(),
qubits: vec![qubit],
is_parameterized: true,
parameter_index: Some(param_idx),
});
parameter_map.insert(gate_sequence.len() - 1, vec![param_idx]);
param_idx += 1;
}
for qubit in 0..num_qubits - 1 {
gate_sequence.push(QuantumGateInfo {
gate_type: "CNOT".to_string(),
qubits: vec![qubit, qubit + 1],
is_parameterized: false,
parameter_index: None,
});
}
}
Ok(Self {
num_qubits,
depth,
parameters,
gate_sequence,
parameter_map,
})
}
    fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        // Start from |0...0> (all other amplitudes zero) and encode the input
        // on the single-excitation basis states |0...1_i...0>.
        let mut state = Array1::from_vec(vec![0.0; 1 << self.num_qubits]);
        state[0] = 1.0;
        for i in 0..input.len().min(self.num_qubits) {
            if input[i].abs() > 1e-10 {
                state[1 << i] = input[i];
            }
        }
        let norm = state.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm > 1e-10 {
            state /= norm;
        }
        // Highly simplified placeholder dynamics: each parameterized gate
        // scales every amplitude by cos(theta). This is not a faithful
        // RY/CNOT simulation, but it keeps the output differentiable in the
        // circuit parameters.
        for (gate_idx, gate) in self.gate_sequence.iter().enumerate() {
            if gate.is_parameterized {
                if let Some(param_indices) = self.parameter_map.get(&gate_idx) {
                    if let Some(&param_idx) = param_indices.first() {
                        let angle = self.parameters[param_idx];
                        state = state.mapv(|x| x * angle.cos());
                    }
                }
            }
        }
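        // Read out per-qubit excitation probabilities: for qubit i, sum the
        // squared amplitudes of every basis state whose i-th bit is set.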
let mut output = Array1::zeros(self.num_qubits);
for i in 0..self.num_qubits {
output[i] = state
.iter()
.enumerate()
.filter(|(idx, _)| (idx >> i) & 1 == 1)
.map(|(_, val)| val * val)
.sum::<f64>();
}
Ok(output)
}
}
impl FusionLayer {
fn new(
fusion_type: FusionType,
classical_size: usize,
quantum_size: usize,
) -> QuantRS2Result<Self> {
use scirs2_core::random::prelude::*;
let mut rng = thread_rng();
let fusion_weights = match fusion_type {
FusionType::Concatenation => Array2::eye(classical_size + quantum_size),
FusionType::WeightedSum => Array2::from_shape_fn(
(
classical_size.max(quantum_size),
classical_size + quantum_size,
),
|_| rng.random::<f64>() - 0.5,
),
_ => Array2::eye(classical_size.max(quantum_size)),
};
Ok(Self {
fusion_type,
fusion_weights,
quantum_weight: 0.5,
classical_weight: 0.5,
})
}
    fn fuse(&self, classical: &Array1<f64>, quantum: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        match self.fusion_type {
            FusionType::Concatenation => {
                let mut result = Array1::zeros(classical.len() + quantum.len());
                for (i, &val) in classical.iter().enumerate() {
                    result[i] = val;
                }
                for (i, &val) in quantum.iter().enumerate() {
                    result[classical.len() + i] = val;
                }
                Ok(result)
            }
            FusionType::WeightedSum => Ok(self.weighted_sum(classical, quantum)),
            FusionType::ElementwiseProduct => {
                let size = classical.len().min(quantum.len());
                let mut result = Array1::zeros(size);
                for i in 0..size {
                    result[i] = classical[i] * quantum[i];
                }
                Ok(result)
            }
            _ => {
                // Attention and BilinearPooling fusion are not yet
                // implemented; fall back to a weighted sum of the two
                // branches rather than recursing into `fuse` (which would
                // never terminate).
                Ok(self.weighted_sum(classical, quantum))
            }
        }
    }

    /// Weighted sum of the two branches, zero-padding the shorter vector.
    fn weighted_sum(&self, classical: &Array1<f64>, quantum: &Array1<f64>) -> Array1<f64> {
        let size = classical.len().max(quantum.len());
        let mut result = Array1::zeros(size);
        for i in 0..size {
            let c_val = if i < classical.len() { classical[i] } else { 0.0 };
            let q_val = if i < quantum.len() { quantum[i] } else { 0.0 };
            result[i] = self
                .classical_weight
                .mul_add(c_val, self.quantum_weight * q_val);
        }
        result
    }
}
impl TrainingHistory {
const fn new() -> Self {
Self {
losses: Vec::new(),
quantum_losses: Vec::new(),
classical_losses: Vec::new(),
accuracies: Vec::new(),
quantum_advantage_scores: Vec::new(),
training_times: Vec::new(),
epoch_details: Vec::new(),
}
}
}
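/// Convenience constructors for common hybrid architectures. A usage sketch
/// (`ignore`d for the same path-layout reason as above; error handling
/// elided):
///
/// ```ignore
/// let mut vqc = HybridLearningFactory::create_vqc(4, 2)?;
/// let logits = vqc.forward(&Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]))?;
/// ```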
pub struct HybridLearningFactory;
impl HybridLearningFactory {
pub fn create_quantum_cnn(num_qubits: usize) -> QuantRS2Result<HybridNeuralNetwork> {
let config = HybridLearningConfig {
num_qubits,
quantum_depth: 2,
classical_layers: vec![128, 64, 32],
interaction_type: InteractionType::Sequential,
quantum_learning_rate: 0.005,
classical_learning_rate: 0.001,
..Default::default()
};
HybridNeuralNetwork::new(config)
}
pub fn create_vqc(
num_qubits: usize,
num_classes: usize,
) -> QuantRS2Result<HybridNeuralNetwork> {
let config = HybridLearningConfig {
num_qubits,
quantum_depth: 4,
classical_layers: vec![num_qubits * 2, num_classes],
interaction_type: InteractionType::Residual,
quantum_learning_rate: 0.01,
classical_learning_rate: 0.001,
..Default::default()
};
HybridNeuralNetwork::new(config)
}
pub fn create_quantum_attention(num_qubits: usize) -> QuantRS2Result<HybridNeuralNetwork> {
let config = HybridLearningConfig {
num_qubits,
quantum_depth: 3,
classical_layers: vec![256, 128, 64],
interaction_type: InteractionType::Attention,
quantum_learning_rate: 0.02,
classical_learning_rate: 0.0005,
..Default::default()
};
HybridNeuralNetwork::new(config)
}
pub fn create_parallel_hybrid(
num_qubits: usize,
classical_depth: usize,
) -> QuantRS2Result<HybridNeuralNetwork> {
        // Shrink layer widths by 8 per level; saturating_sub avoids usize
        // underflow when classical_depth > 8.
        let classical_layers = (0..classical_depth)
            .map(|i| 64usize.saturating_sub(i * 8))
            .filter(|&x| x > 0)
            .collect();
let config = HybridLearningConfig {
num_qubits,
quantum_depth: 2,
classical_layers,
interaction_type: InteractionType::Parallel,
quantum_learning_rate: 0.008,
classical_learning_rate: 0.002,
..Default::default()
};
HybridNeuralNetwork::new(config)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hybrid_neural_network_creation() {
let config = HybridLearningConfig::default();
let network = HybridNeuralNetwork::new(config);
assert!(network.is_ok());
}
#[test]
fn test_dense_layer() {
let layer =
DenseLayer::new(4, 2, ActivationFunction::ReLU).expect("dense layer creation failed");
let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
let output = layer.forward(&input);
assert!(output.is_ok());
let result = output.expect("forward pass failed");
assert_eq!(result.len(), 2);
}
#[test]
fn test_quantum_circuit() {
let circuit =
ParameterizedQuantumCircuit::new(3, 2).expect("quantum circuit creation failed");
let input = Array1::from_vec(vec![0.5, 0.3, 0.2]);
let output = circuit.forward(&input);
assert!(output.is_ok());
let result = output.expect("forward pass failed");
assert_eq!(result.len(), 3);
}
#[test]
fn test_fusion_layer() {
let fusion =
FusionLayer::new(FusionType::WeightedSum, 3, 2).expect("fusion layer creation failed");
let classical = Array1::from_vec(vec![1.0, 2.0, 3.0]);
let quantum = Array1::from_vec(vec![0.5, 1.5]);
let result = fusion.fuse(&classical, &quantum);
assert!(result.is_ok());
}
#[test]
fn test_forward_pass() {
let mut network = HybridNeuralNetwork::new(HybridLearningConfig::default())
.expect("network creation failed");
let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
let output = network.forward(&input);
assert!(output.is_ok());
}
#[test]
fn test_training_data_evaluation() {
        let mut config = HybridLearningConfig::default();
        config.classical_layers = vec![8, 4, 2];
        let mut network = HybridNeuralNetwork::new(config).expect("network creation failed");
let inputs = Array2::from_shape_vec((10, 4), (0..40).map(|x| x as f64).collect())
.expect("inputs array creation failed");
let targets = Array2::from_shape_vec((10, 2), (0..20).map(|x| x as f64 % 2.0).collect())
.expect("targets array creation failed");
let result = network.evaluate(&inputs, &targets);
assert!(result.is_ok());
let (loss, accuracy) = result.expect("evaluation failed");
assert!(loss >= 0.0);
assert!(accuracy >= 0.0 && accuracy <= 1.0);
}
#[test]
fn test_activation_functions() {
let layer_relu =
DenseLayer::new(2, 2, ActivationFunction::ReLU).expect("relu layer creation failed");
let layer_sigmoid = DenseLayer::new(2, 2, ActivationFunction::Sigmoid)
.expect("sigmoid layer creation failed");
let layer_tanh =
DenseLayer::new(2, 2, ActivationFunction::Tanh).expect("tanh layer creation failed");
let input = Array1::from_vec(vec![-1.0, 1.0]);
let _output_relu = layer_relu.forward(&input).expect("relu forward failed");
let output_sigmoid = layer_sigmoid
.forward(&input)
.expect("sigmoid forward failed");
let output_tanh = layer_tanh.forward(&input).expect("tanh forward failed");
assert!(output_sigmoid.iter().all(|&x| x >= 0.0 && x <= 1.0));
assert!(output_tanh.iter().all(|&x| x >= -1.0 && x <= 1.0));
}
#[test]
fn test_factory_methods() {
let quantum_cnn = HybridLearningFactory::create_quantum_cnn(4);
let vqc = HybridLearningFactory::create_vqc(3, 2);
let quantum_attention = HybridLearningFactory::create_quantum_attention(5);
let parallel_hybrid = HybridLearningFactory::create_parallel_hybrid(4, 3);
assert!(quantum_cnn.is_ok());
assert!(vqc.is_ok());
assert!(quantum_attention.is_ok());
assert!(parallel_hybrid.is_ok());
}
#[test]
fn test_different_interaction_types() {
let input = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
let interaction_types = vec![
InteractionType::Sequential,
InteractionType::Interleaved,
InteractionType::Parallel,
InteractionType::Residual,
InteractionType::Attention,
];
for interaction_type in interaction_types {
let mut config = HybridLearningConfig::default();
config.interaction_type = interaction_type;
            config.classical_layers = vec![8, 4];
            let mut network = HybridNeuralNetwork::new(config)
                .expect("network creation failed for interaction type");
let result = network.forward(&input);
assert!(
result.is_ok(),
"Failed for interaction type: {:?}",
interaction_type
);
}
}
#[test]
fn test_fusion_types() {
let classical = Array1::from_vec(vec![1.0, 2.0, 3.0]);
let quantum = Array1::from_vec(vec![0.5, 1.5, 2.5]);
let fusion_types = vec![
FusionType::Concatenation,
FusionType::WeightedSum,
FusionType::ElementwiseProduct,
];
for fusion_type in fusion_types {
let fusion = FusionLayer::new(fusion_type, 3, 3)
.expect("fusion layer creation failed for fusion type");
let result = fusion.fuse(&classical, &quantum);
assert!(result.is_ok(), "Failed for fusion type: {:?}", fusion_type);
}
}
#[test]
fn test_training_history() {
let history = TrainingHistory::new();
assert_eq!(history.losses.len(), 0);
assert_eq!(history.accuracies.len(), 0);
assert_eq!(history.epoch_details.len(), 0);
}
}