use crate::autodiff_vqe::AutoDiffContext;
use crate::circuit_interfaces::{CircuitInterface, InterfaceCircuit};
use crate::error::Result;
use crate::prelude::{InterfaceGate, InterfaceGateType, SimulatorError};
use crate::scirs2_integration::SciRS2Backend;
use scirs2_core::Complex64;
use scirs2_core::ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
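/// Machine-learning frameworks a QML workload can be bridged to.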
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum QMLFramework {
PyTorch,
TensorFlow,
JAX,
SciRS2,
Custom,
}
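/// Configuration for the QML integration layer: framework choice, batching,
/// gradient handling, and resource limits.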
#[derive(Debug, Clone)]
pub struct QMLIntegrationConfig {
pub framework: QMLFramework,
pub enable_autodiff: bool,
pub enable_gradient_optimization: bool,
pub batch_size: usize,
pub enable_parameter_sharing: bool,
pub hardware_aware_optimization: bool,
pub gradient_memory_limit: usize,
pub enable_distributed_training: bool,
pub enable_mixed_precision: bool,
}
impl Default for QMLIntegrationConfig {
fn default() -> Self {
Self {
framework: QMLFramework::SciRS2,
enable_autodiff: true,
enable_gradient_optimization: true,
batch_size: 32,
enable_parameter_sharing: true,
hardware_aware_optimization: true,
gradient_memory_limit: 8_000_000_000, // 8 GB
enable_distributed_training: false,
enable_mixed_precision: false,
}
}
}
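/// Kinds of layers a quantum neural network can be composed of.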
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum QMLLayerType {
VariationalCircuit,
QuantumConvolutional,
QuantumRecurrent,
QuantumAttention,
DataEncoding,
Measurement,
Classical,
}
#[derive(Debug, Clone)]
pub struct QMLLayer {
pub layer_type: QMLLayerType,
pub name: String,
pub num_qubits: usize,
pub parameters: Vec<f64>,
pub parameter_names: Vec<String>,
pub circuit_template: Option<InterfaceCircuit>,
pub classical_function: Option<String>,
pub config: LayerConfig,
}
#[derive(Debug, Clone, Default)]
pub struct LayerConfig {
pub repetitions: usize,
pub entangling_pattern: Vec<(usize, usize)>,
pub activation: Option<String>,
pub regularization: Option<RegularizationConfig>,
pub hardware_mapping: Option<Vec<usize>>,
}
#[derive(Debug, Clone)]
pub struct RegularizationConfig {
pub l1_strength: f64,
pub l2_strength: f64,
pub dropout_prob: f64,
}
#[derive(Debug, Clone)]
pub struct QuantumNeuralNetwork {
pub layers: Vec<QMLLayer>,
pub global_parameters: HashMap<String, f64>,
pub metadata: QNNMetadata,
pub training_config: TrainingConfig,
}
#[derive(Debug, Clone, Default)]
pub struct QNNMetadata {
pub name: Option<String>,
pub description: Option<String>,
pub created_at: Option<std::time::SystemTime>,
pub total_parameters: usize,
pub trainable_parameters: usize,
pub complexity_score: f64,
}
#[derive(Debug, Clone)]
pub struct TrainingConfig {
pub learning_rate: f64,
pub optimizer: OptimizerType,
pub loss_function: LossFunction,
pub epochs: usize,
pub batch_size: usize,
pub validation_split: f64,
pub early_stopping_patience: Option<usize>,
pub lr_scheduler: Option<LRScheduler>,
}
impl Default for TrainingConfig {
fn default() -> Self {
Self {
learning_rate: 0.01,
optimizer: OptimizerType::Adam,
loss_function: LossFunction::MeanSquaredError,
epochs: 100,
batch_size: 32,
validation_split: 0.2,
early_stopping_patience: Some(10),
lr_scheduler: None,
}
}
}
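/// Supported optimizers; only SGD and Adam are implemented natively, the
/// others currently fall back to Adam (see `create_optimizer`).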
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizerType {
SGD,
Adam,
AdamW,
RMSprop,
LBFGS,
NaturalGradient,
QuantumNaturalGradient,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LossFunction {
MeanSquaredError,
MeanAbsoluteError,
CrossEntropy,
BinaryCrossEntropy,
Hinge,
CustomQuantum,
}
#[derive(Debug, Clone)]
pub enum LRScheduler {
StepLR { step_size: usize, gamma: f64 },
ExponentialLR { gamma: f64 },
CosineAnnealingLR { t_max: usize },
ReduceLROnPlateau { patience: usize, factor: f64 },
}
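/// Main entry point for QML training: owns the circuit interface, an optional
/// SciRS2 backend and autodiff context, parameter/gradient caches, and
/// training statistics.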
pub struct QMLIntegration {
config: QMLIntegrationConfig,
circuit_interface: CircuitInterface,
backend: Option<SciRS2Backend>,
autodiff_context: Option<AutoDiffContext>,
parameter_cache: Arc<Mutex<HashMap<String, Vec<f64>>>>,
gradient_cache: Arc<Mutex<HashMap<String, Vec<f64>>>>,
stats: QMLTrainingStats,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct QMLTrainingStats {
pub total_training_time_ms: f64,
pub parameter_updates: usize,
pub gradient_computations: usize,
pub avg_gradient_time_ms: f64,
pub circuit_evaluations: usize,
pub avg_circuit_time_ms: f64,
pub loss_history: Vec<f64>,
pub validation_loss_history: Vec<f64>,
pub parameter_norm_history: Vec<f64>,
pub gradient_norm_history: Vec<f64>,
}
impl QMLIntegration {
pub fn new(config: QMLIntegrationConfig) -> Result<Self> {
let circuit_interface = CircuitInterface::new(Default::default())?;
Ok(Self {
config,
circuit_interface,
backend: None,
autodiff_context: None,
parameter_cache: Arc::new(Mutex::new(HashMap::new())),
gradient_cache: Arc::new(Mutex::new(HashMap::new())),
stats: QMLTrainingStats::default(),
})
}
pub fn with_backend(mut self) -> Result<Self> {
self.backend = Some(SciRS2Backend::new());
self.circuit_interface = self.circuit_interface.with_backend()?;
if self.config.enable_autodiff {
self.autodiff_context = Some(AutoDiffContext::new(
Vec::new(),
crate::autodiff_vqe::GradientMethod::ParameterShift,
));
}
Ok(self)
}
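/// Trains `qnn` on `training_data` with the configured optimizer, optional
/// validation data, learning-rate scheduling, and early stopping.
///
/// Minimal usage sketch (not compiled as a doctest; assumes the crate's
/// public re-exports):
///
/// ```ignore
/// let mut integration = QMLIntegration::new(QMLIntegrationConfig::default())?;
/// let qnn = QMLUtils::create_vqc(2, 2);
/// let data = QMLUtils::create_xor_training_data();
/// let result = integration.train_qnn(qnn, &data, None)?;
/// println!("final loss: {:.6}", result.final_loss);
/// ```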
pub fn train_qnn(
&mut self,
mut qnn: QuantumNeuralNetwork,
training_data: &[TrainingExample],
validation_data: Option<&[TrainingExample]>,
) -> Result<TrainingResult> {
let start_time = std::time::Instant::now();
let mut optimizer = self.create_optimizer(&qnn.training_config)?;
let mut lr_scheduler = qnn.training_config.lr_scheduler.clone();
let mut best_loss = f64::INFINITY;
let mut patience_counter = 0;
for epoch in 0..qnn.training_config.epochs {
let epoch_start = std::time::Instant::now();
let train_loss = self.train_epoch(&mut qnn, training_data, &mut optimizer)?;
self.stats.loss_history.push(train_loss);
let val_loss = if let Some(val_data) = validation_data {
self.validate_epoch(&qnn, val_data)?
} else {
train_loss
};
self.stats.validation_loss_history.push(val_loss);
if let Some(ref mut scheduler) = lr_scheduler {
self.update_lr_scheduler(scheduler, val_loss, &mut optimizer)?;
}
if let Some(patience) = qnn.training_config.early_stopping_patience {
if val_loss < best_loss {
best_loss = val_loss;
patience_counter = 0;
} else {
patience_counter += 1;
if patience_counter >= patience {
println!("Early stopping at epoch {epoch} due to no improvement");
break;
}
}
}
let param_norm = self.compute_parameter_norm(&qnn)?;
let grad_norm = self.compute_last_gradient_norm()?;
self.stats.parameter_norm_history.push(param_norm);
self.stats.gradient_norm_history.push(grad_norm);
println!(
"Epoch {}: train_loss={:.6}, val_loss={:.6}, time={:.2}ms",
epoch,
train_loss,
val_loss,
epoch_start.elapsed().as_secs_f64() * 1000.0
);
}
let total_time = start_time.elapsed().as_secs_f64() * 1000.0;
self.stats.total_training_time_ms += total_time;
Ok(TrainingResult {
trained_qnn: qnn.clone(),
final_loss: *self.stats.loss_history.last().unwrap_or(&0.0),
final_validation_loss: *self.stats.validation_loss_history.last().unwrap_or(&0.0),
epochs_completed: self.stats.loss_history.len(),
total_time_ms: total_time,
converged: patience_counter
< qnn
.training_config
.early_stopping_patience
.unwrap_or(usize::MAX),
})
}
fn train_epoch(
&mut self,
qnn: &mut QuantumNeuralNetwork,
training_data: &[TrainingExample],
optimizer: &mut Box<dyn QMLOptimizer>,
) -> Result<f64> {
let mut total_loss = 0.0;
let batch_size = qnn.training_config.batch_size;
let num_batches = training_data.len().div_ceil(batch_size);
for batch_idx in 0..num_batches {
let start_idx = batch_idx * batch_size;
let end_idx = (start_idx + batch_size).min(training_data.len());
let batch = &training_data[start_idx..end_idx];
let (predictions, loss) = self.forward_pass(qnn, batch)?;
total_loss += loss;
let gradients = self.backward_pass(qnn, batch, &predictions)?;
optimizer.update_parameters(qnn, &gradients)?;
self.stats.parameter_updates += 1;
}
Ok(total_loss / num_batches as f64)
}
fn validate_epoch(
&mut self,
qnn: &QuantumNeuralNetwork,
validation_data: &[TrainingExample],
) -> Result<f64> {
let mut total_loss = 0.0;
let batch_size = qnn.training_config.batch_size;
let num_batches = validation_data.len().div_ceil(batch_size);
for batch_idx in 0..num_batches {
let start_idx = batch_idx * batch_size;
let end_idx = (start_idx + batch_size).min(validation_data.len());
let batch = &validation_data[start_idx..end_idx];
let (_, loss) = self.forward_pass(qnn, batch)?;
total_loss += loss;
}
Ok(total_loss / num_batches as f64)
}
fn forward_pass(
&mut self,
qnn: &QuantumNeuralNetwork,
batch: &[TrainingExample],
) -> Result<(Vec<Array1<f64>>, f64)> {
let start_time = std::time::Instant::now();
let mut predictions = Vec::new();
let mut total_loss = 0.0;
for example in batch {
let prediction = self.evaluate_qnn(qnn, &example.input)?;
let loss = self.compute_loss(
&prediction,
&example.target,
&qnn.training_config.loss_function,
)?;
predictions.push(prediction);
total_loss += loss;
}
let eval_time = start_time.elapsed().as_secs_f64() * 1000.0;
self.stats.avg_circuit_time_ms = self
.stats
.avg_circuit_time_ms
.mul_add(self.stats.circuit_evaluations as f64, eval_time)
/ (self.stats.circuit_evaluations + batch.len()) as f64;
self.stats.circuit_evaluations += batch.len();
Ok((predictions, total_loss / batch.len() as f64))
}
fn backward_pass(
&mut self,
qnn: &QuantumNeuralNetwork,
batch: &[TrainingExample],
predictions: &[Array1<f64>],
) -> Result<HashMap<String, Vec<f64>>> {
let start_time = std::time::Instant::now();
let gradients = if self.config.enable_autodiff {
self.compute_gradients_autodiff(qnn, batch, predictions)?
} else {
self.compute_gradients_parameter_shift(qnn, batch)?
};
let grad_time = start_time.elapsed().as_secs_f64() * 1000.0;
self.stats.avg_gradient_time_ms = self
.stats
.avg_gradient_time_ms
.mul_add(self.stats.gradient_computations as f64, grad_time)
/ (self.stats.gradient_computations + 1) as f64;
self.stats.gradient_computations += 1;
{
let mut cache = self.gradient_cache.lock().map_err(|e| {
SimulatorError::InvalidOperation(format!("Gradient cache lock poisoned: {e}"))
})?;
for (param_name, grad) in &gradients {
cache.insert(param_name.clone(), grad.clone());
}
}
Ok(gradients)
}
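/// Forward-evaluates the QNN: initializes |0...0⟩ over the widest layer,
/// then threads the classical output of each layer into the next.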
fn evaluate_qnn(
&mut self,
qnn: &QuantumNeuralNetwork,
input: &Array1<f64>,
) -> Result<Array1<f64>> {
let total_qubits = qnn.layers.iter().map(|l| l.num_qubits).max().unwrap_or(1);
let mut state = Array1::zeros(1 << total_qubits);
state[0] = Complex64::new(1.0, 0.0);
let mut current_output = input.clone();
for layer in &qnn.layers {
current_output = self.evaluate_layer(layer, &current_output, &mut state)?;
}
Ok(current_output)
}
fn evaluate_layer(
&mut self,
layer: &QMLLayer,
input: &Array1<f64>,
state: &mut Array1<Complex64>,
) -> Result<Array1<f64>> {
match layer.layer_type {
QMLLayerType::DataEncoding => {
self.apply_data_encoding(layer, input, state)?;
Ok(input.clone())
}
QMLLayerType::VariationalCircuit => {
self.apply_variational_circuit(layer, state)?;
self.measure_qubits(layer, state)
}
QMLLayerType::Measurement => self.measure_qubits(layer, state),
QMLLayerType::Classical => self.apply_classical_processing(layer, input),
// Unimplemented layer types pass the input through unchanged.
_ => Ok(input.clone()),
}
}
fn apply_data_encoding(
&self,
layer: &QMLLayer,
input: &Array1<f64>,
state: &mut Array1<Complex64>,
) -> Result<()> {
for (i, &value) in input.iter().enumerate() {
if i < layer.num_qubits {
let angle = value * std::f64::consts::PI;
self.apply_ry_rotation(i, angle, state)?;
}
}
Ok(())
}
fn apply_variational_circuit(
&mut self,
layer: &QMLLayer,
state: &mut Array1<Complex64>,
) -> Result<()> {
if let Some(circuit_template) = &layer.circuit_template {
let mut circuit = circuit_template.clone();
self.parameterize_circuit(&mut circuit, &layer.parameters)?;
let compiled = self.circuit_interface.compile_circuit(
&circuit,
crate::circuit_interfaces::SimulationBackend::StateVector,
)?;
let result = self
.circuit_interface
.execute_circuit(&compiled, Some(state.clone()))?;
if let Some(final_state) = result.final_state {
*state = final_state;
}
}
Ok(())
}
fn measure_qubits(&self, layer: &QMLLayer, state: &Array1<Complex64>) -> Result<Array1<f64>> {
let mut measurements = Array1::zeros(layer.num_qubits);
for qubit in 0..layer.num_qubits {
let prob = self.compute_measurement_probability(qubit, state)?;
measurements[qubit] = prob;
}
Ok(measurements)
}
fn apply_classical_processing(
&self,
_layer: &QMLLayer,
input: &Array1<f64>,
) -> Result<Array1<f64>> {
// Classical post-processing is currently a pass-through.
Ok(input.clone())
}
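/// Applies an RY(θ) rotation to `qubit` in-place on the dense state vector:
/// RY(θ) = [[cos(θ/2), -sin(θ/2)], [sin(θ/2), cos(θ/2)]].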
fn apply_ry_rotation(
&self,
qubit: usize,
angle: f64,
state: &mut Array1<Complex64>,
) -> Result<()> {
let qubit_mask = 1 << qubit;
let cos_half = (angle / 2.0).cos();
let sin_half = (angle / 2.0).sin();
for i in 0..state.len() {
if i & qubit_mask == 0 {
let j = i | qubit_mask;
if j < state.len() {
let amp_0 = state[i];
let amp_1 = state[j];
state[i] = cos_half * amp_0 - sin_half * amp_1;
state[j] = sin_half * amp_0 + cos_half * amp_1;
}
}
}
Ok(())
}
fn parameterize_circuit(
&self,
circuit: &mut InterfaceCircuit,
parameters: &[f64],
) -> Result<()> {
let mut param_idx = 0;
for gate in &mut circuit.gates {
match &mut gate.gate_type {
InterfaceGateType::RX(ref mut angle)
| InterfaceGateType::RY(ref mut angle)
| InterfaceGateType::RZ(ref mut angle) => {
if param_idx < parameters.len() {
*angle = parameters[param_idx];
param_idx += 1;
}
}
InterfaceGateType::Phase(ref mut angle) => {
if param_idx < parameters.len() {
*angle = parameters[param_idx];
param_idx += 1;
}
}
_ => {}
}
}
Ok(())
}
fn compute_measurement_probability(
&self,
qubit: usize,
state: &Array1<Complex64>,
) -> Result<f64> {
let qubit_mask = 1 << qubit;
let mut prob_one = 0.0;
for (i, &amplitude) in state.iter().enumerate() {
if i & qubit_mask != 0 {
prob_one += amplitude.norm_sqr();
}
}
Ok(prob_one)
}
fn compute_loss(
&self,
prediction: &Array1<f64>,
target: &Array1<f64>,
loss_fn: &LossFunction,
) -> Result<f64> {
match loss_fn {
LossFunction::MeanSquaredError => {
let diff = prediction - target;
Ok(diff.mapv(|x| x * x).mean().unwrap_or(0.0))
}
LossFunction::MeanAbsoluteError => {
let diff = prediction - target;
Ok(diff.mapv(f64::abs).mean().unwrap_or(0.0))
}
LossFunction::CrossEntropy => {
let mut loss = 0.0;
for (&pred, &targ) in prediction.iter().zip(target.iter()) {
if targ > 0.0 {
// Clamp to avoid ln(0) when a predicted probability underflows.
loss -= targ * pred.max(1e-12).ln();
}
}
Ok(loss)
}
// Remaining loss functions are not yet implemented.
_ => Ok(0.0),
}
}
fn compute_gradients_autodiff(
&mut self,
qnn: &QuantumNeuralNetwork,
batch: &[TrainingExample],
_predictions: &[Array1<f64>],
) -> Result<HashMap<String, Vec<f64>>> {
// Autodiff-based gradients are not yet wired up; fall back to the
// parameter-shift rule.
self.compute_gradients_parameter_shift(qnn, batch)
}
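/// Gradients via the parameter-shift rule: for each parameter θ,
/// dL/dθ ≈ (L(θ + π/2) − L(θ − π/2)) / 2, averaged over the batch.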
fn compute_gradients_parameter_shift(
&mut self,
qnn: &QuantumNeuralNetwork,
batch: &[TrainingExample],
) -> Result<HashMap<String, Vec<f64>>> {
let mut gradients = HashMap::new();
let shift = std::f64::consts::PI / 2.0;
let mut all_params = Vec::new();
let mut param_names = Vec::new();
for layer in &qnn.layers {
for (i, &param) in layer.parameters.iter().enumerate() {
all_params.push(param);
param_names.push(format!("{}_{}", layer.name, i));
}
}
for (param_idx, param_name) in param_names.iter().enumerate() {
let mut param_grad = 0.0;
for example in batch {
let mut qnn_plus = qnn.clone();
self.shift_parameter(&mut qnn_plus, param_idx, shift)?;
let pred_plus = self.evaluate_qnn(&qnn_plus, &example.input)?;
let loss_plus = self.compute_loss(
&pred_plus,
&example.target,
&qnn.training_config.loss_function,
)?;
let mut qnn_minus = qnn.clone();
self.shift_parameter(&mut qnn_minus, param_idx, -shift)?;
let pred_minus = self.evaluate_qnn(&qnn_minus, &example.input)?;
let loss_minus = self.compute_loss(
&pred_minus,
&example.target,
&qnn.training_config.loss_function,
)?;
param_grad += (loss_plus - loss_minus) / 2.0;
}
param_grad /= batch.len() as f64;
gradients.insert(param_name.clone(), vec![param_grad]);
}
Ok(gradients)
}
fn shift_parameter(
&self,
qnn: &mut QuantumNeuralNetwork,
param_idx: usize,
shift: f64,
) -> Result<()> {
let mut current_idx = 0;
for layer in &mut qnn.layers {
if current_idx + layer.parameters.len() > param_idx {
let local_idx = param_idx - current_idx;
layer.parameters[local_idx] += shift;
return Ok(());
}
current_idx += layer.parameters.len();
}
Err(SimulatorError::InvalidInput(format!(
"Parameter index {param_idx} out of bounds"
)))
}
fn create_optimizer(&self, config: &TrainingConfig) -> Result<Box<dyn QMLOptimizer>> {
match config.optimizer {
OptimizerType::Adam => Ok(Box::new(AdamOptimizer::new(config.learning_rate))),
OptimizerType::SGD => Ok(Box::new(SGDOptimizer::new(config.learning_rate))),
// Other optimizer types fall back to Adam for now.
_ => Ok(Box::new(AdamOptimizer::new(config.learning_rate))),
}
}
fn update_lr_scheduler(
&self,
scheduler: &mut LRScheduler,
_current_loss: f64,
optimizer: &mut Box<dyn QMLOptimizer>,
) -> Result<()> {
// Simplified schedules: the decay factor is applied every epoch, and
// `step_size` / `patience` are not yet tracked.
match scheduler {
LRScheduler::StepLR {
step_size: _,
gamma,
} => {
optimizer.update_learning_rate(*gamma);
}
LRScheduler::ExponentialLR { gamma } => {
optimizer.update_learning_rate(*gamma);
}
LRScheduler::ReduceLROnPlateau {
patience: _,
factor,
} => {
optimizer.update_learning_rate(*factor);
}
LRScheduler::CosineAnnealingLR { .. } => {} // Not yet implemented.
}
Ok(())
}
fn compute_parameter_norm(&self, qnn: &QuantumNeuralNetwork) -> Result<f64> {
let mut norm_squared = 0.0;
for layer in &qnn.layers {
for &param in &layer.parameters {
norm_squared += param * param;
}
}
Ok(norm_squared.sqrt())
}
fn compute_last_gradient_norm(&self) -> Result<f64> {
let cache = self.gradient_cache.lock().map_err(|e| {
SimulatorError::InvalidOperation(format!("Gradient cache lock poisoned: {e}"))
})?;
let mut norm_squared = 0.0;
for (_, grads) in cache.iter() {
for &grad in grads {
norm_squared += grad * grad;
}
}
Ok(norm_squared.sqrt())
}
#[must_use]
pub const fn get_stats(&self) -> &QMLTrainingStats {
&self.stats
}
pub fn reset_stats(&mut self) {
self.stats = QMLTrainingStats::default();
}
}
#[derive(Debug, Clone)]
pub struct TrainingExample {
pub input: Array1<f64>,
pub target: Array1<f64>,
}
#[derive(Debug, Clone)]
pub struct TrainingResult {
pub trained_qnn: QuantumNeuralNetwork,
pub final_loss: f64,
pub final_validation_loss: f64,
pub epochs_completed: usize,
pub total_time_ms: f64,
pub converged: bool,
}
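/// Optimizer interface for updating QNN parameters from named gradients.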
pub trait QMLOptimizer {
fn update_parameters(
&mut self,
qnn: &mut QuantumNeuralNetwork,
gradients: &HashMap<String, Vec<f64>>,
) -> Result<()>;
fn update_learning_rate(&mut self, factor: f64);
fn get_learning_rate(&self) -> f64;
}
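/// Adam optimizer (Kingma & Ba, 2015): keeps per-parameter first/second
/// moment estimates m, v and applies bias-corrected updates
/// θ ← θ − lr · m̂ / (√v̂ + ε), matching the update loop below.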
pub struct AdamOptimizer {
learning_rate: f64,
beta1: f64,
beta2: f64,
epsilon: f64,
step: usize,
m: HashMap<String, Vec<f64>>,
v: HashMap<String, Vec<f64>>,
}
impl AdamOptimizer {
#[must_use]
pub fn new(learning_rate: f64) -> Self {
Self {
learning_rate,
beta1: 0.9,
beta2: 0.999,
epsilon: 1e-8,
step: 0,
m: HashMap::new(),
v: HashMap::new(),
}
}
}
impl QMLOptimizer for AdamOptimizer {
fn update_parameters(
&mut self,
qnn: &mut QuantumNeuralNetwork,
gradients: &HashMap<String, Vec<f64>>,
) -> Result<()> {
self.step += 1;
for (param_name, grads) in gradients {
if !self.m.contains_key(param_name) {
self.m.insert(param_name.clone(), vec![0.0; grads.len()]);
self.v.insert(param_name.clone(), vec![0.0; grads.len()]);
}
let mut updates = Vec::new();
{
let m = self.m.get_mut(param_name).ok_or_else(|| {
SimulatorError::InvalidOperation(format!(
"Parameter {param_name} not found in first moment estimates"
))
})?;
let v = self.v.get_mut(param_name).ok_or_else(|| {
SimulatorError::InvalidOperation(format!(
"Parameter {param_name} not found in second moment estimates"
))
})?;
for (i, &grad) in grads.iter().enumerate() {
m[i] = self.beta1.mul_add(m[i], (1.0 - self.beta1) * grad);
v[i] = self.beta2.mul_add(v[i], (1.0 - self.beta2) * grad * grad);
let m_hat = m[i] / (1.0 - self.beta1.powi(self.step as i32));
let v_hat = v[i] / (1.0 - self.beta2.powi(self.step as i32));
let update = self.learning_rate * m_hat / (v_hat.sqrt() + self.epsilon);
updates.push((i, -update));
}
}
for (i, update) in updates {
self.update_qnn_parameter(qnn, param_name, i, update)?;
}
}
Ok(())
}
fn update_learning_rate(&mut self, factor: f64) {
self.learning_rate *= factor;
}
fn get_learning_rate(&self) -> f64 {
self.learning_rate
}
}
impl AdamOptimizer {
fn update_qnn_parameter(
&self,
qnn: &mut QuantumNeuralNetwork,
param_name: &str,
param_idx: usize,
update: f64,
) -> Result<()> {
// Parameter names have the form "{layer_name}_{index}", and layer names
// may themselves contain underscores (e.g. "var_layer_0"), so split at
// the last underscore. The encoded index addresses the parameter within
// the layer; `param_idx` indexes the per-name gradient vector, which has
// length 1.
if let Some((layer_name, idx_str)) = param_name.rsplit_once('_') {
let target_idx = idx_str.parse::<usize>().unwrap_or(param_idx);
for layer in &mut qnn.layers {
if layer.name == layer_name && target_idx < layer.parameters.len() {
layer.parameters[target_idx] += update;
return Ok(());
}
}
}
Err(SimulatorError::InvalidInput(format!(
"Parameter {param_name} not found"
)))
}
}
pub struct SGDOptimizer {
learning_rate: f64,
momentum: f64,
velocity: HashMap<String, Vec<f64>>,
}
impl SGDOptimizer {
#[must_use]
pub fn new(learning_rate: f64) -> Self {
Self {
learning_rate,
momentum: 0.9,
velocity: HashMap::new(),
}
}
}
impl QMLOptimizer for SGDOptimizer {
fn update_parameters(
&mut self,
qnn: &mut QuantumNeuralNetwork,
gradients: &HashMap<String, Vec<f64>>,
) -> Result<()> {
for (param_name, grads) in gradients {
if !self.velocity.contains_key(param_name) {
self.velocity
.insert(param_name.clone(), vec![0.0; grads.len()]);
}
let mut updates = Vec::new();
{
let velocity = self.velocity.get_mut(param_name).ok_or_else(|| {
SimulatorError::InvalidOperation(format!(
"Parameter {param_name} not found in velocity cache"
))
})?;
for (i, &grad) in grads.iter().enumerate() {
velocity[i] = self
.momentum
.mul_add(velocity[i], -(self.learning_rate * grad));
updates.push((i, velocity[i]));
}
}
for (i, update) in updates {
self.update_qnn_parameter(qnn, param_name, i, update)?;
}
}
Ok(())
}
fn update_learning_rate(&mut self, factor: f64) {
self.learning_rate *= factor;
}
fn get_learning_rate(&self) -> f64 {
self.learning_rate
}
}
impl SGDOptimizer {
fn update_qnn_parameter(
&self,
qnn: &mut QuantumNeuralNetwork,
param_name: &str,
param_idx: usize,
update: f64,
) -> Result<()> {
// As in `AdamOptimizer::update_qnn_parameter`: split the name at the
// last underscore, since layer names may contain underscores, and use
// the encoded per-layer parameter index.
if let Some((layer_name, idx_str)) = param_name.rsplit_once('_') {
let target_idx = idx_str.parse::<usize>().unwrap_or(param_idx);
for layer in &mut qnn.layers {
if layer.name == layer_name && target_idx < layer.parameters.len() {
layer.parameters[target_idx] += update;
return Ok(());
}
}
}
Err(SimulatorError::InvalidInput(format!(
"Parameter {param_name} not found"
)))
}
}
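/// Convenience constructors and benchmarks for common QML setups.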
pub struct QMLUtils;
impl QMLUtils {
#[must_use]
pub fn create_vqc(num_qubits: usize, num_layers: usize) -> QuantumNeuralNetwork {
let mut layers = Vec::new();
layers.push(QMLLayer {
layer_type: QMLLayerType::DataEncoding,
name: "encoding".to_string(),
num_qubits,
parameters: Vec::new(),
parameter_names: Vec::new(),
circuit_template: None,
classical_function: None,
config: LayerConfig::default(),
});
for layer_idx in 0..num_layers {
let num_params = num_qubits * 3; // RX, RY, RZ per qubit.
let parameters = (0..num_params)
.map(|_| fastrand::f64() * 2.0 * std::f64::consts::PI)
.collect();
// Match the "{layer_name}_{index}" convention used for gradients.
let parameter_names = (0..num_params)
.map(|i| format!("var_layer_{layer_idx}_{i}"))
.collect();
layers.push(QMLLayer {
layer_type: QMLLayerType::VariationalCircuit,
name: format!("var_layer_{layer_idx}"),
num_qubits,
parameters,
parameter_names,
circuit_template: Some(Self::create_variational_circuit_template(num_qubits)),
classical_function: None,
config: LayerConfig {
repetitions: 1,
entangling_pattern: (0..num_qubits - 1).map(|i| (i, i + 1)).collect(),
..Default::default()
},
});
}
layers.push(QMLLayer {
layer_type: QMLLayerType::Measurement,
name: "measurement".to_string(),
num_qubits,
parameters: Vec::new(),
parameter_names: Vec::new(),
circuit_template: None,
classical_function: None,
config: LayerConfig::default(),
});
QuantumNeuralNetwork {
layers,
global_parameters: HashMap::new(),
metadata: QNNMetadata {
name: Some("VQC".to_string()),
total_parameters: num_layers * num_qubits * 3,
trainable_parameters: num_layers * num_qubits * 3,
..Default::default()
},
training_config: TrainingConfig::default(),
}
}
fn create_variational_circuit_template(num_qubits: usize) -> InterfaceCircuit {
let mut circuit = InterfaceCircuit::new(num_qubits, 0);
for qubit in 0..num_qubits {
circuit.add_gate(InterfaceGate::new(InterfaceGateType::RX(0.0), vec![qubit]));
circuit.add_gate(InterfaceGate::new(InterfaceGateType::RY(0.0), vec![qubit]));
circuit.add_gate(InterfaceGate::new(InterfaceGateType::RZ(0.0), vec![qubit]));
}
for qubit in 0..num_qubits - 1 {
circuit.add_gate(InterfaceGate::new(
InterfaceGateType::CNOT,
vec![qubit, qubit + 1],
));
}
circuit
}
#[must_use]
pub fn create_xor_training_data() -> Vec<TrainingExample> {
vec![
TrainingExample {
input: Array1::from(vec![0.0, 0.0]),
target: Array1::from(vec![0.0]),
},
TrainingExample {
input: Array1::from(vec![0.0, 1.0]),
target: Array1::from(vec![1.0]),
},
TrainingExample {
input: Array1::from(vec![1.0, 0.0]),
target: Array1::from(vec![1.0]),
},
TrainingExample {
input: Array1::from(vec![1.0, 1.0]),
target: Array1::from(vec![0.0]),
},
]
}
pub fn benchmark_qml_integration() -> Result<QMLBenchmarkResults> {
let mut results = QMLBenchmarkResults::default();
let configs = vec![
QMLIntegrationConfig {
framework: QMLFramework::SciRS2,
enable_autodiff: false,
batch_size: 4,
..Default::default()
},
QMLIntegrationConfig {
framework: QMLFramework::SciRS2,
enable_autodiff: true,
batch_size: 4,
..Default::default()
},
];
for (i, config) in configs.into_iter().enumerate() {
let mut integration = QMLIntegration::new(config)?;
let mut qnn = Self::create_vqc(2, 2);
qnn.training_config.epochs = 10;
let training_data = Self::create_xor_training_data();
let start = std::time::Instant::now();
let _result = integration.train_qnn(qnn, &training_data, None)?;
let time = start.elapsed().as_secs_f64() * 1000.0;
results.training_times.push((format!("config_{i}"), time));
}
Ok(results)
}
}
#[derive(Debug, Clone, Default)]
pub struct QMLBenchmarkResults {
pub training_times: Vec<(String, f64)>,
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_abs_diff_eq;
#[test]
fn test_qml_integration_creation() {
let config = QMLIntegrationConfig::default();
let integration = QMLIntegration::new(config);
assert!(integration.is_ok());
}
#[test]
fn test_quantum_neural_network_creation() {
let qnn = QMLUtils::create_vqc(2, 2);
assert_eq!(qnn.layers.len(), 4); // encoding + 2 variational + measurement
assert_eq!(qnn.metadata.total_parameters, 12); // 2 layers * 2 qubits * 3 rotations
}
#[test]
fn test_training_data_creation() {
let data = QMLUtils::create_xor_training_data();
assert_eq!(data.len(), 4);
assert_eq!(data[0].input, Array1::from(vec![0.0, 0.0]));
assert_eq!(data[0].target, Array1::from(vec![0.0]));
}
#[test]
fn test_adam_optimizer() {
let mut optimizer = AdamOptimizer::new(0.01);
assert_eq!(optimizer.get_learning_rate(), 0.01);
optimizer.update_learning_rate(0.5);
assert_abs_diff_eq!(optimizer.get_learning_rate(), 0.005, epsilon = 1e-10);
}
#[test]
fn test_sgd_optimizer() {
let mut optimizer = SGDOptimizer::new(0.1);
assert_eq!(optimizer.get_learning_rate(), 0.1);
optimizer.update_learning_rate(0.9);
assert_abs_diff_eq!(optimizer.get_learning_rate(), 0.09, epsilon = 1e-10);
}
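// End-to-end smoke sketch: trains a tiny VQC on XOR for two epochs.
// Ignored by default because parameter-shift training over dense state
// vectors is slow; run with `cargo test -- --ignored`.
#[test]
#[ignore]
fn test_vqc_xor_training_smoke() {
let config = QMLIntegrationConfig::default();
let mut integration =
QMLIntegration::new(config).expect("Failed to create QML integration");
let mut qnn = QMLUtils::create_vqc(2, 1);
qnn.training_config.epochs = 2;
let data = QMLUtils::create_xor_training_data();
let result = integration
.train_qnn(qnn, &data, None)
.expect("training should complete");
assert_eq!(result.epochs_completed, 2);
assert!(result.final_loss.is_finite());
}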
#[test]
fn test_qml_layer_types() {
let layer_types = [
QMLLayerType::VariationalCircuit,
QMLLayerType::DataEncoding,
QMLLayerType::Measurement,
QMLLayerType::Classical,
];
assert_eq!(layer_types.len(), 4);
}
#[test]
fn test_training_config_default() {
let config = TrainingConfig::default();
assert_eq!(config.learning_rate, 0.01);
assert_eq!(config.optimizer, OptimizerType::Adam);
assert_eq!(config.loss_function, LossFunction::MeanSquaredError);
}
#[test]
fn test_measurement_probability_computation() {
let config = QMLIntegrationConfig::default();
let integration = QMLIntegration::new(config).expect("Failed to create QML integration");
let mut state = Array1::zeros(4);
state[1] = Complex64::new(1.0, 0.0);
let prob0 = integration
.compute_measurement_probability(0, &state)
.expect("Failed to compute measurement probability for qubit 0");
let prob1 = integration
.compute_measurement_probability(1, &state)
.expect("Failed to compute measurement probability for qubit 1");
assert_abs_diff_eq!(prob0, 1.0, epsilon = 1e-10);
assert_abs_diff_eq!(prob1, 0.0, epsilon = 1e-10);
}
#[test]
fn test_loss_computation() {
let config = QMLIntegrationConfig::default();
let integration = QMLIntegration::new(config).expect("Failed to create QML integration");
let prediction = Array1::from(vec![0.8, 0.2]);
let target = Array1::from(vec![1.0, 0.0]);
let mse = integration
.compute_loss(&prediction, &target, &LossFunction::MeanSquaredError)
.expect("Failed to compute MSE loss");
let mae = integration
.compute_loss(&prediction, &target, &LossFunction::MeanAbsoluteError)
.expect("Failed to compute MAE loss");
assert_abs_diff_eq!(mse, 0.04, epsilon = 1e-10);
assert_abs_diff_eq!(mae, 0.2, epsilon = 1e-10);
}
#[test]
fn test_circuit_template_creation() {
let circuit = QMLUtils::create_variational_circuit_template(3);
assert_eq!(circuit.num_qubits, 3);
assert_eq!(circuit.gates.len(), 11); // 3 qubits * 3 rotations + 2 CNOTs
}
}