use scirs2_core::ndarray::Array1;
use std::collections::HashMap;
use super::circuit::ParameterizedQuantumCircuit;
use super::config::{HardwareArchitecture, QMLAlgorithmType, QMLConfig};
use super::trainer::QuantumMLTrainer;
use crate::circuit_interfaces::InterfaceCircuit;
use crate::error::Result;
/// Benchmark every supported QML algorithm on every hardware architecture.
///
/// Returns a map keyed by `"{algorithm:?}_{hardware:?}"` whose values are the
/// measured wall-clock training time in milliseconds for that combination.
pub fn benchmark_quantum_ml_algorithms() -> Result<HashMap<String, f64>> {
    let algorithms = [
        QMLAlgorithmType::VQE,
        QMLAlgorithmType::QAOA,
        QMLAlgorithmType::QCNN,
        QMLAlgorithmType::QSVM,
    ];
    let hardware_archs = [
        HardwareArchitecture::NISQ,
        HardwareArchitecture::Superconducting,
        HardwareArchitecture::TrappedIon,
    ];

    let mut results = HashMap::new();
    for &algorithm in algorithms.iter() {
        for &hardware in hardware_archs.iter() {
            let elapsed_ms = benchmark_algorithm_hardware_combination(algorithm, hardware)?;
            results.insert(format!("{algorithm:?}_{hardware:?}"), elapsed_ms);
        }
    }
    Ok(results)
}
/// Time one small end-to-end training run for a single algorithm /
/// hardware-architecture pair.
///
/// Returns the elapsed wall-clock time in milliseconds. The problem size is
/// deliberately tiny (4 qubits, 8 parameters, 5 epochs) so the whole
/// benchmark suite completes quickly.
fn benchmark_algorithm_hardware_combination(
    algorithm: QMLAlgorithmType,
    hardware: HardwareArchitecture,
) -> Result<f64> {
    let timer = std::time::Instant::now();

    // Minimal configuration; everything not listed falls back to defaults.
    let config = QMLConfig {
        algorithm_type: algorithm,
        hardware_architecture: hardware,
        num_qubits: 4,
        circuit_depth: 2,
        num_parameters: 8,
        max_epochs: 5,
        batch_size: 4,
        ..Default::default()
    };

    let base_circuit = create_test_circuit(config.num_qubits)?;
    let initial_params = Array1::from_vec(vec![0.1; config.num_parameters]);
    let parameter_names = (0..config.num_parameters)
        .map(|i| format!("param_{i}"))
        .collect();
    let pqc =
        ParameterizedQuantumCircuit::new(base_circuit, initial_params, parameter_names, hardware);

    // Train against a trivial quadratic loss; we only care about timing here.
    let mut trainer = QuantumMLTrainer::new(config, pqc, None)?;
    let quadratic_loss = |p: &Array1<f64>| -> Result<f64> { Ok(p.iter().map(|&v| v * v).sum::<f64>()) };
    let _result = trainer.train(quadratic_loss)?;

    Ok(timer.elapsed().as_secs_f64() * 1000.0)
}
/// Build a minimal interface circuit with `num_qubits` qubits and no
/// classical bits, used as a stand-in workload for the benchmarks.
fn create_test_circuit(num_qubits: usize) -> Result<InterfaceCircuit> {
    Ok(InterfaceCircuit::new(num_qubits, 0))
}
/// Benchmark each supported gradient-estimation method on a small test
/// function, returning method name → elapsed time in milliseconds.
pub fn benchmark_gradient_methods() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();
    for method in [
        "parameter_shift",
        "finite_differences",
        "automatic_differentiation",
        "natural_gradients",
    ] {
        let elapsed_ms = benchmark_gradient_method(method)?;
        results.insert(method.to_string(), elapsed_ms);
    }
    Ok(results)
}
/// Time one gradient evaluation with the named method on a fixed 4-parameter
/// weighted quadratic; returns the elapsed time in milliseconds.
///
/// # Errors
/// Returns `SimulatorError::InvalidInput` when `method` is not recognized.
fn benchmark_gradient_method(method: &str) -> Result<f64> {
    let timer = std::time::Instant::now();

    // Test objective: f(x) = sum_i (i + 1) * x_i^2.
    let objective = |params: &Array1<f64>| -> Result<f64> {
        let value = params
            .iter()
            .enumerate()
            .map(|(i, &x)| (i as f64 + 1.0) * x * x)
            .sum::<f64>();
        Ok(value)
    };
    let probe = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]);

    let _gradient = match method {
        "parameter_shift" => compute_parameter_shift_gradient(&objective, &probe)?,
        "finite_differences" => compute_finite_difference_gradient(&objective, &probe)?,
        "automatic_differentiation" => compute_autodiff_gradient(&objective, &probe)?,
        "natural_gradients" => compute_natural_gradient(&objective, &probe)?,
        _ => {
            return Err(crate::error::SimulatorError::InvalidInput(format!(
                "Unknown gradient method: {method}"
            )))
        }
    };

    Ok(timer.elapsed().as_secs_f64() * 1000.0)
}
/// Estimate the gradient of `function` at `parameters` with the
/// parameter-shift rule: for each parameter i,
/// g_i = (f(θ + π/2·e_i) - f(θ - π/2·e_i)) / 2.
///
/// # Errors
/// Propagates any error returned by `function`.
fn compute_parameter_shift_gradient<F>(
    function: &F,
    parameters: &Array1<f64>,
) -> Result<Array1<f64>>
where
    F: Fn(&Array1<f64>) -> Result<f64>,
{
    let num_params = parameters.len();
    let mut gradient = Array1::zeros(num_params);
    // Standard parameter-shift rule uses a ±π/2 shift per parameter.
    let shift = std::f64::consts::PI / 2.0;
    for i in 0..num_params {
        let mut params_plus = parameters.clone();
        let mut params_minus = parameters.clone();
        params_plus[i] += shift;
        params_minus[i] -= shift;
        // BUG FIX: these call arguments were mojibake (`¶ms_plus`,
        // i.e. `&para` rendered as `¶`) and did not compile; restored
        // the intended `&params_plus` / `&params_minus` borrows.
        let loss_plus = function(&params_plus)?;
        let loss_minus = function(&params_minus)?;
        gradient[i] = (loss_plus - loss_minus) / 2.0;
    }
    Ok(gradient)
}
/// Estimate the gradient of `function` at `parameters` with forward finite
/// differences: g_i = (f(θ + ε·e_i) - f(θ)) / ε, with ε = 1e-8.
///
/// # Errors
/// Propagates any error returned by `function`.
fn compute_finite_difference_gradient<F>(
    function: &F,
    parameters: &Array1<f64>,
) -> Result<Array1<f64>>
where
    F: Fn(&Array1<f64>) -> Result<f64>,
{
    let num_params = parameters.len();
    let mut gradient = Array1::zeros(num_params);
    let eps = 1e-8;
    // The baseline loss f(θ) does not depend on the loop index; evaluate it
    // once instead of once per parameter as the original did.
    let loss_current = function(parameters)?;
    for i in 0..num_params {
        let mut params_plus = parameters.clone();
        params_plus[i] += eps;
        // BUG FIX: the call argument was mojibake (`¶ms_plus`) and did
        // not compile; restored the intended `&params_plus` borrow.
        let loss_plus = function(&params_plus)?;
        gradient[i] = (loss_plus - loss_current) / eps;
    }
    Ok(gradient)
}
/// Placeholder for automatic differentiation: currently delegates to the
/// parameter-shift rule, so results and cost are identical to
/// `compute_parameter_shift_gradient`.
// NOTE(review): a real autodiff backend would trace `function` instead of
// re-evaluating it 2n times — confirm whether this stub is intentional.
fn compute_autodiff_gradient<F>(function: &F, parameters: &Array1<f64>) -> Result<Array1<f64>>
where
    F: Fn(&Array1<f64>) -> Result<f64>,
{
    compute_parameter_shift_gradient(function, parameters)
}
/// Placeholder for the natural gradient: currently delegates to the plain
/// parameter-shift gradient.
// NOTE(review): a true natural gradient would precondition the gradient by
// the (quantum) Fisher information matrix; none is computed here.
fn compute_natural_gradient<F>(function: &F, parameters: &Array1<f64>) -> Result<Array1<f64>>
where
    F: Fn(&Array1<f64>) -> Result<f64>,
{
    compute_parameter_shift_gradient(function, parameters)
}
/// Benchmark each supported optimizer on a toy quadratic problem, returning
/// optimizer name → elapsed time in milliseconds.
pub fn benchmark_optimizers() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();
    for optimizer in ["adam", "sgd", "rmsprop", "lbfgs"] {
        let elapsed_ms = benchmark_optimizer(optimizer)?;
        results.insert(optimizer.to_string(), elapsed_ms);
    }
    Ok(results)
}
/// Time 100 iterations of a simple gradient-descent update loop for the named
/// optimizer; returns the elapsed wall-clock time in milliseconds.
///
/// # Errors
/// Returns `SimulatorError::InvalidInput` for an unrecognized optimizer name.
///
/// NOTE(review): all four optimizer branches in the original applied the
/// identical plain step `params -= 0.01 * gradient`; the names are
/// placeholders until real Adam/RMSprop/L-BFGS updates are implemented, so
/// the branches are collapsed into a single validated step here.
fn benchmark_optimizer(optimizer: &str) -> Result<f64> {
    let start = std::time::Instant::now();
    // Validate up front. The original checked inside the loop but bailed out
    // on the first iteration, so the observable behavior is unchanged.
    match optimizer {
        "adam" | "sgd" | "rmsprop" | "lbfgs" => {}
        _ => {
            return Err(crate::error::SimulatorError::InvalidInput(format!(
                "Unknown optimizer: {optimizer}"
            )))
        }
    }
    let mut params = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
    let target = Array1::<f64>::zeros(4);
    for _iteration in 0..100 {
        // BUG FIX: these expressions were mojibake (`¶ms`, `⌖` — mangled
        // `&params` / `&target`) and did not compile; borrows restored.
        let gradient = &params - &target;
        params = &params - 0.01 * &gradient;
    }
    Ok(start.elapsed().as_secs_f64() * 1000.0)
}
pub fn run_comprehensive_benchmarks() -> Result<HashMap<String, HashMap<String, f64>>> {
let mut all_results = HashMap::new();
all_results.insert("algorithms".to_string(), benchmark_quantum_ml_algorithms()?);
all_results.insert("gradients".to_string(), benchmark_gradient_methods()?);
all_results.insert("optimizers".to_string(), benchmark_optimizers()?);
Ok(all_results)
}