use crate::error::{QuantRS2Error, QuantRS2Result};
use scirs2_core::Complex64;
use std::{
collections::HashMap,
fmt,
sync::{Arc, RwLock},
};
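/// Configuration for quantum automatic differentiation: default method, step
/// sizes, higher-order derivative support, and gradient caching behavior.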
#[derive(Debug, Clone)]
pub struct QuantumAutoDiffConfig {
pub default_method: DifferentiationMethod,
pub finite_diff_step: f64,
pub parameter_shift_step: f64,
pub enable_higher_order: bool,
pub max_derivative_order: usize,
pub gradient_precision: f64,
pub enable_caching: bool,
pub cache_size_limit: usize,
}
impl Default for QuantumAutoDiffConfig {
fn default() -> Self {
Self {
default_method: DifferentiationMethod::ParameterShift,
finite_diff_step: 1e-7,
parameter_shift_step: std::f64::consts::PI / 2.0,
enable_higher_order: true,
max_derivative_order: 3,
gradient_precision: 1e-12,
enable_caching: true,
cache_size_limit: 10000,
}
}
}
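/// Supported gradient strategies. `ParameterShift` is analytically exact for
/// standard parameterized rotation gates; the finite-difference variants are
/// generic numerical fallbacks; `ComplexStep` and `DualNumber` currently
/// delegate to central differences (see their implementations below); and
/// `Hybrid` picks a per-parameter method from the parameter registry.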
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DifferentiationMethod {
ParameterShift,
FiniteDifference,
CentralDifference,
ComplexStep,
DualNumber,
Hybrid,
}
impl fmt::Display for DifferentiationMethod {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::ParameterShift => write!(f, "Parameter-Shift"),
Self::FiniteDifference => write!(f, "Finite Difference"),
Self::CentralDifference => write!(f, "Central Difference"),
Self::ComplexStep => write!(f, "Complex Step"),
Self::DualNumber => write!(f, "Dual Number"),
Self::Hybrid => write!(f, "Hybrid"),
}
}
}
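/// Core engine for differentiating parameterized quantum objective functions.
///
/// A minimal sketch of the intended workflow (the objective below is an
/// illustrative classical stand-in for a circuit expectation value):
///
/// ```ignore
/// let mut ad = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
/// let theta = ad.register_parameter("theta", 0.3, None)?;
/// let result = ad.compute_gradients(
///     |p: &[f64]| Ok(Complex64::new(p[0].cos(), 0.0)),
///     &[theta],
///     Some(DifferentiationMethod::ParameterShift),
/// )?;
/// ```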
#[derive(Debug)]
pub struct QuantumAutoDiff {
config: QuantumAutoDiffConfig,
gradient_cache: Arc<RwLock<GradientCache>>,
computation_graph: Arc<RwLock<ComputationGraph>>,
parameter_registry: Arc<RwLock<ParameterRegistry>>,
}
#[derive(Debug)]
pub struct GradientCache {
entries: HashMap<String, CacheEntry>,
access_order: Vec<String>,
total_size: usize,
}
#[derive(Debug, Clone)]
pub struct CacheEntry {
gradients: Vec<Complex64>,
computation_cost: f64,
timestamp: std::time::Instant,
method_used: DifferentiationMethod,
}
#[derive(Debug)]
pub struct ComputationGraph {
nodes: Vec<ComputationNode>,
edges: Vec<ComputationEdge>,
parameter_dependencies: HashMap<usize, Vec<usize>>,
}
#[derive(Debug, Clone)]
pub struct ComputationNode {
id: usize,
operation: QuantumOperation,
inputs: Vec<usize>,
outputs: Vec<usize>,
}
#[derive(Debug, Clone)]
pub struct ComputationEdge {
from: usize,
to: usize,
parameter_id: Option<usize>,
}
#[derive(Debug, Clone)]
pub enum QuantumOperation {
Gate { name: String, parameters: Vec<f64> },
Measurement { observable: String },
StatePreparation { amplitudes: Vec<Complex64> },
Expectation { observable: String },
}
#[derive(Debug)]
pub struct ParameterRegistry {
parameters: HashMap<usize, Parameter>,
next_id: usize,
}
#[derive(Debug, Clone)]
pub struct Parameter {
id: usize,
name: String,
value: f64,
bounds: Option<(f64, f64)>,
differentiable: bool,
gradient_method: Option<DifferentiationMethod>,
}
#[derive(Debug, Clone)]
pub struct GradientResult {
pub gradients: Vec<Complex64>,
pub parameter_ids: Vec<usize>,
pub computation_method: DifferentiationMethod,
pub computation_time: std::time::Duration,
pub numerical_error_estimate: f64,
}
#[derive(Debug, Clone)]
pub struct HigherOrderResult {
pub derivatives: Vec<Vec<Complex64>>,
pub parameter_ids: Vec<usize>,
pub orders: Vec<usize>,
pub mixed_derivatives: HashMap<(usize, usize), Complex64>,
}
impl QuantumAutoDiff {
pub fn new(config: QuantumAutoDiffConfig) -> Self {
Self {
config,
gradient_cache: Arc::new(RwLock::new(GradientCache::new())),
computation_graph: Arc::new(RwLock::new(ComputationGraph::new())),
parameter_registry: Arc::new(RwLock::new(ParameterRegistry::new())),
}
}
pub fn register_parameter(
&mut self,
name: &str,
initial_value: f64,
bounds: Option<(f64, f64)>,
) -> QuantRS2Result<usize> {
let mut registry = self
.parameter_registry
.write()
.expect("Parameter registry lock poisoned during registration");
Ok(registry.add_parameter(name, initial_value, bounds))
}
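/// Computes first-order gradients of `function` with respect to the registered
/// parameters in `parameter_ids`, using `method` (or the configured default).
/// Results may be served from the gradient cache when caching is enabled.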
pub fn compute_gradients<F>(
&mut self,
function: F,
parameter_ids: &[usize],
method: Option<DifferentiationMethod>,
) -> QuantRS2Result<GradientResult>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let method = method.unwrap_or(self.config.default_method);
let start_time = std::time::Instant::now();
let parameter_values = self.get_parameter_values(parameter_ids)?;
if self.config.enable_caching {
// Key on the current parameter values as well, so cached gradients are
// invalidated after parameter updates.
let cache_key = self.generate_cache_key(parameter_ids, &parameter_values, method);
if let Some(cached) = self.get_cached_gradient(&cache_key) {
return Ok(GradientResult {
gradients: cached.gradients,
parameter_ids: parameter_ids.to_vec(),
computation_method: cached.method_used,
computation_time: start_time.elapsed(),
numerical_error_estimate: 0.0,
});
}
}
let gradients = match method {
DifferentiationMethod::ParameterShift => {
self.compute_parameter_shift_gradients(function, &parameter_values, parameter_ids)?
}
DifferentiationMethod::FiniteDifference => self.compute_finite_difference_gradients(
function,
&parameter_values,
parameter_ids,
)?,
DifferentiationMethod::CentralDifference => self.compute_central_difference_gradients(
function,
&parameter_values,
parameter_ids,
)?,
DifferentiationMethod::ComplexStep => {
self.compute_complex_step_gradients(function, &parameter_values, parameter_ids)?
}
DifferentiationMethod::DualNumber => {
self.compute_dual_number_gradients(function, &parameter_values, parameter_ids)?
}
DifferentiationMethod::Hybrid => {
self.compute_hybrid_gradients(function, &parameter_values, parameter_ids)?
}
};
let computation_time = start_time.elapsed();
let error_estimate = self.estimate_gradient_error(&gradients, method);
let result = GradientResult {
gradients: gradients.clone(),
parameter_ids: parameter_ids.to_vec(),
computation_method: method,
computation_time,
numerical_error_estimate: error_estimate,
};
if self.config.enable_caching {
let cache_key = self.generate_cache_key(parameter_ids, &parameter_values, method);
self.cache_gradient(
cache_key,
&gradients,
computation_time.as_secs_f64(),
method,
);
}
Ok(result)
}
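/// Computes derivatives of `function` up to `max_order` (clamped to the
/// configured maximum), plus mixed second-order partials for every parameter
/// pair when at least two parameters are supplied.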
pub fn compute_higher_order_derivatives<F>(
&mut self,
function: F,
parameter_ids: &[usize],
max_order: usize,
) -> QuantRS2Result<HigherOrderResult>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
if !self.config.enable_higher_order {
return Err(QuantRS2Error::UnsupportedOperation(
"Higher-order derivatives disabled".to_string(),
));
}
let max_order = max_order.min(self.config.max_derivative_order);
let mut derivatives = Vec::new();
let mut mixed_derivatives = HashMap::new();
for order in 1..=max_order {
let order_derivatives =
self.compute_nth_order_derivatives(function, parameter_ids, order)?;
derivatives.push(order_derivatives);
}
if max_order >= 2 && parameter_ids.len() >= 2 {
for i in 0..parameter_ids.len() {
for j in (i + 1)..parameter_ids.len() {
let mixed = self.compute_mixed_partial(function, parameter_ids, i, j)?;
mixed_derivatives.insert((i, j), mixed);
}
}
}
Ok(HigherOrderResult {
derivatives,
parameter_ids: parameter_ids.to_vec(),
orders: (1..=max_order).collect(),
mixed_derivatives,
})
}
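/// Differentiates a circuit-level objective gate by gate, choosing the
/// parameter-shift rule for standard rotation gates and central differences
/// otherwise.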
pub fn circuit_gradients<F>(
&mut self,
circuit_function: F,
gate_parameters: &[(usize, String, Vec<usize>)],
observable: &str,
) -> QuantRS2Result<Vec<GradientResult>>
where
F: Fn(&[f64], &str) -> QuantRS2Result<Complex64> + Copy,
{
let mut results = Vec::new();
for (_gate_id, gate_name, param_indices) in gate_parameters {
let method = self.select_optimal_method(gate_name);
let gate_function = |params: &[f64]| -> QuantRS2Result<Complex64> {
circuit_function(params, observable)
};
let gradient = self.compute_gradients(gate_function, param_indices, Some(method))?;
results.push(gradient);
}
Ok(results)
}
pub fn parameter_update(
&mut self,
gradients: &GradientResult,
learning_rate: f64,
optimizer: OptimizerType,
) -> QuantRS2Result<()> {
match optimizer {
OptimizerType::SGD => {
self.sgd_update(gradients, learning_rate)?;
}
OptimizerType::Adam => {
self.adam_update(gradients, learning_rate)?;
}
OptimizerType::LBFGS => {
self.lbfgs_update(gradients, learning_rate)?;
}
OptimizerType::AdaGrad => {
self.adagrad_update(gradients, learning_rate)?;
}
}
Ok(())
}
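/// Two-term parameter-shift rule for standard Pauli-rotation-style gates:
/// dE/dθ = [E(θ + s) - E(θ - s)] / (2 sin s), which reduces to a simple
/// factor of 1/2 at the default shift s = π/2.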
fn compute_parameter_shift_gradients<F>(
&self,
function: F,
parameters: &[f64],
_parameter_ids: &[usize],
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let mut gradients = Vec::new();
let shift = self.config.parameter_shift_step;
for i in 0..parameters.len() {
let mut params_plus = parameters.to_vec();
let mut params_minus = parameters.to_vec();
params_plus[i] += shift;
params_minus[i] -= shift;
let f_plus = function(&params_plus)?;
let f_minus = function(&params_minus)?;
// Generalized two-term shift rule; at the default shift of π/2, sin(shift) = 1.
let gradient = (f_plus - f_minus) / Complex64::new(2.0 * shift.sin(), 0.0);
gradients.push(gradient);
}
Ok(gradients)
}
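/// Forward differences, f'(x) ≈ [f(x + h) - f(x)] / h, accurate to O(h).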
fn compute_finite_difference_gradients<F>(
&self,
function: F,
parameters: &[f64],
_parameter_ids: &[usize],
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let mut gradients = Vec::new();
let h = self.config.finite_diff_step;
let f_original = function(parameters)?;
for i in 0..parameters.len() {
let mut params_h = parameters.to_vec();
params_h[i] += h;
let f_h = function(&params_h)?;
let gradient = (f_h - f_original) / Complex64::new(h, 0.0);
gradients.push(gradient);
}
Ok(gradients)
}
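/// Central differences, f'(x) ≈ [f(x + h) - f(x - h)] / (2h), accurate to O(h²).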
fn compute_central_difference_gradients<F>(
&self,
function: F,
parameters: &[f64],
_parameter_ids: &[usize],
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let mut gradients = Vec::new();
let h = self.config.finite_diff_step;
for i in 0..parameters.len() {
let mut params_plus = parameters.to_vec();
let mut params_minus = parameters.to_vec();
params_plus[i] += h;
params_minus[i] -= h;
let f_plus = function(&params_plus)?;
let f_minus = function(&params_minus)?;
let gradient = (f_plus - f_minus) / Complex64::new(2.0 * h, 0.0);
gradients.push(gradient);
}
Ok(gradients)
}
fn compute_complex_step_gradients<F>(
&self,
function: F,
parameters: &[f64],
_parameter_ids: &[usize],
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
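// True complex-step differentiation evaluates f(x + ih); since `function`
// only accepts real parameters here, fall back to central differences.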
self.compute_central_difference_gradients(function, parameters, _parameter_ids)
}
fn compute_dual_number_gradients<F>(
&self,
function: F,
parameters: &[f64],
_parameter_ids: &[usize],
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
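// Forward-mode dual-number AD would require a dual-number-aware objective;
// fall back to central differences for real-valued inputs.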
self.compute_central_difference_gradients(function, parameters, _parameter_ids)
}
fn compute_hybrid_gradients<F>(
&self,
function: F,
parameters: &[f64],
parameter_ids: &[usize],
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let registry = self
.parameter_registry
.read()
.expect("Parameter registry lock poisoned during hybrid gradient computation");
let mut gradients = Vec::new();
for (i, &param_id) in parameter_ids.iter().enumerate() {
let param = registry.parameters.get(&param_id);
let method = param
.and_then(|p| p.gradient_method)
.unwrap_or(DifferentiationMethod::ParameterShift);
let single_param_gradient = match method {
DifferentiationMethod::ParameterShift => {
self.compute_single_parameter_shift_gradient(function, parameters, i)?
}
_ => self.compute_single_finite_difference_gradient(function, parameters, i)?,
};
gradients.push(single_param_gradient);
}
Ok(gradients)
}
fn compute_single_parameter_shift_gradient<F>(
&self,
function: F,
parameters: &[f64],
param_index: usize,
) -> QuantRS2Result<Complex64>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let shift = self.config.parameter_shift_step;
let mut params_plus = parameters.to_vec();
let mut params_minus = parameters.to_vec();
params_plus[param_index] += shift;
params_minus[param_index] -= shift;
let f_plus = function(&params_plus)?;
let f_minus = function(&params_minus)?;
Ok((f_plus - f_minus) / Complex64::new(2.0 * shift.sin(), 0.0))
}
fn compute_single_finite_difference_gradient<F>(
&self,
function: F,
parameters: &[f64],
param_index: usize,
) -> QuantRS2Result<Complex64>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let h = self.config.finite_diff_step;
let mut params_plus = parameters.to_vec();
let mut params_minus = parameters.to_vec();
params_plus[param_index] += h;
params_minus[param_index] -= h;
let f_plus = function(&params_plus)?;
let f_minus = function(&params_minus)?;
Ok((f_plus - f_minus) / Complex64::new(2.0 * h, 0.0))
}
fn compute_nth_order_derivatives<F>(
&self,
function: F,
parameter_ids: &[usize],
order: usize,
) -> QuantRS2Result<Vec<Complex64>>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
if order == 1 {
let params = self.get_parameter_values(parameter_ids)?;
return self.compute_central_difference_gradients(function, &params, parameter_ids);
}
let mut derivatives = vec![Complex64::new(0.0, 0.0); parameter_ids.len()];
let h = self.config.finite_diff_step.powf(1.0 / order as f64);
for (i, _) in parameter_ids.iter().enumerate() {
derivatives[i] =
self.compute_higher_order_single_param(function, parameter_ids, i, order, h)?;
}
Ok(derivatives)
}
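/// Central-difference stencils for single-parameter higher derivatives:
/// f''(x) ≈ [f(x+h) - 2f(x) + f(x-h)] / h² and
/// f'''(x) ≈ [f(x+2h) - 2f(x+h) + 2f(x-h) - f(x-2h)] / (2h³).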
fn compute_higher_order_single_param<F>(
&self,
function: F,
parameter_ids: &[usize],
param_index: usize,
order: usize,
h: f64,
) -> QuantRS2Result<Complex64>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let params = self.get_parameter_values(parameter_ids)?;
match order {
2 => {
let mut params_plus = params.clone();
let mut params_minus = params.clone();
params_plus[param_index] += h;
params_minus[param_index] -= h;
let f_plus = function(&params_plus)?;
let f_center = function(&params)?;
let f_minus = function(&params_minus)?;
Ok((f_plus - 2.0 * f_center + f_minus) / Complex64::new(h * h, 0.0))
}
3 => {
let mut params_2h = params.clone();
let mut params_h = params.clone();
let mut params_neg_h = params.clone();
let mut params_neg_2h = params;
params_2h[param_index] += 2.0 * h;
params_h[param_index] += h;
params_neg_h[param_index] -= h;
params_neg_2h[param_index] -= 2.0 * h;
let f_2h = function(&params_2h)?;
let f_h = function(&params_h)?;
let f_neg_h = function(&params_neg_h)?;
let f_neg_2h = function(&params_neg_2h)?;
Ok((f_2h - 2.0 * f_h + 2.0 * f_neg_h - f_neg_2h)
/ Complex64::new(2.0 * h * h * h, 0.0))
}
_ => Err(QuantRS2Error::UnsupportedOperation(format!(
"Derivative order {order} is not implemented; only orders 1-3 are supported"
))),
}
}
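/// Mixed second partial via the four-point stencil:
/// ∂²f/∂xᵢ∂xⱼ ≈ [f(++) - f(+-) - f(-+) + f(--)] / (4h²).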
fn compute_mixed_partial<F>(
&self,
function: F,
parameter_ids: &[usize],
i: usize,
j: usize,
) -> QuantRS2Result<Complex64>
where
F: Fn(&[f64]) -> QuantRS2Result<Complex64> + Copy,
{
let params = self.get_parameter_values(parameter_ids)?;
let h = self.config.finite_diff_step;
let mut params_pp = params.clone();
let mut params_pm = params.clone();
let mut params_mp = params.clone();
let mut params_mm = params;
params_pp[i] += h;
params_pp[j] += h;
params_pm[i] += h;
params_pm[j] -= h;
params_mp[i] -= h;
params_mp[j] += h;
params_mm[i] -= h;
params_mm[j] -= h;
let f_pp = function(&params_pp)?;
let f_pm = function(&params_pm)?;
let f_mp = function(&params_mp)?;
let f_mm = function(&params_mm)?;
Ok((f_pp - f_pm - f_mp + f_mm) / Complex64::new(4.0 * h * h, 0.0))
}
fn get_parameter_values(&self, parameter_ids: &[usize]) -> QuantRS2Result<Vec<f64>> {
let registry = self
.parameter_registry
.read()
.expect("Parameter registry lock poisoned during value retrieval");
let mut values = Vec::new();
for &id in parameter_ids {
let param = registry.parameters.get(&id).ok_or_else(|| {
QuantRS2Error::InvalidParameter(format!("Parameter {id} not found"))
})?;
values.push(param.value);
}
Ok(values)
}
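/// Rotation-style gates admit the exact parameter-shift rule; everything else
/// falls back to central differences.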
fn select_optimal_method(&self, gate_name: &str) -> DifferentiationMethod {
match gate_name {
"RX" | "RY" | "RZ" | "PhaseShift" | "U1" | "U2" | "U3" => {
DifferentiationMethod::ParameterShift
}
_ => DifferentiationMethod::CentralDifference,
}
}
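/// Rough error model: analytic methods are limited by machine precision,
/// forward differences scale as O(h), and central differences as O(h²).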
fn estimate_gradient_error(
&self,
gradients: &[Complex64],
method: DifferentiationMethod,
) -> f64 {
let max_gradient = gradients.iter().map(|g| g.norm()).fold(0.0, f64::max);
match method {
DifferentiationMethod::ParameterShift
| DifferentiationMethod::ComplexStep
| DifferentiationMethod::DualNumber => max_gradient * 1e-15,
DifferentiationMethod::FiniteDifference => max_gradient * self.config.finite_diff_step,
DifferentiationMethod::CentralDifference => {
max_gradient * self.config.finite_diff_step * self.config.finite_diff_step
}
DifferentiationMethod::Hybrid => max_gradient * 1e-12,
}
}
fn generate_cache_key(
&self,
parameter_ids: &[usize],
parameter_values: &[f64],
method: DifferentiationMethod,
) -> String {
// The key covers parameter identities, their current values, and the method;
// the objective function itself is assumed fixed for the lifetime of the cache.
format!("{parameter_ids:?}_{parameter_values:?}_{method:?}")
}
fn get_cached_gradient(&self, key: &str) -> Option<CacheEntry> {
let cache = self
.gradient_cache
.read()
.expect("Gradient cache lock poisoned during cache retrieval");
cache.entries.get(key).cloned()
}
fn cache_gradient(
&self,
key: String,
gradients: &[Complex64],
cost: f64,
method: DifferentiationMethod,
) {
let mut cache = self
.gradient_cache
.write()
.expect("Gradient cache lock poisoned during cache insertion");
cache.insert(key, gradients.to_vec(), cost, method, self.config.cache_size_limit);
}
fn sgd_update(&self, gradients: &GradientResult, learning_rate: f64) -> QuantRS2Result<()> {
let mut registry = self
.parameter_registry
.write()
.expect("Parameter registry lock poisoned during SGD update");
for (i, &param_id) in gradients.parameter_ids.iter().enumerate() {
if let Some(param) = registry.parameters.get_mut(&param_id) {
let gradient_real = gradients.gradients[i].re;
param.value -= learning_rate * gradient_real;
if let Some((min_val, max_val)) = param.bounds {
param.value = param.value.clamp(min_val, max_val);
}
}
}
Ok(())
}
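// Placeholder optimizers: the Adam, L-BFGS, and AdaGrad updates below are not
// yet implemented and currently leave parameters unchanged.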
const fn adam_update(
&self,
_gradients: &GradientResult,
_learning_rate: f64,
) -> QuantRS2Result<()> {
Ok(())
}
const fn lbfgs_update(
&self,
_gradients: &GradientResult,
_learning_rate: f64,
) -> QuantRS2Result<()> {
Ok(())
}
const fn adagrad_update(
&self,
_gradients: &GradientResult,
_learning_rate: f64,
) -> QuantRS2Result<()> {
Ok(())
}
}
#[derive(Debug, Clone, Copy)]
pub enum OptimizerType {
SGD,
Adam,
LBFGS,
AdaGrad,
}
impl GradientCache {
fn new() -> Self {
Self {
entries: HashMap::new(),
access_order: Vec::new(),
total_size: 0,
}
}
fn insert(
&mut self,
key: String,
gradients: Vec<Complex64>,
cost: f64,
method: DifferentiationMethod,
size_limit: usize,
) {
let entry = CacheEntry {
gradients,
computation_cost: cost,
timestamp: std::time::Instant::now(),
method_used: method,
};
self.entries.insert(key.clone(), entry);
self.access_order.push(key);
self.total_size += 1;
// FIFO eviction once the configured cache size limit is exceeded.
while self.total_size > size_limit {
if let Some(oldest_key) = self.access_order.first().cloned() {
self.entries.remove(&oldest_key);
self.access_order.remove(0);
self.total_size -= 1;
} else {
break;
}
}
}
}
impl ComputationGraph {
fn new() -> Self {
Self {
nodes: Vec::new(),
edges: Vec::new(),
parameter_dependencies: HashMap::new(),
}
}
}
impl ParameterRegistry {
fn new() -> Self {
Self {
parameters: HashMap::new(),
next_id: 0,
}
}
fn add_parameter(&mut self, name: &str, value: f64, bounds: Option<(f64, f64)>) -> usize {
let id = self.next_id;
self.next_id += 1;
let parameter = Parameter {
id,
name: name.to_string(),
value,
bounds,
differentiable: true,
gradient_method: None,
};
self.parameters.insert(id, parameter);
id
}
}
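/// Convenience constructors with presets for high-precision, throughput-oriented,
/// VQE-style, and QAOA-style workloads.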
pub struct QuantumAutoDiffFactory;
impl QuantumAutoDiffFactory {
pub fn create_high_precision() -> QuantumAutoDiff {
let config = QuantumAutoDiffConfig {
finite_diff_step: 1e-10,
gradient_precision: 1e-15,
max_derivative_order: 5,
default_method: DifferentiationMethod::ParameterShift,
..Default::default()
};
QuantumAutoDiff::new(config)
}
pub fn create_performance_optimized() -> QuantumAutoDiff {
let config = QuantumAutoDiffConfig {
finite_diff_step: 1e-5,
enable_higher_order: false,
max_derivative_order: 1,
enable_caching: true,
cache_size_limit: 50000,
default_method: DifferentiationMethod::Hybrid,
..Default::default()
};
QuantumAutoDiff::new(config)
}
pub fn create_for_vqe() -> QuantumAutoDiff {
let config = QuantumAutoDiffConfig {
default_method: DifferentiationMethod::ParameterShift,
parameter_shift_step: std::f64::consts::PI / 2.0,
enable_higher_order: false,
enable_caching: true,
..Default::default()
};
QuantumAutoDiff::new(config)
}
pub fn create_for_qaoa() -> QuantumAutoDiff {
let config = QuantumAutoDiffConfig {
default_method: DifferentiationMethod::CentralDifference,
finite_diff_step: 1e-6,
enable_higher_order: true,
max_derivative_order: 2,
..Default::default()
};
QuantumAutoDiff::new(config)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_quantum_autodiff_creation() {
let config = QuantumAutoDiffConfig::default();
let autodiff = QuantumAutoDiff::new(config);
assert_eq!(
autodiff.config.default_method,
DifferentiationMethod::ParameterShift
);
}
#[test]
fn test_parameter_registration() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let param_id =
autodiff.register_parameter("theta", 0.5, Some((0.0, 2.0 * std::f64::consts::PI)));
assert!(param_id.is_ok());
let id = param_id.expect("Failed to register parameter");
let values = autodiff.get_parameter_values(&[id]);
assert!(values.is_ok());
assert_eq!(values.expect("Failed to get parameter values")[0], 0.5);
}
#[test]
fn test_gradient_computation() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let param_id = autodiff
.register_parameter("x", 1.0, None)
.expect("Failed to register parameter");
let function = |params: &[f64]| -> QuantRS2Result<Complex64> {
Ok(Complex64::new(params[0] * params[0], 0.0))
};
let gradients = autodiff.compute_gradients(
function,
&[param_id],
Some(DifferentiationMethod::CentralDifference),
);
assert!(gradients.is_ok());
let result = gradients.expect("Failed to compute gradients");
assert!(
(result.gradients[0].re - 2.0).abs() < 1e-3,
"Expected gradient close to 2.0, got {}",
result.gradients[0].re
);
}
#[test]
fn test_different_differentiation_methods() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let param_id = autodiff
.register_parameter("x", 0.5, None)
.expect("Failed to register parameter");
let function = |params: &[f64]| -> QuantRS2Result<Complex64> {
Ok(Complex64::new(params[0].sin(), 0.0))
};
let methods = vec![
DifferentiationMethod::ParameterShift,
DifferentiationMethod::FiniteDifference,
DifferentiationMethod::CentralDifference,
];
for method in methods {
let result = autodiff.compute_gradients(function, &[param_id], Some(method));
assert!(result.is_ok());
let expected = 0.5_f64.cos();
let computed = result.expect("Failed to compute gradient").gradients[0].re;
assert!(
(computed - expected).abs() < 0.1,
"Method {:?}: expected {}, got {}",
method,
expected,
computed
);
}
}
#[test]
fn test_higher_order_derivatives() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let param_id = autodiff
.register_parameter("x", 1.0, None)
.expect("Failed to register parameter");
let function = |params: &[f64]| -> QuantRS2Result<Complex64> {
Ok(Complex64::new(params[0].powi(3), 0.0))
};
let result = autodiff.compute_higher_order_derivatives(function, &[param_id], 3);
assert!(result.is_ok());
let derivatives = result.expect("Failed to compute higher order derivatives");
assert_eq!(derivatives.derivatives.len(), 3);
}
#[test]
fn test_circuit_gradients() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let theta_id = autodiff
.register_parameter("theta", 0.0, None)
.expect("Failed to register theta parameter");
let phi_id = autodiff
.register_parameter("phi", 0.0, None)
.expect("Failed to register phi parameter");
let circuit_function = |params: &[f64], _observable: &str| -> QuantRS2Result<Complex64> {
let theta = if !params.is_empty() { params[0] } else { 0.0 };
let phi = if params.len() > 1 { params[1] } else { 0.0 };
let result = (theta.cos() * phi.sin()).abs();
Ok(Complex64::new(result, 0.0))
};
let gate_parameters = vec![
(0, "RY".to_string(), vec![theta_id]),
(1, "RZ".to_string(), vec![phi_id]),
];
let results = autodiff.circuit_gradients(circuit_function, &gate_parameters, "Z");
assert!(results.is_ok());
let gradients = results.expect("Failed to compute circuit gradients");
assert_eq!(gradients.len(), 2);
}
#[test]
fn test_parameter_update() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let param_id = autodiff
.register_parameter("x", 1.0, None)
.expect("Failed to register parameter");
let gradient_result = GradientResult {
gradients: vec![Complex64::new(2.0, 0.0)],
parameter_ids: vec![param_id],
computation_method: DifferentiationMethod::ParameterShift,
computation_time: std::time::Duration::from_millis(10),
numerical_error_estimate: 1e-15,
};
let learning_rate = 0.1;
let result = autodiff.parameter_update(&gradient_result, learning_rate, OptimizerType::SGD);
assert!(result.is_ok());
let new_values = autodiff
.get_parameter_values(&[param_id])
.expect("Failed to get updated parameter values");
assert!((new_values[0] - 0.8).abs() < 1e-10);
}
#[test]
fn test_gradient_caching() {
let mut autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let param_id = autodiff
.register_parameter("x", 1.0, None)
.expect("Failed to register parameter");
let function = |params: &[f64]| -> QuantRS2Result<Complex64> {
Ok(Complex64::new(params[0] * params[0], 0.0))
};
let start = std::time::Instant::now();
let result1 = autodiff
.compute_gradients(function, &[param_id], None)
.expect("Failed to compute first gradient");
let time1 = start.elapsed();
let start = std::time::Instant::now();
let result2 = autodiff
.compute_gradients(function, &[param_id], None)
.expect("Failed to compute second gradient");
let time2 = start.elapsed();
assert!((result1.gradients[0] - result2.gradients[0]).norm() < 1e-15);
println!("First: {:?}, Second: {:?}", time1, time2);
}
#[test]
fn test_factory_methods() {
let high_precision = QuantumAutoDiffFactory::create_high_precision();
let performance = QuantumAutoDiffFactory::create_performance_optimized();
let vqe = QuantumAutoDiffFactory::create_for_vqe();
let qaoa = QuantumAutoDiffFactory::create_for_qaoa();
assert_eq!(high_precision.config.finite_diff_step, 1e-10);
assert_eq!(performance.config.max_derivative_order, 1);
assert_eq!(
vqe.config.default_method,
DifferentiationMethod::ParameterShift
);
assert_eq!(
qaoa.config.default_method,
DifferentiationMethod::CentralDifference
);
}
#[test]
fn test_error_estimation() {
let autodiff = QuantumAutoDiff::new(QuantumAutoDiffConfig::default());
let gradients = vec![Complex64::new(1.0, 0.0), Complex64::new(0.5, 0.0)];
let error_ps =
autodiff.estimate_gradient_error(&gradients, DifferentiationMethod::ParameterShift);
let error_fd =
autodiff.estimate_gradient_error(&gradients, DifferentiationMethod::FiniteDifference);
let error_cd =
autodiff.estimate_gradient_error(&gradients, DifferentiationMethod::CentralDifference);
assert!(error_ps < error_fd);
assert!(error_cd < error_fd);
}
}