use crate::error::{Result, SimulatorError};
use crate::pauli::{PauliOperatorSum, PauliString};
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::prelude::*;
use scirs2_core::Complex64;
use std::f64::consts::PI;
#[cfg(feature = "optimize")]
use crate::optirs_integration::{OptiRSConfig, OptiRSQuantumOptimizer};
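/// Strategy for differentiating circuit expectation values with respect to
/// the variational parameters.
///
/// - `ParameterShift`: analytic gradients via dE/dθ = [E(θ+π/2) − E(θ−π/2)] / 2,
///   exact for rotation gates of the form exp(−iθP/2).
/// - `FiniteDifference`: forward difference [E(θ+h) − E(θ)] / h with bias O(h).
/// - `SPSA`: simultaneous perturbation stochastic approximation; estimates the
///   whole gradient from two evaluations regardless of the parameter count.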
#[derive(Debug, Clone, Copy)]
pub enum GradientMethod {
ParameterShift,
FiniteDifference { step_size: f64 },
SPSA { step_size: f64 },
}
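/// Bookkeeping for gradient-based optimization: the current parameter vector,
/// human-readable parameter names (defaulting to θ0, θ1, …), the gradient
/// method in use, the most recent gradient, and evaluation counters.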
#[derive(Debug, Clone)]
pub struct AutoDiffContext {
pub parameters: Vec<f64>,
pub parameter_names: Vec<String>,
pub method: GradientMethod,
pub gradients: Vec<f64>,
pub grad_evaluations: usize,
pub func_evaluations: usize,
}
impl AutoDiffContext {
#[must_use]
pub fn new(parameters: Vec<f64>, method: GradientMethod) -> Self {
let num_params = parameters.len();
Self {
parameters,
parameter_names: (0..num_params).map(|i| format!("θ{i}")).collect(),
method,
gradients: vec![0.0; num_params],
grad_evaluations: 0,
func_evaluations: 0,
}
}
#[must_use]
pub fn with_parameter_names(mut self, names: Vec<String>) -> Self {
assert_eq!(names.len(), self.parameters.len());
self.parameter_names = names;
self
}
pub fn update_parameters(&mut self, new_params: Vec<f64>) {
assert_eq!(new_params.len(), self.parameters.len());
self.parameters = new_params;
}
#[must_use]
pub fn get_parameter(&self, name: &str) -> Option<f64> {
self.parameter_names
.iter()
.position(|n| n == name)
.map(|i| self.parameters[i])
}
pub fn set_parameter(&mut self, name: &str, value: f64) -> Result<()> {
if let Some(i) = self.parameter_names.iter().position(|n| n == name) {
self.parameters[i] = value;
Ok(())
} else {
Err(SimulatorError::InvalidInput(format!(
"Parameter '{name}' not found"
)))
}
}
}
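/// A gate whose unitary depends on entries of a shared parameter vector.
/// Implementors provide the matrix and its analytic derivative; a default
/// helper returns the ±π/2-shifted matrices used by the parameter-shift rule.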
pub trait ParametricGate: Send + Sync {
fn name(&self) -> &str;
fn qubits(&self) -> Vec<usize>;
fn parameter_indices(&self) -> Vec<usize>;
fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>>;
fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>>;
fn parameter_shift_gradient(
&self,
params: &[f64],
param_idx: usize,
) -> Result<(Array2<Complex64>, Array2<Complex64>)> {
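        // Shift only the target parameter by ±π/2; for rotations exp(−iθP/2),
        // the resulting unitaries feed the parameter-shift rule
        // dE/dθ = [E(θ+π/2) − E(θ−π/2)] / 2 exactly.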
let shift = PI / 2.0;
let mut params_plus = params.to_vec();
let mut params_minus = params.to_vec();
if param_idx < params.len() {
params_plus[param_idx] += shift;
params_minus[param_idx] -= shift;
}
        let matrix_plus = self.matrix(&params_plus)?;
        let matrix_minus = self.matrix(&params_minus)?;
Ok((matrix_plus, matrix_minus))
}
}
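/// Rotation about X: RX(θ) = exp(−iθX/2)
/// = [[cos(θ/2), −i·sin(θ/2)], [−i·sin(θ/2), cos(θ/2)]].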
pub struct ParametricRX {
pub qubit: usize,
pub param_idx: usize,
}
impl ParametricGate for ParametricRX {
fn name(&self) -> &'static str {
"RX"
}
fn qubits(&self) -> Vec<usize> {
vec![self.qubit]
}
fn parameter_indices(&self) -> Vec<usize> {
vec![self.param_idx]
}
fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
let theta = params[self.param_idx];
let cos_half = (theta / 2.0).cos();
let sin_half = (theta / 2.0).sin();
Ok(scirs2_core::ndarray::array![
[Complex64::new(cos_half, 0.), Complex64::new(0., -sin_half)],
[Complex64::new(0., -sin_half), Complex64::new(cos_half, 0.)]
])
}
fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
if param_idx != self.param_idx {
return Ok(Array2::zeros((2, 2)));
}
let theta = params[self.param_idx];
let cos_half = (theta / 2.0).cos();
let sin_half = (theta / 2.0).sin();
Ok(scirs2_core::ndarray::array![
[
Complex64::new(-sin_half / 2.0, 0.),
Complex64::new(0., -cos_half / 2.0)
],
[
Complex64::new(0., -cos_half / 2.0),
Complex64::new(-sin_half / 2.0, 0.)
]
])
}
}
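/// Rotation about Y: RY(θ) = exp(−iθY/2)
/// = [[cos(θ/2), −sin(θ/2)], [sin(θ/2), cos(θ/2)]].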
pub struct ParametricRY {
pub qubit: usize,
pub param_idx: usize,
}
impl ParametricGate for ParametricRY {
fn name(&self) -> &'static str {
"RY"
}
fn qubits(&self) -> Vec<usize> {
vec![self.qubit]
}
fn parameter_indices(&self) -> Vec<usize> {
vec![self.param_idx]
}
fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
let theta = params[self.param_idx];
let cos_half = (theta / 2.0).cos();
let sin_half = (theta / 2.0).sin();
Ok(scirs2_core::ndarray::array![
[Complex64::new(cos_half, 0.), Complex64::new(-sin_half, 0.)],
[Complex64::new(sin_half, 0.), Complex64::new(cos_half, 0.)]
])
}
fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
if param_idx != self.param_idx {
return Ok(Array2::zeros((2, 2)));
}
let theta = params[self.param_idx];
let cos_half = (theta / 2.0).cos();
let sin_half = (theta / 2.0).sin();
Ok(scirs2_core::ndarray::array![
[
Complex64::new(-sin_half / 2.0, 0.),
Complex64::new(-cos_half / 2.0, 0.)
],
[
Complex64::new(cos_half / 2.0, 0.),
Complex64::new(-sin_half / 2.0, 0.)
]
])
}
}
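/// Rotation about Z: RZ(θ) = exp(−iθZ/2) = diag(e^{−iθ/2}, e^{iθ/2}).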
pub struct ParametricRZ {
pub qubit: usize,
pub param_idx: usize,
}
impl ParametricGate for ParametricRZ {
fn name(&self) -> &'static str {
"RZ"
}
fn qubits(&self) -> Vec<usize> {
vec![self.qubit]
}
fn parameter_indices(&self) -> Vec<usize> {
vec![self.param_idx]
}
fn matrix(&self, params: &[f64]) -> Result<Array2<Complex64>> {
let theta = params[self.param_idx];
let exp_pos = Complex64::from_polar(1.0, theta / 2.0);
let exp_neg = Complex64::from_polar(1.0, -theta / 2.0);
Ok(scirs2_core::ndarray::array![
[exp_neg, Complex64::new(0., 0.)],
[Complex64::new(0., 0.), exp_pos]
])
}
fn gradient(&self, params: &[f64], param_idx: usize) -> Result<Array2<Complex64>> {
if param_idx != self.param_idx {
return Ok(Array2::zeros((2, 2)));
}
let theta = params[self.param_idx];
let exp_pos = Complex64::from_polar(1.0, theta / 2.0);
let exp_neg = Complex64::from_polar(1.0, -theta / 2.0);
Ok(scirs2_core::ndarray::array![
[exp_neg * Complex64::new(0., -0.5), Complex64::new(0., 0.)],
[Complex64::new(0., 0.), exp_pos * Complex64::new(0., 0.5)]
])
}
}
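/// An ordered list of parametric gates acting on `num_qubits` qubits. Gates
/// may share parameter indices; `num_parameters` is one past the largest
/// index referenced by any gate.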
pub struct ParametricCircuit {
pub gates: Vec<Box<dyn ParametricGate>>,
pub num_qubits: usize,
pub num_parameters: usize,
}
impl ParametricCircuit {
#[must_use]
pub fn new(num_qubits: usize) -> Self {
Self {
gates: Vec::new(),
num_qubits,
num_parameters: 0,
}
}
pub fn add_gate(&mut self, gate: Box<dyn ParametricGate>) {
        for &param_idx in &gate.parameter_indices() {
self.num_parameters = self.num_parameters.max(param_idx + 1);
}
self.gates.push(gate);
}
pub fn rx(&mut self, qubit: usize, param_idx: usize) {
self.add_gate(Box::new(ParametricRX { qubit, param_idx }));
}
pub fn ry(&mut self, qubit: usize, param_idx: usize) {
self.add_gate(Box::new(ParametricRY { qubit, param_idx }));
}
pub fn rz(&mut self, qubit: usize, param_idx: usize) {
self.add_gate(Box::new(ParametricRZ { qubit, param_idx }));
}
    pub fn evaluate(&self, params: &[f64]) -> Result<Array1<Complex64>> {
        if params.len() != self.num_parameters {
            return Err(SimulatorError::InvalidInput(format!(
                "Expected {} parameters, got {}",
                self.num_parameters,
                params.len()
            )));
        }
        // Start from |0...0> and apply each gate's unitary directly to the
        // state vector; only single-qubit gates are defined in this module.
        let dim = 1 << self.num_qubits;
        let mut state: Array1<Complex64> = Array1::zeros(dim);
        state[0] = Complex64::new(1.0, 0.0);
        for gate in &self.gates {
            let matrix = gate.matrix(params)?;
            let qubits = gate.qubits();
            match qubits.len() {
                1 => {
                    let stride = 1 << qubits[0];
                    // Update each amplitude pair differing only in the target bit.
                    for base in 0..dim {
                        if base & stride == 0 {
                            let a0 = state[base];
                            let a1 = state[base | stride];
                            state[base] = matrix[[0, 0]] * a0 + matrix[[0, 1]] * a1;
                            state[base | stride] = matrix[[1, 0]] * a0 + matrix[[1, 1]] * a1;
                        }
                    }
                }
                n => {
                    return Err(SimulatorError::InvalidOperation(format!(
                        "{n}-qubit parametric gates are not supported"
                    )));
                }
            }
        }
        Ok(state)
    }
pub fn gradient_expectation(
&self,
observable: &PauliOperatorSum,
params: &[f64],
method: GradientMethod,
) -> Result<Vec<f64>> {
match method {
GradientMethod::ParameterShift => self.parameter_shift_gradient(observable, params),
GradientMethod::FiniteDifference { step_size } => {
self.finite_difference_gradient(observable, params, step_size)
}
GradientMethod::SPSA { step_size } => self.spsa_gradient(observable, params, step_size),
}
}
fn parameter_shift_gradient(
&self,
observable: &PauliOperatorSum,
params: &[f64],
) -> Result<Vec<f64>> {
let mut gradients = vec![0.0; self.num_parameters];
for param_idx in 0..self.num_parameters {
let shift = PI / 2.0;
let mut params_plus = params.to_vec();
params_plus[param_idx] += shift;
            let state_plus = self.evaluate(&params_plus)?;
let expectation_plus = compute_expectation_value(&state_plus, observable)?;
let mut params_minus = params.to_vec();
params_minus[param_idx] -= shift;
            let state_minus = self.evaluate(&params_minus)?;
let expectation_minus = compute_expectation_value(&state_minus, observable)?;
gradients[param_idx] = (expectation_plus - expectation_minus) / 2.0;
}
Ok(gradients)
}
fn finite_difference_gradient(
&self,
observable: &PauliOperatorSum,
params: &[f64],
step_size: f64,
) -> Result<Vec<f64>> {
        let mut gradients = vec![0.0; self.num_parameters];
        // The unshifted expectation is identical for every component, so
        // evaluate it once outside the loop.
        let state = self.evaluate(params)?;
        let expectation = compute_expectation_value(&state, observable)?;
        for param_idx in 0..self.num_parameters {
            let mut params_plus = params.to_vec();
            params_plus[param_idx] += step_size;
            let state_plus = self.evaluate(&params_plus)?;
            let expectation_plus = compute_expectation_value(&state_plus, observable)?;
            gradients[param_idx] = (expectation_plus - expectation) / step_size;
        }
        Ok(gradients)
}
fn spsa_gradient(
&self,
observable: &PauliOperatorSum,
params: &[f64],
step_size: f64,
) -> Result<Vec<f64>> {
let mut rng = thread_rng();
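        // Draw a Rademacher (±1) perturbation direction; a single random
        // direction lets SPSA estimate every gradient component from just
        // two circuit evaluations.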
let mut perturbation = vec![0.0; self.num_parameters];
for p in &mut perturbation {
*p = if rng.random::<bool>() { 1.0 } else { -1.0 };
}
let mut params_plus = params.to_vec();
let mut params_minus = params.to_vec();
for i in 0..self.num_parameters {
params_plus[i] += step_size * perturbation[i];
params_minus[i] -= step_size * perturbation[i];
}
        let state_plus = self.evaluate(&params_plus)?;
        let state_minus = self.evaluate(&params_minus)?;
let expectation_plus = compute_expectation_value(&state_plus, observable)?;
let expectation_minus = compute_expectation_value(&state_minus, observable)?;
let diff = (expectation_plus - expectation_minus) / (2.0 * step_size);
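        // Each perturbation entry is ±1, so dividing by it recovers the
        // per-component estimate g_i ≈ (E_plus − E_minus) / (2 · step_size · Δ_i).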
let gradients = perturbation.iter().map(|&p| diff / p).collect();
Ok(gradients)
}
}
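/// Variational Quantum Eigensolver driver: minimizes ⟨ψ(θ)|H|ψ(θ)⟩ over the
/// ansatz parameters by gradient descent, recording every iteration.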
pub struct VQEWithAutodiff {
pub ansatz: ParametricCircuit,
pub hamiltonian: PauliOperatorSum,
pub context: AutoDiffContext,
pub history: Vec<VQEIteration>,
pub convergence: ConvergenceCriteria,
}
#[derive(Clone)]
pub struct VQEIteration {
pub iteration: usize,
pub parameters: Vec<f64>,
pub energy: f64,
pub gradient_norm: f64,
pub func_evals: usize,
pub grad_evals: usize,
}
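/// Stopping criteria for the optimization loop. Convergence requires both the
/// energy-change and gradient-norm tests to pass (see `is_converged`), while
/// the iteration and evaluation caps act as hard limits.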
pub struct ConvergenceCriteria {
pub max_iterations: usize,
pub energy_tolerance: f64,
pub gradient_tolerance: f64,
pub max_func_evals: usize,
}
impl Default for ConvergenceCriteria {
fn default() -> Self {
Self {
max_iterations: 1000,
energy_tolerance: 1e-6,
gradient_tolerance: 1e-6,
max_func_evals: 10_000,
}
}
}
impl VQEWithAutodiff {
#[must_use]
pub fn new(
ansatz: ParametricCircuit,
hamiltonian: PauliOperatorSum,
initial_params: Vec<f64>,
gradient_method: GradientMethod,
) -> Self {
let context = AutoDiffContext::new(initial_params, gradient_method);
Self {
ansatz,
hamiltonian,
context,
history: Vec::new(),
convergence: ConvergenceCriteria::default(),
}
}
#[must_use]
pub const fn with_convergence(mut self, convergence: ConvergenceCriteria) -> Self {
self.convergence = convergence;
self
}
pub fn evaluate_energy(&mut self) -> Result<f64> {
let state = self.ansatz.evaluate(&self.context.parameters)?;
let energy = compute_expectation_value(&state, &self.hamiltonian)?;
self.context.func_evaluations += 1;
Ok(energy)
}
pub fn compute_gradient(&mut self) -> Result<Vec<f64>> {
let gradients = self.ansatz.gradient_expectation(
&self.hamiltonian,
&self.context.parameters,
self.context.method,
)?;
self.context.gradients.clone_from(&gradients);
self.context.grad_evaluations += 1;
Ok(gradients)
}
pub fn step(&mut self, learning_rate: f64) -> Result<VQEIteration> {
let energy = self.evaluate_energy()?;
let gradients = self.compute_gradient()?;
for (i, &grad) in gradients.iter().enumerate() {
self.context.parameters[i] -= learning_rate * grad;
}
let gradient_norm = gradients.iter().map(|g| g * g).sum::<f64>().sqrt();
let iteration = VQEIteration {
iteration: self.history.len(),
parameters: self.context.parameters.clone(),
energy,
gradient_norm,
func_evals: self.context.func_evaluations,
grad_evals: self.context.grad_evaluations,
};
self.history.push(iteration.clone());
Ok(iteration)
}
pub fn optimize(&mut self, learning_rate: f64) -> Result<VQEResult> {
while !self.is_converged()? {
let iteration = self.step(learning_rate)?;
            if self.history.len() >= self.convergence.max_iterations {
break;
}
if iteration.func_evals >= self.convergence.max_func_evals {
break;
}
}
let final_iteration = self.history.last().ok_or_else(|| {
SimulatorError::InvalidOperation("VQE optimization produced no iterations".to_string())
})?;
Ok(VQEResult {
optimal_parameters: final_iteration.parameters.clone(),
optimal_energy: final_iteration.energy,
iterations: self.history.len(),
converged: self.is_converged()?,
history: self.history.clone(),
})
}
fn is_converged(&self) -> Result<bool> {
if self.history.len() < 2 {
return Ok(false);
}
let current = &self.history[self.history.len() - 1];
let previous = &self.history[self.history.len() - 2];
let energy_converged =
(current.energy - previous.energy).abs() < self.convergence.energy_tolerance;
let gradient_converged = current.gradient_norm < self.convergence.gradient_tolerance;
Ok(energy_converged && gradient_converged)
}
#[cfg(feature = "optimize")]
pub fn optimize_with_optirs(&mut self, config: OptiRSConfig) -> Result<VQEResult> {
use std::time::Instant;
let start_time = Instant::now();
let mut optimizer = OptiRSQuantumOptimizer::new(config)?;
while !self.is_converged()? && !optimizer.has_converged() {
let energy = self.evaluate_energy()?;
let gradients = self.compute_gradient()?;
let new_params =
optimizer.optimize_step(&self.context.parameters, &gradients, energy)?;
self.context.parameters = new_params;
let gradient_norm = gradients.iter().map(|g| g * g).sum::<f64>().sqrt();
let iteration = VQEIteration {
iteration: self.history.len(),
parameters: self.context.parameters.clone(),
energy,
gradient_norm,
func_evals: self.context.func_evaluations,
grad_evals: self.context.grad_evaluations,
};
self.history.push(iteration);
if self.history.len() >= self.convergence.max_iterations {
break;
}
if self.context.func_evaluations >= self.convergence.max_func_evals {
break;
}
}
let _optimization_time = start_time.elapsed();
let final_iteration = self.history.last().ok_or_else(|| {
SimulatorError::InvalidOperation(
"VQE optimization with OptiRS produced no iterations".to_string(),
)
})?;
Ok(VQEResult {
optimal_parameters: final_iteration.parameters.clone(),
optimal_energy: final_iteration.energy,
iterations: self.history.len(),
converged: self.is_converged()?,
history: self.history.clone(),
})
}
}
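/// Final outcome of a VQE run: best parameters and energy found, iteration
/// count, whether the convergence criteria were met, and the full history.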
pub struct VQEResult {
pub optimal_parameters: Vec<f64>,
pub optimal_energy: f64,
pub iterations: usize,
pub converged: bool,
pub history: Vec<VQEIteration>,
}
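/// Computes ⟨ψ|H|ψ⟩ for an observable given as a weighted sum of Pauli
/// strings. Each term's coefficient is applied inside
/// `compute_pauli_expectation_from_state`, so only the real part of each
/// term's value is accumulated here.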
fn compute_expectation_value(
state: &Array1<Complex64>,
observable: &PauliOperatorSum,
) -> Result<f64> {
let mut expectation = 0.0;
for term in &observable.terms {
let pauli_expectation = compute_pauli_expectation_from_state(state, term)?;
        // The term's coefficient is already folded in by the helper; applying
        // `term.coefficient` again here would double-count it.
        expectation += pauli_expectation.re;
}
Ok(expectation)
}
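/// Computes ⟨ψ|c·P|ψ⟩ for a single weighted Pauli string without building the
/// 2^n × 2^n matrix: X and Y flip the corresponding bit of each basis index,
/// while Y and Z contribute phases, so P|i⟩ = coeff·|target⟩ basis state by
/// basis state.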
fn compute_pauli_expectation_from_state(
state: &Array1<Complex64>,
pauli_string: &PauliString,
) -> Result<Complex64> {
let num_qubits = pauli_string.num_qubits;
let dim = 1 << num_qubits;
    use crate::pauli::PauliOperator;
    let mut result = Complex64::new(0.0, 0.0);
    for (i, &amplitude) in state.iter().enumerate() {
        if i >= dim {
            break;
        }
        let mut coeff = Complex64::new(1.0, 0.0);
        let mut target_state = i;
        for (qubit, &pauli_op) in pauli_string.operators.iter().enumerate() {
            let bit = (i >> qubit) & 1;
            match pauli_op {
                PauliOperator::I => {}
                PauliOperator::X => {
target_state ^= 1 << qubit;
}
PauliOperator::Y => {
target_state ^= 1 << qubit;
coeff *= if bit == 0 {
Complex64::new(0.0, 1.0)
} else {
Complex64::new(0.0, -1.0)
};
}
PauliOperator::Z => {
if bit == 1 {
coeff *= Complex64::new(-1.0, 0.0);
}
}
}
}
if target_state < dim {
result += amplitude.conj() * coeff * state[target_state];
}
}
Ok(result * pauli_string.coefficient)
}
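/// Ready-made variational circuit templates built from this module's
/// parametric rotation gates.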
pub mod ansatze {
use super::ParametricCircuit;
#[must_use]
pub fn hardware_efficient(num_qubits: usize, num_layers: usize) -> ParametricCircuit {
let mut circuit = ParametricCircuit::new(num_qubits);
let mut param_idx = 0;
for _layer in 0..num_layers {
for qubit in 0..num_qubits {
circuit.ry(qubit, param_idx);
param_idx += 1;
circuit.rz(qubit, param_idx);
param_idx += 1;
}
}
circuit
}
#[must_use]
pub fn qaoa_maxcut(
num_qubits: usize,
num_layers: usize,
edges: &[(usize, usize)],
) -> ParametricCircuit {
let mut circuit = ParametricCircuit::new(num_qubits);
let mut param_idx = 0;
        // Initial layer: RY rotations sharing one parameter (set near PI/2 at
        // evaluation time to approximate the usual |+>^n preparation, since
        // this module defines no fixed Hadamard gate). Advance param_idx so
        // this parameter does not alias the first cost-layer parameter.
        for qubit in 0..num_qubits {
            circuit.ry(qubit, param_idx);
        }
        param_idx += 1;
for _layer in 0..num_layers {
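            // Note: a single-qubit RZ on both endpoints is not the exact
            // MaxCut cost unitary exp(-i*gamma*Z_i*Z_j); the exact version
            // needs a two-qubit interaction, which this module does not
            // define.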
for &(i, j) in edges {
circuit.rz(i, param_idx);
circuit.rz(j, param_idx);
param_idx += 1;
}
for qubit in 0..num_qubits {
circuit.rx(qubit, param_idx);
param_idx += 1;
}
}
circuit
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parametric_rx_matrix() {
let rx_gate = ParametricRX {
qubit: 0,
param_idx: 0,
};
let params = vec![PI / 2.0];
let matrix = rx_gate
            .matrix(&params)
.expect("RX gate matrix computation should succeed");
let expected_val = 1.0 / 2.0_f64.sqrt();
assert!((matrix[[0, 0]].re - expected_val).abs() < 1e-10);
assert!((matrix[[0, 1]].im + expected_val).abs() < 1e-10);
}
#[test]
fn test_autodiff_context() {
let params = vec![1.0, 2.0, 3.0];
let mut context = AutoDiffContext::new(params.clone(), GradientMethod::ParameterShift);
assert_eq!(context.parameters, params);
assert_eq!(context.gradients.len(), 3);
context.update_parameters(vec![4.0, 5.0, 6.0]);
assert_eq!(context.parameters, vec![4.0, 5.0, 6.0]);
}
#[test]
fn test_parametric_circuit_creation() {
let mut circuit = ParametricCircuit::new(2);
circuit.rx(0, 0);
circuit.ry(1, 1);
assert_eq!(circuit.gates.len(), 2);
assert_eq!(circuit.num_parameters, 2);
}
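    // A minimal sketch exercising the direct state-vector evaluation path:
    // RY(π/2) maps |0⟩ to (|0⟩ + |1⟩)/√2.
    #[test]
    fn test_circuit_evaluate_ry() {
        let mut circuit = ParametricCircuit::new(1);
        circuit.ry(0, 0);
        let state = circuit
            .evaluate(&[PI / 2.0])
            .expect("single-qubit evaluation should succeed");
        let expected = 1.0 / 2.0_f64.sqrt();
        assert!((state[0].re - expected).abs() < 1e-10);
        assert!((state[1].re - expected).abs() < 1e-10);
        assert!(state[0].im.abs() < 1e-10);
    }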
#[test]
fn test_hardware_efficient_ansatz() {
let ansatz = ansatze::hardware_efficient(3, 2);
assert_eq!(ansatz.num_qubits, 3);
        // 2 layers × 3 qubits × (RY + RZ) = 12 parameters.
        assert_eq!(ansatz.num_parameters, 12);
}
}