use crate::{
error::{QuantRS2Error, QuantRS2Result},
variational::VariationalCircuit,
};
use scirs2_core::ndarray::{Array1, Array2};
use crate::optimization_stubs::{minimize, Method, OptimizeResult, Options};
use crate::parallel_ops_stubs::*;
use rustc_hash::FxHashMap;
use std::sync::{Arc, Mutex};
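/// Optimizer for variational quantum circuits.
///
/// Stateful update rules (gradient descent, momentum, Adam, RMSprop, SPSA,
/// natural gradient, QN-SPSA) run in a custom loop; quasi-Newton and
/// direct-search methods are delegated to the scirs2 optimization backend.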
pub struct VariationalQuantumOptimizer {
method: OptimizationMethod,
config: OptimizationConfig,
history: OptimizationHistory,
fisher_cache: Option<FisherCache>,
}
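/// The optimization method to use.
///
/// Note: `ConjugateGradient` currently falls back to BFGS in the backend and
/// the L-BFGS `memory_size` is not forwarded. For SPSA, `a` and `c` are the
/// initial step and perturbation sizes and `alpha` the step-decay exponent;
/// `gamma` is accepted for API completeness but the perturbation size stays
/// fixed in the current implementation.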
#[derive(Debug, Clone)]
pub enum OptimizationMethod {
GradientDescent { learning_rate: f64 },
Momentum { learning_rate: f64, momentum: f64 },
Adam {
learning_rate: f64,
beta1: f64,
beta2: f64,
epsilon: f64,
},
RMSprop {
learning_rate: f64,
decay_rate: f64,
epsilon: f64,
},
NaturalGradient {
learning_rate: f64,
regularization: f64,
},
BFGS,
LBFGS { memory_size: usize },
ConjugateGradient,
NelderMead,
Powell,
SPSA {
a: f64,
c: f64,
alpha: f64,
gamma: f64,
},
QNSPSA {
learning_rate: f64,
regularization: f64,
spsa_epsilon: f64,
},
}
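/// Shared optimizer configuration: iteration budget, convergence tolerances,
/// optional early stopping (`patience`), gradient clipping, and an optional
/// per-iteration callback receiving the current parameters and loss.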
#[derive(Clone)]
pub struct OptimizationConfig {
pub max_iterations: usize,
pub f_tol: f64,
pub g_tol: f64,
pub x_tol: f64,
pub parallel_gradients: bool,
pub batch_size: Option<usize>,
pub seed: Option<u64>,
pub callback: Option<Arc<dyn Fn(&[f64], f64) + Send + Sync>>,
pub patience: Option<usize>,
pub grad_clip: Option<f64>,
}
impl Default for OptimizationConfig {
fn default() -> Self {
Self {
max_iterations: 100,
f_tol: 1e-8,
g_tol: 1e-8,
x_tol: 1e-8,
parallel_gradients: true,
batch_size: None,
seed: None,
callback: None,
patience: None,
grad_clip: None,
}
}
}
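/// Per-iteration trace of parameters, losses, gradient norms, and iteration
/// times (stored in milliseconds).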
#[derive(Debug, Clone)]
pub struct OptimizationHistory {
pub parameters: Vec<Vec<f64>>,
pub loss_values: Vec<f64>,
pub gradient_norms: Vec<f64>,
pub iteration_times: Vec<f64>,
pub total_iterations: usize,
pub converged: bool,
}
impl OptimizationHistory {
const fn new() -> Self {
Self {
parameters: Vec::new(),
loss_values: Vec::new(),
gradient_norms: Vec::new(),
iteration_times: Vec::new(),
total_iterations: 0,
converged: false,
}
}
}
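/// Caches the inverted Fisher matrix; it is reused while the parameters move
/// less than `threshold` in Euclidean norm.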
struct FisherCache {
matrix: Arc<Mutex<Option<Array2<f64>>>>,
params: Arc<Mutex<Option<Vec<f64>>>>,
threshold: f64,
}
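/// Per-parameter accumulators for the stateful update rules.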
struct OptimizerState {
momentum: FxHashMap<String, f64>,
adam_m: FxHashMap<String, f64>,
adam_v: FxHashMap<String, f64>,
rms_avg: FxHashMap<String, f64>,
iteration: usize,
}
impl VariationalQuantumOptimizer {
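/// Creates an optimizer with the given method and configuration. The Fisher
/// cache is allocated only for the natural-gradient methods that need it.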
pub fn new(method: OptimizationMethod, config: OptimizationConfig) -> Self {
let fisher_cache = match &method {
OptimizationMethod::NaturalGradient { .. } | OptimizationMethod::QNSPSA { .. } => {
Some(FisherCache {
matrix: Arc::new(Mutex::new(None)),
params: Arc::new(Mutex::new(None)),
threshold: 1e-3,
})
}
_ => None,
};
Self {
method,
config,
history: OptimizationHistory::new(),
fisher_cache,
}
}
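/// Runs the optimization, dispatching quasi-Newton and direct-search methods
/// to the scirs2 backend and everything else to the custom loop.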
pub fn optimize(
&mut self,
circuit: &mut VariationalCircuit,
cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + 'static,
) -> QuantRS2Result<OptimizationResult> {
let cost_fn = Arc::new(cost_fn);
match &self.method {
OptimizationMethod::BFGS
| OptimizationMethod::LBFGS { .. }
| OptimizationMethod::ConjugateGradient
| OptimizationMethod::NelderMead
| OptimizationMethod::Powell => self.optimize_with_scirs2(circuit, cost_fn),
_ => self.optimize_custom(circuit, cost_fn),
}
}
fn optimize_with_scirs2(
&mut self,
circuit: &mut VariationalCircuit,
cost_fn: Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
) -> QuantRS2Result<OptimizationResult> {
let param_names = circuit.parameter_names();
let initial_params: Vec<f64> = param_names
.iter()
.map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
.collect();
let circuit_clone = Arc::new(Mutex::new(circuit.clone()));
let param_names_clone = param_names.clone();
let objective = move |params: &scirs2_core::ndarray::ArrayView1<f64>| -> f64 {
let params_slice = match params.as_slice() {
Some(slice) => slice,
None => return f64::INFINITY,
};
let mut param_map = FxHashMap::default();
for (name, &value) in param_names_clone.iter().zip(params_slice) {
param_map.insert(name.clone(), value);
}
// Recover the circuit even if a previous objective call poisoned the lock.
let mut circuit = circuit_clone.lock().unwrap_or_else(|e| e.into_inner());
if circuit.set_parameters(&param_map).is_err() {
return f64::INFINITY;
}
cost_fn(&*circuit).unwrap_or(f64::INFINITY)
};
let method = match &self.method {
OptimizationMethod::BFGS | OptimizationMethod::ConjugateGradient => Method::BFGS,
OptimizationMethod::LBFGS { memory_size: _ } => Method::LBFGS,
OptimizationMethod::NelderMead => Method::NelderMead,
OptimizationMethod::Powell => Method::Powell,
_ => unreachable!("optimize_with_scirs2 is only called for scirs2-backed methods"),
};
let options = Options {
max_iter: self.config.max_iterations,
ftol: self.config.f_tol,
gtol: self.config.g_tol,
xtol: self.config.x_tol,
..Default::default()
};
let start_time = std::time::Instant::now();
let initial_array = scirs2_core::ndarray::Array1::from_vec(initial_params);
let result = minimize(objective, &initial_array, method, Some(options))
.map_err(|e| QuantRS2Error::InvalidInput(format!("Optimization failed: {e:?}")))?;
let mut final_params = FxHashMap::default();
let result_slice = result.x.as_slice().ok_or_else(|| {
QuantRS2Error::RuntimeError("Optimization result array is non-contiguous".to_string())
})?;
for (name, &value) in param_names.iter().zip(result_slice) {
final_params.insert(name.clone(), value);
}
circuit.set_parameters(&final_params)?;
self.history.parameters.push(result.x.to_vec());
self.history.loss_values.push(result.fun);
self.history.total_iterations = result.iterations;
self.history.converged = result.success;
Ok(OptimizationResult {
optimal_parameters: final_params,
final_loss: result.fun,
iterations: result.iterations,
converged: result.success,
optimization_time: start_time.elapsed().as_secs_f64(),
history: self.history.clone(),
})
}
fn optimize_custom(
&mut self,
circuit: &mut VariationalCircuit,
cost_fn: Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
) -> QuantRS2Result<OptimizationResult> {
let mut state = OptimizerState {
momentum: FxHashMap::default(),
adam_m: FxHashMap::default(),
adam_v: FxHashMap::default(),
rms_avg: FxHashMap::default(),
iteration: 0,
};
let param_names = circuit.parameter_names();
let start_time = std::time::Instant::now();
let mut best_loss = f64::INFINITY;
let mut patience_counter = 0;
for iter in 0..self.config.max_iterations {
let iter_start = std::time::Instant::now();
let loss = cost_fn(circuit)?;
if loss < best_loss - self.config.f_tol {
best_loss = loss;
patience_counter = 0;
} else if let Some(patience) = self.config.patience {
patience_counter += 1;
if patience_counter >= patience {
self.history.converged = true;
break;
}
}
let gradients = self.compute_gradients(circuit, &cost_fn)?;
let gradients = if let Some(max_norm) = self.config.grad_clip {
self.clip_gradients(gradients, max_norm)
} else {
gradients
};
self.update_parameters(circuit, &gradients, &mut state)?;
let current_params: Vec<f64> = param_names
.iter()
.map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
.collect();
let grad_norm = gradients.values().map(|g| g * g).sum::<f64>().sqrt();
if let Some(callback) = &self.config.callback {
callback(&current_params, loss);
}
self.history.parameters.push(current_params);
self.history.loss_values.push(loss);
self.history.gradient_norms.push(grad_norm);
self.history
.iteration_times
.push(iter_start.elapsed().as_secs_f64() * 1000.0);
self.history.total_iterations = iter + 1;
if grad_norm < self.config.g_tol {
self.history.converged = true;
break;
}
state.iteration += 1;
}
let final_params = circuit.get_parameters();
let final_loss = cost_fn(circuit)?;
Ok(OptimizationResult {
optimal_parameters: final_params,
final_loss,
iterations: self.history.total_iterations,
converged: self.history.converged,
optimization_time: start_time.elapsed().as_secs_f64(),
history: self.history.clone(),
})
}
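/// Computes the gradient of every parameter, in parallel when configured.
/// In the parallel path a failed evaluation falls back to a zero gradient;
/// the sequential path propagates errors.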
fn compute_gradients(
&self,
circuit: &VariationalCircuit,
cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
) -> QuantRS2Result<FxHashMap<String, f64>> {
let param_names = circuit.parameter_names();
if self.config.parallel_gradients {
let gradients: Vec<(String, f64)> = param_names
.par_iter()
.map(|param_name| {
let grad = self
.compute_single_gradient(circuit, param_name, cost_fn)
.unwrap_or(0.0);
(param_name.clone(), grad)
})
.collect();
Ok(gradients.into_iter().collect())
} else {
let mut gradients = FxHashMap::default();
for param_name in &param_names {
let grad = self.compute_single_gradient(circuit, param_name, cost_fn)?;
gradients.insert(param_name.clone(), grad);
}
Ok(gradients)
}
}
fn compute_single_gradient(
&self,
circuit: &VariationalCircuit,
param_name: &str,
cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
) -> QuantRS2Result<f64> {
match &self.method {
OptimizationMethod::SPSA { c, .. } => {
self.spsa_gradient(circuit, param_name, cost_fn, *c)
}
_ => {
self.parameter_shift_gradient(circuit, param_name, cost_fn)
}
}
}
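/// Parameter-shift rule: dC/dθ = [C(θ + π/2) - C(θ - π/2)] / 2, exact for
/// gates generated by operators with eigenvalues ±1/2 (e.g. Pauli rotations).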
fn parameter_shift_gradient(
&self,
circuit: &VariationalCircuit,
param_name: &str,
cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
) -> QuantRS2Result<f64> {
let current_params = circuit.get_parameters();
let current_value = *current_params.get(param_name).ok_or_else(|| {
QuantRS2Error::InvalidInput(format!("Parameter {param_name} not found"))
})?;
// Evaluate the cost at θ + π/2.
let mut circuit_plus = circuit.clone();
let mut params_plus = current_params.clone();
params_plus.insert(
param_name.to_string(),
current_value + std::f64::consts::PI / 2.0,
);
circuit_plus.set_parameters(&params_plus)?;
let loss_plus = cost_fn(&circuit_plus)?;
// Evaluate the cost at θ - π/2.
let mut circuit_minus = circuit.clone();
let mut params_minus = current_params;
params_minus.insert(
param_name.to_string(),
current_value - std::f64::consts::PI / 2.0,
);
circuit_minus.set_parameters(&params_minus)?;
let loss_minus = cost_fn(&circuit_minus)?;
Ok((loss_plus - loss_minus) / 2.0)
}
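/// Stochastic finite difference for a single parameter: perturb by ±epsilon
/// with a random sign and return the symmetric difference quotient. Canonical
/// SPSA perturbs all parameters simultaneously; this per-parameter variant
/// fits the single-gradient interface above.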
fn spsa_gradient(
&self,
circuit: &VariationalCircuit,
param_name: &str,
cost_fn: &Arc<dyn Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync>,
epsilon: f64,
) -> QuantRS2Result<f64> {
use scirs2_core::random::prelude::*;
// A fixed seed rebuilds the same RNG on every call, so the perturbation
// sign is then deterministic across gradient evaluations.
let mut rng = if let Some(seed) = self.config.seed {
StdRng::seed_from_u64(seed)
} else {
StdRng::from_seed(thread_rng().random())
};
let current_params = circuit.get_parameters();
let perturbation = if rng.random::<bool>() {
epsilon
} else {
-epsilon
};
let mut circuit_plus = circuit.clone();
let mut params_plus = current_params.clone();
if let Some(value) = params_plus.get_mut(param_name) {
*value += perturbation;
}
circuit_plus.set_parameters(&params_plus)?;
let loss_plus = cost_fn(&circuit_plus)?;
let mut circuit_minus = circuit.clone();
let mut params_minus = current_params;
if let Some(value) = params_minus.get_mut(param_name) {
*value -= perturbation;
}
circuit_minus.set_parameters(&params_minus)?;
let loss_minus = cost_fn(&circuit_minus)?;
// Symmetric difference quotient around the perturbed point.
Ok((loss_plus - loss_minus) / (2.0 * perturbation))
}
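/// Rescales the gradient vector so its Euclidean norm does not exceed `max_norm`.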
fn clip_gradients(
&self,
mut gradients: FxHashMap<String, f64>,
max_norm: f64,
) -> FxHashMap<String, f64> {
let norm = gradients.values().map(|g| g * g).sum::<f64>().sqrt();
if norm > max_norm {
let scale = max_norm / norm;
for grad in gradients.values_mut() {
*grad *= scale;
}
}
gradients
}
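/// Applies one parameter update step for the configured method.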
fn update_parameters(
&self,
circuit: &mut VariationalCircuit,
gradients: &FxHashMap<String, f64>,
state: &mut OptimizerState,
) -> QuantRS2Result<()> {
let mut new_params = circuit.get_parameters();
match &self.method {
OptimizationMethod::GradientDescent { learning_rate } => {
for (param_name, &grad) in gradients {
if let Some(value) = new_params.get_mut(param_name) {
*value -= learning_rate * grad;
}
}
}
OptimizationMethod::Momentum {
learning_rate,
momentum,
} => {
for (param_name, &grad) in gradients {
let velocity = state.momentum.entry(param_name.clone()).or_insert(0.0);
*velocity = momentum * *velocity - learning_rate * grad;
if let Some(value) = new_params.get_mut(param_name) {
*value += *velocity;
}
}
}
OptimizationMethod::Adam {
learning_rate,
beta1,
beta2,
epsilon,
} => {
let t = state.iteration as f64 + 1.0;
let lr_t = learning_rate * (1.0 - beta2.powf(t)).sqrt() / (1.0 - beta1.powf(t));
for (param_name, &grad) in gradients {
let m = state.adam_m.entry(param_name.clone()).or_insert(0.0);
let v = state.adam_v.entry(param_name.clone()).or_insert(0.0);
*m = (1.0 - beta1).mul_add(grad, beta1 * *m);
*v = ((1.0 - beta2) * grad).mul_add(grad, beta2 * *v);
if let Some(value) = new_params.get_mut(param_name) {
*value -= lr_t * *m / (v.sqrt() + epsilon);
}
}
}
OptimizationMethod::RMSprop {
learning_rate,
decay_rate,
epsilon,
} => {
for (param_name, &grad) in gradients {
let avg = state.rms_avg.entry(param_name.clone()).or_insert(0.0);
*avg = ((1.0 - decay_rate) * grad).mul_add(grad, decay_rate * *avg);
if let Some(value) = new_params.get_mut(param_name) {
*value -= learning_rate * grad / (avg.sqrt() + epsilon);
}
}
}
OptimizationMethod::NaturalGradient {
learning_rate,
regularization,
} => {
let fisher_inv =
self.compute_fisher_inverse(circuit, gradients, *regularization)?;
let natural_grad = self.apply_fisher_inverse(&fisher_inv, gradients);
for (param_name, &nat_grad) in &natural_grad {
if let Some(value) = new_params.get_mut(param_name) {
*value -= learning_rate * nat_grad;
}
}
}
OptimizationMethod::SPSA { a, alpha, .. } => {
let ak = a / (state.iteration as f64 + 1.0).powf(*alpha);
for (param_name, &grad) in gradients {
if let Some(value) = new_params.get_mut(param_name) {
*value -= ak * grad;
}
}
}
OptimizationMethod::QNSPSA {
learning_rate,
regularization,
..
} => {
let fisher_inv =
self.compute_fisher_inverse(circuit, gradients, *regularization)?;
let natural_grad = self.apply_fisher_inverse(&fisher_inv, gradients);
for (param_name, &nat_grad) in &natural_grad {
if let Some(value) = new_params.get_mut(param_name) {
*value -= learning_rate * nat_grad;
}
}
}
_ => {
return Err(QuantRS2Error::InvalidInput(
"Optimization method is handled by the scirs2 backend, not the custom update loop"
.to_string(),
));
}
}
circuit.set_parameters(&new_params)
}
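/// Approximates the Fisher information matrix by the outer product of the
/// gradient with itself, adds `regularization` to the diagonal (Tikhonov
/// damping) so it stays invertible, inverts it, and caches the result keyed
/// on the current parameters.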
fn compute_fisher_inverse(
&self,
circuit: &VariationalCircuit,
gradients: &FxHashMap<String, f64>,
regularization: f64,
) -> QuantRS2Result<Array2<f64>> {
let param_names: Vec<_> = gradients.keys().cloned().collect();
let n_params = param_names.len();
if let Some(cache) = &self.fisher_cache {
let cached_matrix_opt = cache
.matrix
.lock()
.map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))?;
if let Some(cached_matrix) = cached_matrix_opt.as_ref() {
let cached_params_opt = cache
.params
.lock()
.map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))?;
if let Some(cached_params) = cached_params_opt.as_ref() {
let current_params: Vec<f64> = param_names
.iter()
.map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
.collect();
let diff_norm: f64 = current_params
.iter()
.zip(cached_params.iter())
.map(|(a, b)| (a - b).powi(2))
.sum::<f64>()
.sqrt();
if diff_norm < cache.threshold {
return Ok(cached_matrix.clone());
}
}
}
}
let mut fisher = Array2::zeros((n_params, n_params));
for i in 0..n_params {
for j in i..n_params {
let value = gradients[&param_names[i]] * gradients[&param_names[j]];
fisher[[i, j]] = value;
fisher[[j, i]] = value;
}
}
for i in 0..n_params {
fisher[[i, i]] += regularization;
}
let fisher_inv = Self::invert_fisher_matrix(&fisher)?;
if let Some(cache) = &self.fisher_cache {
let current_params: Vec<f64> = param_names
.iter()
.map(|name| circuit.get_parameters().get(name).copied().unwrap_or(0.0))
.collect();
*cache
.matrix
.lock()
.map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))? =
Some(fisher_inv.clone());
*cache
.params
.lock()
.map_err(|e| QuantRS2Error::RuntimeError(format!("Lock poisoned: {e}")))? =
Some(current_params);
}
Ok(fisher_inv)
}
fn apply_fisher_inverse(
&self,
fisher_inv: &Array2<f64>,
gradients: &FxHashMap<String, f64>,
) -> FxHashMap<String, f64> {
let param_names: Vec<_> = gradients.keys().cloned().collect();
let grad_vec: Vec<f64> = param_names.iter().map(|name| gradients[name]).collect();
let grad_array = Array1::from_vec(grad_vec);
let natural_grad = fisher_inv.dot(&grad_array);
let mut result = FxHashMap::default();
for (i, name) in param_names.iter().enumerate() {
result.insert(name.clone(), natural_grad[i]);
}
result
}
fn invert_fisher_matrix(fisher: &Array2<f64>) -> QuantRS2Result<Array2<f64>> {
let n = fisher.nrows();
if n == 0 {
return Ok(Array2::zeros((0, 0)));
}
if n == 1 {
let val = fisher[[0, 0]];
if val.abs() < 1e-14 {
return Err(QuantRS2Error::InvalidInput(
"Fisher matrix is singular (1×1 zero)".to_string(),
));
}
let mut inv_mat = Array2::zeros((1, 1));
inv_mat[[0, 0]] = 1.0 / val;
return Ok(inv_mat);
}
scirs2_linalg::inv(&fisher.view(), None).map_err(|e| {
QuantRS2Error::InvalidInput(format!("Fisher matrix inversion failed: {e:?}"))
})
}
}
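/// Final parameters and run statistics returned by `optimize`
/// (`optimization_time` is in seconds).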
#[derive(Debug, Clone)]
pub struct OptimizationResult {
pub optimal_parameters: FxHashMap<String, f64>,
pub final_loss: f64,
pub iterations: usize,
pub converged: bool,
pub optimization_time: f64,
pub history: OptimizationHistory,
}
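/// Preset for VQE-style problems: L-BFGS with tight tolerances and gradient clipping.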
pub fn create_vqe_optimizer() -> VariationalQuantumOptimizer {
let config = OptimizationConfig {
max_iterations: 200,
f_tol: 1e-10,
g_tol: 1e-10,
parallel_gradients: true,
grad_clip: Some(1.0),
..Default::default()
};
VariationalQuantumOptimizer::new(OptimizationMethod::LBFGS { memory_size: 10 }, config)
}
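/// Preset for QAOA-style problems: BFGS with default tolerances.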
pub fn create_qaoa_optimizer() -> VariationalQuantumOptimizer {
let config = OptimizationConfig {
max_iterations: 100,
parallel_gradients: true,
..Default::default()
};
VariationalQuantumOptimizer::new(OptimizationMethod::BFGS, config)
}
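/// Preset natural-gradient optimizer with a small Tikhonov regularization.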
pub fn create_natural_gradient_optimizer(learning_rate: f64) -> VariationalQuantumOptimizer {
let config = OptimizationConfig {
max_iterations: 100,
parallel_gradients: true,
..Default::default()
};
VariationalQuantumOptimizer::new(
OptimizationMethod::NaturalGradient {
learning_rate,
regularization: 1e-4,
},
config,
)
}
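/// Preset SPSA optimizer using Spall's standard decay exponents
/// (alpha = 0.602, gamma = 0.101) and a fixed seed for reproducibility.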
pub fn create_spsa_optimizer() -> VariationalQuantumOptimizer {
let config = OptimizationConfig {
max_iterations: 500,
seed: Some(42),
..Default::default()
};
VariationalQuantumOptimizer::new(
OptimizationMethod::SPSA {
a: 0.1,
c: 0.1,
alpha: 0.602,
gamma: 0.101,
},
config,
)
}
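/// Wraps a base optimizer and enforces constraints through quadratic
/// penalties: each violated constraint adds `penalty_weight * violation^2`
/// to the cost (the weight is fixed at 1000 in `optimize`).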
pub struct ConstrainedVariationalOptimizer {
base_optimizer: VariationalQuantumOptimizer,
constraints: Vec<Constraint>,
}
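/// A single constraint: `function` is evaluated on the parameter map and
/// compared with `value` (Eq: must equal it; Ineq: must not exceed it).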
#[derive(Clone)]
pub struct Constraint {
pub function: Arc<dyn Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync>,
pub constraint_type: ConstraintType,
pub value: f64,
}
#[derive(Debug, Clone, Copy)]
pub enum ConstraintType {
Eq,
Ineq,
}
impl ConstrainedVariationalOptimizer {
pub const fn new(base_optimizer: VariationalQuantumOptimizer) -> Self {
Self {
base_optimizer,
constraints: Vec::new(),
}
}
pub fn add_equality_constraint(
&mut self,
constraint_fn: impl Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync + 'static,
value: f64,
) {
self.constraints.push(Constraint {
function: Arc::new(constraint_fn),
constraint_type: ConstraintType::Eq,
value,
});
}
pub fn add_inequality_constraint(
&mut self,
constraint_fn: impl Fn(&FxHashMap<String, f64>) -> f64 + Send + Sync + 'static,
value: f64,
) {
self.constraints.push(Constraint {
function: Arc::new(constraint_fn),
constraint_type: ConstraintType::Ineq,
value,
});
}
pub fn optimize(
&mut self,
circuit: &mut VariationalCircuit,
cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + 'static,
) -> QuantRS2Result<OptimizationResult> {
if self.constraints.is_empty() {
return self.base_optimizer.optimize(circuit, cost_fn);
}
let cost_fn = Arc::new(cost_fn);
let constraints = self.constraints.clone();
let penalty_weight = 1000.0;
let penalized_cost = move |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
let base_cost = cost_fn(circuit)?;
let params = circuit.get_parameters();
let mut penalty = 0.0;
for constraint in &constraints {
let constraint_value = (constraint.function)(&params);
match constraint.constraint_type {
ConstraintType::Eq => {
penalty += penalty_weight * (constraint_value - constraint.value).powi(2);
}
ConstraintType::Ineq => {
if constraint_value > constraint.value {
penalty +=
penalty_weight * (constraint_value - constraint.value).powi(2);
}
}
}
}
Ok(base_cost + penalty)
};
self.base_optimizer.optimize(circuit, penalized_cost)
}
}
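/// Random-search hyperparameter tuner: each trial samples hyperparameters
/// uniformly from their ranges, runs a short inner optimization (BFGS,
/// 50 iterations), and the best trial is kept.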
pub struct HyperparameterOptimizer {
search_space: FxHashMap<String, (f64, f64)>,
n_trials: usize,
inner_method: OptimizationMethod,
}
impl HyperparameterOptimizer {
pub fn new(n_trials: usize) -> Self {
Self {
search_space: FxHashMap::default(),
n_trials,
inner_method: OptimizationMethod::BFGS,
}
}
pub fn add_hyperparameter(&mut self, name: String, min_value: f64, max_value: f64) {
self.search_space.insert(name, (min_value, max_value));
}
pub fn optimize(
&self,
circuit_builder: impl Fn(&FxHashMap<String, f64>) -> VariationalCircuit + Send + Sync,
cost_fn: impl Fn(&VariationalCircuit) -> QuantRS2Result<f64> + Send + Sync + Clone + 'static,
) -> QuantRS2Result<HyperparameterResult> {
use scirs2_core::random::prelude::*;
let mut rng = StdRng::from_seed(thread_rng().random());
let mut best_hyperparams = FxHashMap::default();
let mut best_loss = f64::INFINITY;
let mut all_trials = Vec::new();
for _trial in 0..self.n_trials {
let mut hyperparams = FxHashMap::default();
for (name, &(min_val, max_val)) in &self.search_space {
let value = rng.random_range(min_val..max_val);
hyperparams.insert(name.clone(), value);
}
let mut circuit = circuit_builder(&hyperparams);
let config = OptimizationConfig {
max_iterations: 50,
..Default::default()
};
let mut optimizer = VariationalQuantumOptimizer::new(self.inner_method.clone(), config);
let result = optimizer.optimize(&mut circuit, cost_fn.clone())?;
all_trials.push(HyperparameterTrial {
hyperparameters: hyperparams.clone(),
final_loss: result.final_loss,
optimal_parameters: result.optimal_parameters,
});
if result.final_loss < best_loss {
best_loss = result.final_loss;
best_hyperparams = hyperparams;
}
}
Ok(HyperparameterResult {
best_hyperparameters: best_hyperparams,
best_loss,
all_trials,
})
}
}
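/// Best hyperparameters and the full list of trials from a random search.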
#[derive(Debug, Clone)]
pub struct HyperparameterResult {
pub best_hyperparameters: FxHashMap<String, f64>,
pub best_loss: f64,
pub all_trials: Vec<HyperparameterTrial>,
}
#[derive(Debug, Clone)]
pub struct HyperparameterTrial {
pub hyperparameters: FxHashMap<String, f64>,
pub final_loss: f64,
pub optimal_parameters: FxHashMap<String, f64>,
}
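// Manual Clone so the optimizer can snapshot circuits during gradient
// evaluation; VariationalCircuit does not derive Clone itself.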
impl Clone for VariationalCircuit {
fn clone(&self) -> Self {
Self {
gates: self.gates.clone(),
param_map: self.param_map.clone(),
num_qubits: self.num_qubits,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::qubit::QubitId;
use crate::variational::VariationalGate;
#[test]
fn test_gradient_descent_optimizer() {
let mut circuit = VariationalCircuit::new(1);
circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));
let config = OptimizationConfig {
max_iterations: 10,
..Default::default()
};
let mut optimizer = VariationalQuantumOptimizer::new(
OptimizationMethod::GradientDescent { learning_rate: 0.1 },
config,
);
let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
let theta = circuit
.get_parameters()
.get("theta")
.copied()
.unwrap_or(0.0);
Ok((theta - 1.0).powi(2))
};
let result = optimizer
.optimize(&mut circuit, cost_fn)
.expect("Optimization should succeed");
assert!(result.converged || result.iterations == 10);
assert!((result.optimal_parameters["theta"] - 1.0).abs() < 0.1);
}
#[test]
fn test_adam_optimizer() {
let mut circuit = VariationalCircuit::new(2);
circuit.add_gate(VariationalGate::ry(QubitId(0), "alpha".to_string(), 0.5));
circuit.add_gate(VariationalGate::rz(QubitId(1), "beta".to_string(), 0.5));
let config = OptimizationConfig {
max_iterations: 100,
f_tol: 1e-6,
g_tol: 1e-6,
..Default::default()
};
let mut optimizer = VariationalQuantumOptimizer::new(
OptimizationMethod::Adam {
learning_rate: 0.1,
beta1: 0.9,
beta2: 0.999,
epsilon: 1e-8,
},
config,
);
let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
let params = circuit.get_parameters();
let alpha = params.get("alpha").copied().unwrap_or(0.0);
let beta = params.get("beta").copied().unwrap_or(0.0);
Ok(alpha.powi(2) + beta.powi(2))
};
let result = optimizer
.optimize(&mut circuit, cost_fn)
.expect("Optimization should succeed");
assert!(result.optimal_parameters["alpha"].abs() < 0.1);
assert!(result.optimal_parameters["beta"].abs() < 0.1);
}
#[test]
fn test_constrained_optimization() {
let mut circuit = VariationalCircuit::new(1);
circuit.add_gate(VariationalGate::rx(QubitId(0), "x".to_string(), 2.0));
let base_optimizer =
VariationalQuantumOptimizer::new(OptimizationMethod::BFGS, Default::default());
let mut constrained_opt = ConstrainedVariationalOptimizer::new(base_optimizer);
constrained_opt
.add_inequality_constraint(|params| 1.0 - params.get("x").copied().unwrap_or(0.0), 0.0);
let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
let x = circuit.get_parameters().get("x").copied().unwrap_or(0.0);
Ok(x.powi(2))
};
let result = constrained_opt
.optimize(&mut circuit, cost_fn)
.expect("Constrained optimization should succeed");
let optimized_x = result.optimal_parameters["x"];
assert!(optimized_x >= 1.0 - 1e-6);
assert!(optimized_x <= 2.0 + 1e-6);
}
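// A minimal sketch exercising the SPSA path on the same quadratic cost as
// the gradient-descent test; the loose tolerance hedges the stochastic
// perturbation signs.
#[test]
fn test_spsa_optimizer() {
let mut circuit = VariationalCircuit::new(1);
circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));
let mut optimizer = create_spsa_optimizer();
let cost_fn = |circuit: &VariationalCircuit| -> QuantRS2Result<f64> {
let theta = circuit
.get_parameters()
.get("theta")
.copied()
.unwrap_or(0.0);
Ok((theta - 1.0).powi(2))
};
let result = optimizer
.optimize(&mut circuit, cost_fn)
.expect("SPSA optimization should succeed");
assert!((result.optimal_parameters["theta"] - 1.0).abs() < 0.2);
}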
}