//! Flux bias optimization for quantum annealing hardware.
//!
//! Tunes per-qubit flux biases, optionally using calibration data, to lower the
//! average energy of samples drawn from an embedded Ising or QUBO problem.

use crate::embedding::Embedding;
use crate::ising::{IsingError, IsingModel, IsingResult, QuboModel};
use std::collections::HashMap;
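/// Configuration for flux bias optimization.
///
/// `initial_flux_bias` seeds every hardware qubit, `max_flux_bias` bounds the
/// magnitude of each bias, `step_size` drives the grid search, and
/// `learning_rate` / `regularization` control the gradient-based refinement.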
#[derive(Debug, Clone)]
pub struct FluxBiasConfig {
pub initial_flux_bias: f64,
pub max_flux_bias: f64,
pub step_size: f64,
pub num_samples: usize,
pub use_gradients: bool,
pub learning_rate: f64,
pub regularization: f64,
}
impl Default for FluxBiasConfig {
fn default() -> Self {
Self {
initial_flux_bias: 0.0,
            max_flux_bias: 0.1,
            step_size: 0.01,
num_samples: 100,
use_gradients: true,
learning_rate: 0.01,
regularization: 0.001,
}
}
}
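/// Result of a flux bias optimization run.
///
/// `energy_improvement` is the initial average sample energy minus the final
/// one, and `solution_quality` is the fraction of embedding chains whose
/// qubits agree across the provided samples.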
#[derive(Debug, Clone)]
pub struct FluxBiasResult {
pub flux_biases: HashMap<usize, f64>,
pub energy_improvement: f64,
pub iterations: usize,
pub solution_quality: f64,
pub calibration_corrections: HashMap<usize, f64>,
}
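/// Optimizer that tunes per-qubit flux biases to reduce the average energy of
/// hardware samples for an embedded problem.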
pub struct FluxBiasOptimizer {
config: FluxBiasConfig,
calibration_data: Option<CalibrationData>,
adjustment_history: Vec<HashMap<usize, f64>>,
}
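/// Hardware calibration data used to pre-correct flux biases before optimization.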
#[derive(Debug, Clone)]
pub struct CalibrationData {
pub nominal_biases: HashMap<usize, f64>,
pub bias_errors: HashMap<usize, f64>,
pub coupling_errors: HashMap<(usize, usize), f64>,
pub qubit_temperatures: HashMap<usize, f64>,
}
impl FluxBiasOptimizer {
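    /// Creates an optimizer with the given configuration and no calibration data.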
#[must_use]
pub const fn new(config: FluxBiasConfig) -> Self {
Self {
config,
calibration_data: None,
adjustment_history: Vec::new(),
}
}
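    /// Supplies hardware calibration data used to pre-correct the initial flux biases.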
pub fn set_calibration_data(&mut self, data: CalibrationData) {
self.calibration_data = Some(data);
}
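    /// Optimizes flux biases for an Ising model given an embedding and hardware samples.
    ///
    /// Each embedded hardware qubit starts at the configured initial bias,
    /// calibration corrections are applied if available, and the biases are then
    /// refined by gradient descent or grid search. The returned result reports the
    /// average-energy improvement and chain-satisfaction quality.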
pub fn optimize_ising(
&mut self,
model: &IsingModel,
embedding: &Embedding,
samples: &[Vec<i8>],
) -> IsingResult<FluxBiasResult> {
let mut result = FluxBiasResult {
flux_biases: HashMap::new(),
energy_improvement: 0.0,
iterations: 0,
solution_quality: 0.0,
calibration_corrections: HashMap::new(),
};
let hardware_qubits = self.get_hardware_qubits(embedding);
for qubit in hardware_qubits {
result
.flux_biases
.insert(qubit, self.config.initial_flux_bias);
}
if let Some(calibration) = &self.calibration_data {
self.apply_calibration_corrections(&mut result.flux_biases, calibration);
result
.calibration_corrections
.clone_from(&result.flux_biases);
}
let initial_energy = self.compute_average_energy(model, samples)?;
if self.config.use_gradients {
result = self.optimize_with_gradients(model, embedding, samples, result)?;
} else {
result = self.optimize_grid_search(model, embedding, samples, result)?;
}
let final_energy =
self.compute_average_energy_with_flux(model, samples, &result.flux_biases)?;
result.energy_improvement = initial_energy - final_energy;
result.solution_quality = self.compute_solution_quality(samples, embedding);
Ok(result)
}
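    /// Optimizes flux biases for a QUBO model by first converting it to an Ising model.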
pub fn optimize_qubo(
&mut self,
model: &QuboModel,
embedding: &Embedding,
samples: &[Vec<i8>],
) -> IsingResult<FluxBiasResult> {
let (ising, _offset) = model.to_ising();
self.optimize_ising(&ising, embedding, samples)
}
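    /// Returns the sorted, deduplicated hardware qubits used by the embedding's chains.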
fn get_hardware_qubits(&self, embedding: &Embedding) -> Vec<usize> {
let mut hardware_qubits = Vec::new();
for chain in embedding.chains.values() {
for &qubit in chain {
if !hardware_qubits.contains(&qubit) {
hardware_qubits.push(qubit);
}
}
}
hardware_qubits.sort_unstable();
hardware_qubits
}
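    /// Adjusts flux biases using per-qubit bias errors and temperature data.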
fn apply_calibration_corrections(
&self,
flux_biases: &mut HashMap<usize, f64>,
calibration: &CalibrationData,
) {
        for (qubit, flux_bias) in flux_biases.iter_mut() {
            // Compensate for half of the measured per-qubit bias error.
            if let Some(&error) = calibration.bias_errors.get(qubit) {
                *flux_bias -= error * 0.5;
            }
            // Shift proportionally to the deviation from the reference
            // qubit temperature of 15.0.
            if let Some(&temp) = calibration.qubit_temperatures.get(qubit) {
                let temp_correction = (temp - 15.0) * 0.001;
                *flux_bias += temp_correction;
            }
        }
}
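    /// Refines flux biases by gradient descent on the average sample energy,
    /// clamping each bias to the configured range.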
fn optimize_with_gradients(
&mut self,
model: &IsingModel,
embedding: &Embedding,
samples: &[Vec<i8>],
mut result: FluxBiasResult,
) -> IsingResult<FluxBiasResult> {
let max_iterations = 50;
let tolerance = 1e-6;
for iteration in 0..max_iterations {
result.iterations = iteration + 1;
let gradients =
self.compute_gradients(model, embedding, samples, &result.flux_biases)?;
let mut converged = true;
for (qubit, gradient) in gradients {
if let Some(flux_bias) = result.flux_biases.get_mut(&qubit) {
let update = -self.config.learning_rate * gradient;
if update.abs() > tolerance {
converged = false;
}
                        *flux_bias = (*flux_bias + update)
                            .clamp(-self.config.max_flux_bias, self.config.max_flux_bias);
}
}
self.adjustment_history.push(result.flux_biases.clone());
if converged {
break;
}
}
Ok(result)
}
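    /// Refines flux biases with a coordinate-wise grid search over `±step_size` moves.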
fn optimize_grid_search(
&self,
model: &IsingModel,
embedding: &Embedding,
samples: &[Vec<i8>],
mut result: FluxBiasResult,
) -> IsingResult<FluxBiasResult> {
let hardware_qubits = self.get_hardware_qubits(embedding);
let mut best_energy = f64::INFINITY;
let mut best_flux_biases = result.flux_biases.clone();
for _ in 0..10 {
result.iterations += 1;
let mut improved = false;
for &qubit in &hardware_qubits {
let current_flux = result.flux_biases.get(&qubit).copied().unwrap_or(0.0);
for delta in [-self.config.step_size, 0.0, self.config.step_size] {
                    let new_flux = (current_flux + delta)
                        .clamp(-self.config.max_flux_bias, self.config.max_flux_bias);
result.flux_biases.insert(qubit, new_flux);
let energy =
self.compute_average_energy_with_flux(model, samples, &result.flux_biases)?;
if energy < best_energy {
best_energy = energy;
best_flux_biases.clone_from(&result.flux_biases);
improved = true;
}
}
if let Some(&best_flux) = best_flux_biases.get(&qubit) {
result.flux_biases.insert(qubit, best_flux);
}
}
if !improved {
break;
}
}
result.flux_biases = best_flux_biases;
Ok(result)
}
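    /// Estimates the gradient of the average sample energy with respect to each flux
    /// bias using central finite differences, plus an L2 regularization term.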
fn compute_gradients(
&self,
model: &IsingModel,
embedding: &Embedding,
samples: &[Vec<i8>],
flux_biases: &HashMap<usize, f64>,
) -> IsingResult<HashMap<usize, f64>> {
let mut gradients = HashMap::new();
let epsilon = 0.001;
        for (qubit, &current_flux) in flux_biases {
let mut flux_plus = flux_biases.clone();
flux_plus.insert(*qubit, current_flux + epsilon);
let energy_plus = self.compute_average_energy_with_flux(model, samples, &flux_plus)?;
let mut flux_minus = flux_biases.clone();
flux_minus.insert(*qubit, current_flux - epsilon);
let energy_minus =
self.compute_average_energy_with_flux(model, samples, &flux_minus)?;
let gradient = self
.config
.regularization
.mul_add(current_flux, (energy_plus - energy_minus) / (2.0 * epsilon));
gradients.insert(*qubit, gradient);
}
Ok(gradients)
}
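    /// Computes the average model energy over the samples that evaluate successfully.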
fn compute_average_energy(&self, model: &IsingModel, samples: &[Vec<i8>]) -> IsingResult<f64> {
let mut total_energy = 0.0;
let mut valid_samples = 0;
for sample in samples {
match model.energy(sample) {
Ok(energy) => {
total_energy += energy;
valid_samples += 1;
}
Err(_) => continue,
}
}
if valid_samples == 0 {
return Err(IsingError::InvalidValue("No valid samples".to_string()));
}
Ok(total_energy / f64::from(valid_samples))
}
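    /// Computes the average sample energy including the linear contribution
    /// `flux_bias * spin` for each biased qubit.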
fn compute_average_energy_with_flux(
&self,
model: &IsingModel,
samples: &[Vec<i8>],
flux_biases: &HashMap<usize, f64>,
) -> IsingResult<f64> {
let mut total_energy = 0.0;
let mut valid_samples = 0;
for sample in samples {
match model.energy(sample) {
Ok(mut energy) => {
for (qubit, &flux_bias) in flux_biases {
if *qubit < sample.len() {
energy += flux_bias * f64::from(sample[*qubit]);
}
}
total_energy += energy;
valid_samples += 1;
}
Err(_) => continue,
}
}
if valid_samples == 0 {
return Err(IsingError::InvalidValue("No valid samples".to_string()));
}
Ok(total_energy / f64::from(valid_samples))
}
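    /// Returns the fraction of (sample, chain) pairs in which every qubit of the
    /// chain takes the same value; returns 1.0 if there are no multi-qubit chains.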
fn compute_solution_quality(&self, samples: &[Vec<i8>], embedding: &Embedding) -> f64 {
let mut chain_satisfaction = 0.0;
let mut total_chains = 0;
        for sample in samples {
            for chain in embedding.chains.values() {
                // Skip single-qubit chains and chains that index past the sample length.
                if chain.len() > 1 && chain.iter().all(|&q| q < sample.len()) {
                    total_chains += 1;
                    let first_val = sample[chain[0]];
                    let satisfied = chain[1..].iter().all(|&q| sample[q] == first_val);
                    if satisfied {
                        chain_satisfaction += 1.0;
                    }
                }
            }
        }
if total_chains > 0 {
chain_satisfaction / f64::from(total_chains)
} else {
1.0
}
}
}
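/// Flux bias optimizer that stores and reuses learned bias patterns keyed by problem type.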
pub struct MLFluxBiasOptimizer {
base_optimizer: FluxBiasOptimizer,
learned_patterns: HashMap<String, Vec<f64>>,
pattern_threshold: f64,
}
impl MLFluxBiasOptimizer {
#[must_use]
pub fn new(config: FluxBiasConfig) -> Self {
Self {
base_optimizer: FluxBiasOptimizer::new(config),
learned_patterns: HashMap::new(),
pattern_threshold: 0.8,
}
}
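    /// Records a flux bias pattern for the given problem type.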
    pub fn learn_pattern(&mut self, problem_type: &str, flux_biases: &HashMap<usize, f64>) {
        // Collect biases in ascending qubit order so the stored pattern is
        // deterministic (HashMap iteration order is unspecified).
        let mut entries: Vec<(usize, f64)> = flux_biases.iter().map(|(&q, &b)| (q, b)).collect();
        entries.sort_unstable_by_key(|&(q, _)| q);
        let pattern: Vec<f64> = entries.into_iter().map(|(_, b)| b).collect();
        self.learned_patterns
            .insert(problem_type.to_string(), pattern);
    }
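    /// Returns flux biases for the first `num_qubits` qubits from a previously learned
    /// pattern, or `None` if no pattern is stored for the problem type.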
#[must_use]
pub fn apply_learned_patterns(
&self,
problem_type: &str,
num_qubits: usize,
) -> Option<HashMap<usize, f64>> {
if let Some(pattern) = self.learned_patterns.get(problem_type) {
let mut flux_biases = HashMap::new();
for (i, &value) in pattern.iter().take(num_qubits).enumerate() {
flux_biases.insert(i, value);
}
Some(flux_biases)
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_flux_bias_optimizer_creation() {
let config = FluxBiasConfig::default();
let optimizer = FluxBiasOptimizer::new(config);
assert!(optimizer.adjustment_history.is_empty());
}
#[test]
fn test_calibration_data() {
let mut calibration = CalibrationData {
nominal_biases: HashMap::new(),
bias_errors: HashMap::new(),
coupling_errors: HashMap::new(),
qubit_temperatures: HashMap::new(),
};
calibration.bias_errors.insert(0, 0.01);
calibration.qubit_temperatures.insert(0, 16.0);
assert_eq!(calibration.bias_errors.get(&0), Some(&0.01));
}
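    // Illustrative round-trip check of the learned-pattern API (a minimal sketch:
    // it only asserts pattern length and that unknown problem types yield None).
    #[test]
    fn test_learned_pattern_round_trip() {
        let mut optimizer = MLFluxBiasOptimizer::new(FluxBiasConfig::default());
        let mut biases = HashMap::new();
        biases.insert(0, 0.01);
        biases.insert(1, -0.02);
        optimizer.learn_pattern("hypothetical_problem", &biases);
        let applied = optimizer
            .apply_learned_patterns("hypothetical_problem", 2)
            .expect("pattern should exist for a learned problem type");
        assert_eq!(applied.len(), 2);
        assert!(optimizer
            .apply_learned_patterns("unknown_problem", 2)
            .is_none());
    }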
#[test]
fn test_flux_bias_bounds() {
let config = FluxBiasConfig {
max_flux_bias: 0.1,
..Default::default()
};
assert!(config.max_flux_bias > 0.0);
        assert!(config.max_flux_bias < 1.0);
    }
}