use scirs2_core::ndarray::{Array1, Array2, Axis};
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex};
use crate::circuit_interfaces::CircuitInterface;
use crate::concatenated_error_correction::ErrorType;
use crate::error::Result;
/// Families of ML models that can drive the adaptive error-correction decoder.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MLModelType {
    /// Feed-forward neural-network classifier (the only family with a concrete
    /// implementation in this module; see `SyndromeClassificationNetwork`).
    NeuralNetwork,
    DecisionTree,
    SVM,
    ReinforcementLearning,
    /// Combination of several base models.
    Ensemble,
}
/// High-level learning regimes selectable in `AdaptiveMLConfig`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LearningStrategy {
    Supervised,
    Unsupervised,
    Reinforcement,
    /// Incremental updates as new syndromes arrive (the default).
    Online,
    Transfer,
}
/// Configuration for the adaptive ML error-correction pipeline.
#[derive(Debug, Clone)]
pub struct AdaptiveMLConfig {
    /// Which model family to use for syndrome classification.
    pub model_type: MLModelType,
    /// Learning regime (supervised, online, reinforcement, ...).
    pub learning_strategy: LearningStrategy,
    /// Step size shared by the classifier and the RL agent.
    pub learning_rate: f64,
    /// Mini-batch size used when retraining the classifier.
    pub batch_size: usize,
    /// Maximum number of training examples retained in the replay history.
    pub max_history_size: usize,
    /// Minimum classifier confidence required to apply an ML-driven
    /// correction; below this a classical fallback is used.
    pub confidence_threshold: f64,
    /// When true, every correction is recorded into the training history.
    pub real_time_learning: bool,
    /// Retrain the classifier every `update_frequency` corrections.
    pub update_frequency: usize,
    /// How raw syndromes are turned into feature vectors.
    pub feature_extraction: FeatureExtractionMethod,
    /// Whether hardware-specific information should be taken into account.
    /// NOTE(review): this flag is not read anywhere in this file — confirm
    /// whether it is consumed elsewhere or is dead configuration.
    pub hardware_aware: bool,
}
/// Strategies for converting a boolean syndrome into a numeric feature vector.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FeatureExtractionMethod {
    /// Direct 0/1 encoding of the syndrome bits (zero-padded to length >= 4).
    RawSyndrome,
    /// Naive DFT magnitudes/phases of the syndrome signal.
    FourierTransform,
    /// Projection onto precomputed PCA components (falls back to raw features
    /// when no components have been fitted).
    PCA,
    /// Encoding through a trained autoencoder (falls back to raw features
    /// when no autoencoder is present).
    Autoencoder,
    /// 1-D convolution over the syndrome with a fixed kernel.
    TemporalConvolution,
}
impl Default for AdaptiveMLConfig {
    /// Sensible defaults: an online neural network with raw-syndrome features,
    /// real-time learning, and retraining every 100 corrections.
    fn default() -> Self {
        Self {
            model_type: MLModelType::NeuralNetwork,
            learning_strategy: LearningStrategy::Online,
            learning_rate: 0.001,
            batch_size: 32,
            max_history_size: 10_000,
            confidence_threshold: 0.8,
            real_time_learning: true,
            update_frequency: 100,
            feature_extraction: FeatureExtractionMethod::RawSyndrome,
            hardware_aware: true,
        }
    }
}
/// A small fully-connected feed-forward network (ReLU hidden layers, softmax
/// output) used to classify error syndromes into error types.
#[derive(Debug, Clone)]
pub struct SyndromeClassificationNetwork {
    // Number of input features.
    input_size: usize,
    // Widths of the hidden layers, in order.
    hidden_sizes: Vec<usize>,
    // Number of output classes.
    output_size: usize,
    // weights[i] has shape (layer_sizes[i+1], layer_sizes[i]).
    weights: Vec<Array2<f64>>,
    // biases[i] has length layer_sizes[i+1].
    biases: Vec<Array1<f64>>,
    // SGD step size.
    learning_rate: f64,
    // (input, target) pairs; currently written nowhere in this file —
    // NOTE(review): looks like an unused placeholder, confirm before removal.
    training_history: Vec<(Array1<f64>, Array1<f64>)>,
}
impl SyndromeClassificationNetwork {
    /// Builds a network with the given layer widths.
    ///
    /// Weights use Xavier/Glorot-style initialization
    /// (`scale = sqrt(2 / (fan_in + fan_out))`, uniform in `[-scale, scale]`);
    /// biases start at zero.
    #[must_use]
    pub fn new(
        input_size: usize,
        hidden_sizes: Vec<usize>,
        output_size: usize,
        learning_rate: f64,
    ) -> Self {
        let mut layer_sizes = vec![input_size];
        layer_sizes.extend(&hidden_sizes);
        layer_sizes.push(output_size);
        let mut weights = Vec::new();
        let mut biases = Vec::new();
        for i in 0..layer_sizes.len() - 1 {
            let rows = layer_sizes[i + 1];
            let cols = layer_sizes[i];
            // Xavier scale keeps activation variance roughly constant per layer.
            let scale = (2.0 / (rows + cols) as f64).sqrt();
            let mut weight_matrix = Array2::zeros((rows, cols));
            for elem in &mut weight_matrix {
                *elem = (fastrand::f64() - 0.5) * 2.0 * scale;
            }
            weights.push(weight_matrix);
            biases.push(Array1::zeros(rows));
        }
        Self {
            input_size,
            hidden_sizes,
            output_size,
            weights,
            biases,
            learning_rate,
            training_history: Vec::new(),
        }
    }

    /// Forward pass: ReLU on hidden layers, numerically-stable softmax on the
    /// output layer. Returns a probability distribution over the classes.
    #[must_use]
    pub fn forward(&self, input: &Array1<f64>) -> Array1<f64> {
        let n_layers = self.weights.len();
        let mut activation = input.clone();
        // FIX: identify the output layer by index. The previous code compared
        // `weight == self.weights.last()` — an O(rows*cols) element-wise float
        // comparison per layer that would also misfire if two layers ever held
        // identical weights.
        for (i, (weight, bias)) in self.weights.iter().zip(self.biases.iter()).enumerate() {
            activation = weight.dot(&activation) + bias;
            if i + 1 == n_layers {
                // Softmax with max-subtraction so exp() cannot overflow.
                let max_val = activation.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
                activation.mapv_inplace(|x| (x - max_val).exp());
                let sum = activation.sum();
                activation.mapv_inplace(|x| x / sum);
            } else {
                // ReLU.
                activation.mapv_inplace(|x| x.max(0.0));
            }
        }
        activation
    }

    /// Trains on one mini-batch with plain SGD and returns the mean
    /// cross-entropy loss. Gradients are accumulated over the batch and
    /// applied once, scaled by `learning_rate / batch_size`.
    ///
    /// Returns `0.0` for an empty batch (previously this divided by zero and
    /// produced NaN).
    pub fn train_batch(&mut self, inputs: &[Array1<f64>], targets: &[Array1<f64>]) -> f64 {
        let batch_size = inputs.len();
        if batch_size == 0 {
            return 0.0;
        }
        let mut total_loss = 0.0;
        let mut weight_gradients: Vec<Array2<f64>> = self
            .weights
            .iter()
            .map(|w| Array2::zeros(w.raw_dim()))
            .collect();
        let mut bias_gradients: Vec<Array1<f64>> = self
            .biases
            .iter()
            .map(|b| Array1::zeros(b.raw_dim()))
            .collect();
        for (input, target) in inputs.iter().zip(targets.iter()) {
            let (loss, w_grads, b_grads) = self.backward(input, target);
            total_loss += loss;
            for (wg_acc, wg) in weight_gradients.iter_mut().zip(w_grads.iter()) {
                *wg_acc = &*wg_acc + wg;
            }
            for (bg_acc, bg) in bias_gradients.iter_mut().zip(b_grads.iter()) {
                *bg_acc = &*bg_acc + bg;
            }
        }
        // Average the accumulated gradients by folding 1/batch_size into the
        // step size.
        let lr = self.learning_rate / batch_size as f64;
        for (weight, gradient) in self.weights.iter_mut().zip(weight_gradients.iter()) {
            *weight = &*weight - &(gradient * lr);
        }
        for (bias, gradient) in self.biases.iter_mut().zip(bias_gradients.iter()) {
            *bias = &*bias - &(gradient * lr);
        }
        total_loss / batch_size as f64
    }

    /// Backpropagation for a single example.
    ///
    /// Returns `(cross-entropy loss, per-layer weight gradients, per-layer
    /// bias gradients)`, each indexed from the input layer outward.
    fn backward(
        &self,
        input: &Array1<f64>,
        target: &Array1<f64>,
    ) -> (f64, Vec<Array2<f64>>, Vec<Array1<f64>>) {
        let n_layers = self.weights.len();
        // Forward pass, caching pre-activations (z) and activations.
        let mut activations = vec![input.clone()];
        let mut z_values = Vec::new();
        for (i, (weight, bias)) in self.weights.iter().zip(self.biases.iter()).enumerate() {
            let last_activation = activations
                .last()
                .expect("activations should never be empty");
            let z = weight.dot(last_activation) + bias;
            z_values.push(z.clone());
            let mut activation = z;
            // Same index-based output-layer test as `forward` (see note there).
            if i + 1 == n_layers {
                let max_val = activation.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));
                activation.mapv_inplace(|x| (x - max_val).exp());
                let sum = activation.sum();
                activation.mapv_inplace(|x| x / sum);
            } else {
                activation.mapv_inplace(|x| x.max(0.0));
            }
            activations.push(activation);
        }
        let output = activations
            .last()
            .expect("activations should have output from forward pass");
        // Cross-entropy loss. FIX: clamp the ln() argument so an output
        // probability of exactly 0 yields a large-but-finite loss instead of
        // -inf (which previously poisoned the reported batch loss).
        let loss = -target
            .iter()
            .zip(output.iter())
            .map(|(&t, &o)| {
                if t > 0.0 {
                    t * o.max(f64::MIN_POSITIVE).ln()
                } else {
                    0.0
                }
            })
            .sum::<f64>();
        let mut weight_gradients = Vec::with_capacity(n_layers);
        let mut bias_gradients = Vec::with_capacity(self.biases.len());
        // Softmax + cross-entropy gives the simple output delta (y_hat - y).
        let mut delta = output - target;
        for i in (0..n_layers).rev() {
            // dL/dW_i = delta ⊗ activation_i (outer product via insert_axis).
            let weight_grad = delta
                .view()
                .insert_axis(Axis(1))
                .dot(&activations[i].view().insert_axis(Axis(0)));
            weight_gradients.insert(0, weight_grad);
            bias_gradients.insert(0, delta.clone());
            if i > 0 {
                // Propagate through W_i, then apply the ReLU derivative
                // (zero where the pre-activation was <= 0).
                delta = self.weights[i].t().dot(&delta);
                for (j, &z) in z_values[i - 1].iter().enumerate() {
                    if z <= 0.0 {
                        delta[j] = 0.0;
                    }
                }
            }
        }
        (loss, weight_gradients, bias_gradients)
    }

    /// Runs a forward pass and returns `(argmax class index, its probability)`.
    /// Falls back to class 0 / confidence 0.0 on an empty output.
    #[must_use]
    pub fn predict(&self, syndrome: &Array1<f64>) -> (usize, f64) {
        let output = self.forward(syndrome);
        let max_idx = output
            .iter()
            .enumerate()
            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(idx, _)| idx)
            .unwrap_or(0);
        let confidence = output.get(max_idx).copied().unwrap_or(0.0);
        (max_idx, confidence)
    }
}
/// Tabular Q-learning agent that selects correction actions from a syndrome
/// string state with epsilon-greedy exploration.
#[derive(Debug, Clone)]
pub struct ErrorCorrectionAgent {
    // Q-values per state string; each entry has `action_space_size` elements.
    q_table: HashMap<String, Array1<f64>>,
    // TD-update step size.
    learning_rate: f64,
    // Discount factor gamma for future rewards.
    discount_factor: f64,
    // Exploration probability; decayed every 1000 training steps.
    epsilon: f64,
    // Number of discrete actions available.
    action_space_size: usize,
    // Count of Q-updates performed (drives epsilon decay).
    training_steps: usize,
    // Recent episode rewards — NOTE(review): allocated but never pushed to in
    // this file; confirm intended use.
    episode_rewards: VecDeque<f64>,
}
impl ErrorCorrectionAgent {
    /// Creates an agent with an empty Q-table.
    #[must_use]
    pub fn new(
        action_space_size: usize,
        learning_rate: f64,
        discount_factor: f64,
        epsilon: f64,
    ) -> Self {
        Self {
            q_table: HashMap::new(),
            learning_rate,
            discount_factor,
            epsilon,
            action_space_size,
            training_steps: 0,
            episode_rewards: VecDeque::with_capacity(1000),
        }
    }

    /// Epsilon-greedy action selection: with probability `epsilon` a uniformly
    /// random action, otherwise the argmax of the state's Q-row (initializing
    /// an all-zero row for unseen states).
    pub fn select_action(&mut self, state: &str) -> usize {
        if fastrand::f64() < self.epsilon {
            fastrand::usize(0..self.action_space_size)
        } else {
            let q_values = self
                .q_table
                .entry(state.to_string())
                .or_insert_with(|| Array1::zeros(self.action_space_size));
            q_values
                .iter()
                .enumerate()
                .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
                .map(|(idx, _)| idx)
                .unwrap_or(0)
        }
    }

    /// Standard one-step Q-learning update:
    /// `Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))`.
    ///
    /// Also decays epsilon by 0.5% every 1000 updates (floored at 0.01).
    pub fn update_q_value(
        &mut self,
        state: &str,
        action: usize,
        reward: f64,
        next_state: &str,
        done: bool,
    ) {
        // FIX: read the single Q(s, a) entry directly. The previous code
        // cloned the entire Q-row just to extract one scalar, allocating a
        // fresh array on every update.
        let current_q_action = self
            .q_table
            .entry(state.to_string())
            .or_insert_with(|| Array1::zeros(self.action_space_size))
            .get(action)
            .copied()
            .unwrap_or(0.0);
        // Terminal states contribute no future value.
        let next_q_max = if done {
            0.0
        } else {
            let next_q_values = self
                .q_table
                .entry(next_state.to_string())
                .or_insert_with(|| Array1::zeros(self.action_space_size));
            next_q_values
                .iter()
                .fold(f64::NEG_INFINITY, |a, &b| a.max(b))
        };
        let td_target = self.discount_factor.mul_add(next_q_max, reward);
        let td_error = td_target - current_q_action;
        if let Some(q_values) = self.q_table.get_mut(state) {
            if action < q_values.len() {
                q_values[action] += self.learning_rate * td_error;
            }
        }
        self.training_steps += 1;
        if self.training_steps % 1000 == 0 {
            self.epsilon = (self.epsilon * 0.995).max(0.01);
        }
    }

    /// Reward shaping: +10 per error removed, minus the correction cost, with
    /// a +5 bonus for reaching a fully error-free state.
    #[must_use]
    pub fn calculate_reward(
        &self,
        errors_before: usize,
        errors_after: usize,
        correction_cost: f64,
    ) -> f64 {
        let error_reduction = errors_before as f64 - errors_after as f64;
        let reward = error_reduction.mul_add(10.0, -correction_cost);
        if errors_after == 0 {
            reward + 5.0
        } else {
            reward
        }
    }
}
/// Adaptive error-correction engine combining a syndrome classifier, a
/// Q-learning agent, and online retraining from a shared replay history.
pub struct AdaptiveMLErrorCorrection {
    // Pipeline configuration.
    config: AdaptiveMLConfig,
    // Neural network mapping syndrome features to error classes.
    classifier: SyndromeClassificationNetwork,
    // RL agent choosing which correction strategy to apply.
    rl_agent: ErrorCorrectionAgent,
    // Converts raw boolean syndromes into feature vectors.
    feature_extractor: FeatureExtractor,
    // Bounded replay buffer shared behind a mutex for retraining.
    training_history: Arc<Mutex<VecDeque<TrainingExample>>>,
    // Running quality statistics.
    metrics: CorrectionMetrics,
    // Interface to the circuit backend — NOTE(review): constructed but not
    // otherwise used in this file; confirm it is needed.
    circuit_interface: CircuitInterface,
    // Corrections performed since construction; triggers periodic retraining.
    update_counter: usize,
}
/// One replay-buffer entry recorded after a correction attempt.
#[derive(Debug, Clone)]
pub struct TrainingExample {
    /// Extracted feature vector (not the raw boolean syndrome).
    pub syndrome: Array1<f64>,
    /// Error class predicted by the classifier at the time.
    pub error_type: ErrorType,
    /// RL action that was taken.
    pub action: usize,
    /// Reward received for the correction.
    pub reward: f64,
    /// Seconds elapsed since the correction started — NOTE(review): this is a
    /// duration, not a wall-clock timestamp; confirm the intended semantics.
    pub timestamp: f64,
}
/// Turns boolean syndromes into numeric feature vectors using the configured
/// extraction method.
#[derive(Debug, Clone)]
pub struct FeatureExtractor {
    // Selected extraction strategy.
    method: FeatureExtractionMethod,
    // Fitted PCA projection matrix, if any (None => raw features).
    pca_components: Option<Array2<f64>>,
    // Trained encoder network, if any (None => raw features).
    autoencoder: Option<SyndromeClassificationNetwork>,
}
impl FeatureExtractor {
    /// Creates an extractor with no fitted PCA components or autoencoder.
    #[must_use]
    pub const fn new(method: FeatureExtractionMethod) -> Self {
        Self {
            method,
            pca_components: None,
            autoencoder: None,
        }
    }

    /// Converts the boolean syndrome to a 0.0/1.0 signal, zero-padded to a
    /// minimum length of 4 so downstream networks see a stable input size.
    /// (Extracted helper — this conversion was previously copy-pasted into
    /// every extraction method.)
    fn padded_signal(syndrome: &[bool]) -> Vec<f64> {
        let mut signal: Vec<f64> = syndrome
            .iter()
            .map(|&b| if b { 1.0 } else { 0.0 })
            .collect();
        while signal.len() < 4 {
            signal.push(0.0);
        }
        signal
    }

    /// Extracts a feature vector from `syndrome` using the configured method.
    #[must_use]
    pub fn extract_features(&self, syndrome: &[bool]) -> Array1<f64> {
        match self.method {
            FeatureExtractionMethod::RawSyndrome => {
                Array1::from_vec(Self::padded_signal(syndrome))
            }
            FeatureExtractionMethod::FourierTransform => self.fft_features(syndrome),
            FeatureExtractionMethod::PCA => self.pca_features(syndrome),
            FeatureExtractionMethod::Autoencoder => self.autoencoder_features(syndrome),
            FeatureExtractionMethod::TemporalConvolution => self.temporal_conv_features(syndrome),
        }
    }

    /// Naive O(n * k) DFT of the padded signal; emits (real, imaginary) pairs
    /// for the first `min(n, 8)` frequency bins.
    fn fft_features(&self, syndrome: &[bool]) -> Array1<f64> {
        let signal = Self::padded_signal(syndrome);
        let mut features = Vec::new();
        let n = signal.len();
        for k in 0..n.min(8) {
            let mut real_part = 0.0;
            let mut imag_part = 0.0;
            for (i, &x) in signal.iter().enumerate() {
                let angle = -2.0 * std::f64::consts::PI * k as f64 * i as f64 / n as f64;
                real_part += x * angle.cos();
                imag_part += x * angle.sin();
            }
            features.push(real_part);
            features.push(imag_part);
        }
        Array1::from_vec(features)
    }

    /// Projects the padded signal onto the fitted PCA components, or returns
    /// the raw features when no components have been fitted.
    fn pca_features(&self, syndrome: &[bool]) -> Array1<f64> {
        let raw_features = Array1::from_vec(Self::padded_signal(syndrome));
        if let Some(ref components) = self.pca_components {
            components.dot(&raw_features)
        } else {
            raw_features
        }
    }

    /// Encodes the padded signal through the autoencoder, or returns the raw
    /// features when no autoencoder is present.
    fn autoencoder_features(&self, syndrome: &[bool]) -> Array1<f64> {
        let raw_features = Array1::from_vec(Self::padded_signal(syndrome));
        if let Some(ref encoder) = self.autoencoder {
            encoder.forward(&raw_features)
        } else {
            raw_features
        }
    }

    /// Valid 1-D convolution with a fixed ascending kernel of size 3
    /// (weights 1/3, 2/3, 3/3). Falls back to the raw signal when the input
    /// is shorter than the kernel.
    fn temporal_conv_features(&self, syndrome: &[bool]) -> Array1<f64> {
        let signal = Self::padded_signal(syndrome);
        let kernel_size = 3;
        let mut features = Vec::new();
        for i in 0..signal.len().saturating_sub(kernel_size - 1) {
            let mut conv_sum = 0.0;
            for j in 0..kernel_size {
                conv_sum += signal[i + j] * (j as f64 + 1.0) / kernel_size as f64;
            }
            features.push(conv_sum);
        }
        if features.is_empty() {
            features = signal;
        }
        Array1::from_vec(features)
    }
}
/// Running quality statistics for the adaptive corrector.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CorrectionMetrics {
    /// Total correction attempts recorded.
    pub total_corrections: usize,
    /// Attempts that reduced the error count.
    pub successful_corrections: usize,
    /// Attempts that increased the error count.
    pub false_positives: usize,
    /// Missed errors — NOTE(review): never incremented in this file; confirm
    /// whether a caller maintains it.
    pub false_negatives: usize,
    /// Running mean of classifier confidence across all corrections.
    pub average_confidence: f64,
    /// Per-retraining-batch loss values.
    pub learning_curve: Vec<f64>,
    /// Most recent rewards (capped at 1000 entries).
    pub reward_history: Vec<f64>,
    /// Average correction latency in milliseconds — NOTE(review): never
    /// updated in this file; confirm intended use.
    pub avg_correction_time_ms: f64,
}
impl CorrectionMetrics {
    /// Fraction of recorded corrections that were successful.
    /// Defined as 1.0 when nothing has been recorded yet.
    #[must_use]
    pub fn accuracy(&self) -> f64 {
        match self.total_corrections {
            0 => 1.0,
            total => self.successful_corrections as f64 / total as f64,
        }
    }

    /// Precision = TP / (TP + FP); 1.0 when nothing was predicted positive.
    #[must_use]
    pub fn precision(&self) -> f64 {
        let tp = self.successful_corrections;
        match tp + self.false_positives {
            0 => 1.0,
            predicted => tp as f64 / predicted as f64,
        }
    }

    /// Recall = TP / (TP + FN); 1.0 when there were no actual positives.
    #[must_use]
    pub fn recall(&self) -> f64 {
        let tp = self.successful_corrections;
        match tp + self.false_negatives {
            0 => 1.0,
            actual => tp as f64 / actual as f64,
        }
    }

    /// Harmonic mean of precision and recall; 0.0 when both are zero.
    #[must_use]
    pub fn f1_score(&self) -> f64 {
        let (p, r) = (self.precision(), self.recall());
        if p + r == 0.0 {
            0.0
        } else {
            2.0 * p * r / (p + r)
        }
    }
}
impl AdaptiveMLErrorCorrection {
pub fn new(config: AdaptiveMLConfig) -> Result<Self> {
let circuit_interface = CircuitInterface::new(Default::default())?;
let feature_extractor = FeatureExtractor::new(config.feature_extraction);
let test_syndrome = vec![false, false, false, false]; let test_features = feature_extractor.extract_features(&test_syndrome);
let input_size = test_features.len();
let hidden_sizes = vec![input_size * 2, input_size]; let output_size = 4; let classifier = SyndromeClassificationNetwork::new(
input_size,
hidden_sizes,
output_size,
config.learning_rate,
);
let action_space_size = 8; let rl_agent = ErrorCorrectionAgent::new(
action_space_size,
config.learning_rate,
0.99, 0.1, );
let training_history =
Arc::new(Mutex::new(VecDeque::with_capacity(config.max_history_size)));
Ok(Self {
config,
classifier,
rl_agent,
feature_extractor,
training_history,
metrics: CorrectionMetrics::default(),
circuit_interface,
update_counter: 0,
})
}
pub fn correct_errors_adaptive(
&mut self,
state: &mut Array1<Complex64>,
syndrome: &[bool],
) -> Result<AdaptiveCorrectionResult> {
let start_time = std::time::Instant::now();
let features = self.feature_extractor.extract_features(syndrome);
let (predicted_error_class, confidence) = self.classifier.predict(&features);
let predicted_error_type = self.class_to_error_type(predicted_error_class);
let state_repr = self.syndrome_to_string(syndrome);
let action = self.rl_agent.select_action(&state_repr);
let errors_before = self.count_errors(state, syndrome);
let correction_applied = if confidence >= self.config.confidence_threshold {
self.apply_ml_correction(state, predicted_error_type, action)?;
true
} else {
self.apply_classical_correction(state, syndrome)?;
false
};
let errors_after = self.count_errors(state, syndrome);
let reward = self
.rl_agent
.calculate_reward(errors_before, errors_after, 1.0);
let next_state_repr = self.state_to_string(state);
self.rl_agent.update_q_value(
&state_repr,
action,
reward,
&next_state_repr,
errors_after == 0,
);
if self.config.real_time_learning {
let training_example = TrainingExample {
syndrome: features,
error_type: predicted_error_type,
action,
reward,
timestamp: start_time.elapsed().as_secs_f64(),
};
if let Ok(mut history) = self.training_history.lock() {
history.push_back(training_example);
if history.len() > self.config.max_history_size {
history.pop_front();
}
}
}
self.update_metrics(errors_before, errors_after, confidence, reward);
self.update_counter += 1;
if self.update_counter % self.config.update_frequency == 0 {
self.retrain_models()?;
}
let processing_time = start_time.elapsed().as_secs_f64() * 1000.0;
Ok(AdaptiveCorrectionResult {
predicted_error_type,
confidence,
correction_applied,
errors_corrected: errors_before.saturating_sub(errors_after),
reward,
processing_time_ms: processing_time,
rl_action: action,
})
}
fn apply_ml_correction(
&self,
state: &mut Array1<Complex64>,
error_type: ErrorType,
action: usize,
) -> Result<()> {
match action {
0 => {
self.apply_single_qubit_correction(state, error_type, 0)?;
}
1 => {
self.apply_two_qubit_correction(state, error_type, 0, 1)?;
}
2 => {
self.apply_syndrome_based_correction(state, error_type)?;
}
3 => {
self.apply_probabilistic_correction(state, error_type)?;
}
_ => {
self.apply_single_qubit_correction(state, error_type, 0)?;
}
}
Ok(())
}
fn apply_single_qubit_correction(
&self,
state: &mut Array1<Complex64>,
error_type: ErrorType,
qubit: usize,
) -> Result<()> {
let n_qubits = (state.len() as f64).log2().ceil() as usize;
if qubit >= n_qubits {
return Ok(());
}
match error_type {
ErrorType::BitFlip => {
for i in 0..state.len() {
if (i >> qubit) & 1 == 0 {
let partner = i | (1 << qubit);
if partner < state.len() {
state.swap(i, partner);
}
}
}
}
ErrorType::PhaseFlip => {
for i in 0..state.len() {
if (i >> qubit) & 1 == 1 {
state[i] *= -1.0;
}
}
}
ErrorType::BitPhaseFlip => {
self.apply_single_qubit_correction(state, ErrorType::PhaseFlip, qubit)?;
self.apply_single_qubit_correction(state, ErrorType::BitFlip, qubit)?;
}
ErrorType::Identity => {
}
}
Ok(())
}
fn apply_two_qubit_correction(
&self,
state: &mut Array1<Complex64>,
error_type: ErrorType,
qubit1: usize,
qubit2: usize,
) -> Result<()> {
self.apply_single_qubit_correction(state, error_type, qubit1)?;
self.apply_single_qubit_correction(state, error_type, qubit2)?;
Ok(())
}
fn apply_syndrome_based_correction(
&self,
state: &mut Array1<Complex64>,
error_type: ErrorType,
) -> Result<()> {
let n_qubits = (state.len() as f64).log2().ceil() as usize;
let target_qubit = fastrand::usize(0..n_qubits);
self.apply_single_qubit_correction(state, error_type, target_qubit)?;
Ok(())
}
fn apply_probabilistic_correction(
&self,
state: &mut Array1<Complex64>,
error_type: ErrorType,
) -> Result<()> {
let n_qubits = (state.len() as f64).log2().ceil() as usize;
for qubit in 0..n_qubits {
let prob = match error_type {
ErrorType::BitFlip => 0.3,
ErrorType::PhaseFlip => 0.2,
ErrorType::BitPhaseFlip => 0.1,
ErrorType::Identity => 0.0,
};
if fastrand::f64() < prob {
self.apply_single_qubit_correction(state, error_type, qubit)?;
}
}
Ok(())
}
fn apply_classical_correction(
&self,
state: &mut Array1<Complex64>,
syndrome: &[bool],
) -> Result<()> {
for (i, &has_error) in syndrome.iter().enumerate() {
if has_error {
self.apply_single_qubit_correction(state, ErrorType::BitFlip, i)?;
}
}
Ok(())
}
fn count_errors(&self, _state: &Array1<Complex64>, syndrome: &[bool]) -> usize {
syndrome.iter().map(|&b| usize::from(b)).sum()
}
const fn class_to_error_type(&self, class: usize) -> ErrorType {
match class {
0 => ErrorType::Identity,
1 => ErrorType::BitFlip,
2 => ErrorType::PhaseFlip,
3 => ErrorType::BitPhaseFlip,
_ => ErrorType::Identity,
}
}
fn syndrome_to_string(&self, syndrome: &[bool]) -> String {
syndrome
.iter()
.map(|&b| if b { '1' } else { '0' })
.collect()
}
fn state_to_string(&self, state: &Array1<Complex64>) -> String {
let amplitudes: Vec<f64> = state.iter().map(|c| c.norm()).collect();
format!("{amplitudes:.3?}")
}
fn update_metrics(
&mut self,
errors_before: usize,
errors_after: usize,
confidence: f64,
reward: f64,
) {
self.metrics.total_corrections += 1;
if errors_after < errors_before {
self.metrics.successful_corrections += 1;
} else if errors_after > errors_before {
self.metrics.false_positives += 1;
}
self.metrics.average_confidence = self
.metrics
.average_confidence
.mul_add((self.metrics.total_corrections - 1) as f64, confidence)
/ self.metrics.total_corrections as f64;
self.metrics.reward_history.push(reward);
if self.metrics.reward_history.len() > 1000 {
self.metrics.reward_history.remove(0);
}
}
fn retrain_models(&mut self) -> Result<()> {
let history = self.training_history.lock().map_err(|e| {
crate::error::SimulatorError::InvalidOperation(format!("Lock poisoned: {e}"))
})?;
if history.len() < self.config.batch_size {
return Ok(());
}
let mut inputs = Vec::new();
let mut targets = Vec::new();
for example in history.iter() {
inputs.push(example.syndrome.clone());
let mut target = Array1::zeros(4);
let error_class = match example.error_type {
ErrorType::Identity => 0,
ErrorType::BitFlip => 1,
ErrorType::PhaseFlip => 2,
ErrorType::BitPhaseFlip => 3,
};
target[error_class] = 1.0;
targets.push(target);
}
let batch_size = self.config.batch_size.min(inputs.len());
for chunk in inputs.chunks(batch_size).zip(targets.chunks(batch_size)) {
let loss = self.classifier.train_batch(chunk.0, chunk.1);
self.metrics.learning_curve.push(loss);
}
Ok(())
}
#[must_use]
pub const fn get_metrics(&self) -> &CorrectionMetrics {
&self.metrics
}
pub fn reset(&mut self) {
self.metrics = CorrectionMetrics::default();
if let Ok(mut history) = self.training_history.lock() {
history.clear();
}
self.update_counter = 0;
}
}
/// Outcome of a single `correct_errors_adaptive` call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdaptiveCorrectionResult {
    /// Error class the classifier predicted.
    pub predicted_error_type: ErrorType,
    /// Classifier confidence for that prediction (softmax probability).
    pub confidence: f64,
    /// True when the ML path was taken; false for the classical fallback.
    pub correction_applied: bool,
    /// Syndrome-bit count reduction achieved (never negative).
    pub errors_corrected: usize,
    /// Reward granted to the RL agent for this step.
    pub reward: f64,
    /// Wall time spent on this correction, in milliseconds.
    pub processing_time_ms: f64,
    /// Action index the RL agent selected.
    pub rl_action: usize,
}
/// Benchmarks two pipeline configurations (online neural network and
/// reinforcement learning) over 100 correction rounds each, returning the
/// elapsed milliseconds keyed as `config_0`, `config_1`.
///
/// # Errors
/// Propagates construction or correction failures.
pub fn benchmark_adaptive_ml_error_correction() -> Result<HashMap<String, f64>> {
    let mut results = HashMap::new();
    let configs = vec![
        AdaptiveMLConfig {
            model_type: MLModelType::NeuralNetwork,
            learning_strategy: LearningStrategy::Online,
            ..Default::default()
        },
        AdaptiveMLConfig {
            model_type: MLModelType::ReinforcementLearning,
            learning_strategy: LearningStrategy::Reinforcement,
            ..Default::default()
        },
    ];
    for (i, config) in configs.into_iter().enumerate() {
        let start = std::time::Instant::now();
        let mut adaptive_ec = AdaptiveMLErrorCorrection::new(config)?;
        // Repeatedly correct a fixed (|00> + |01>)/sqrt(2) state with a fixed
        // syndrome to exercise the full pipeline.
        let amp = 1.0 / 2.0_f64.sqrt();
        let syndrome = vec![true, false, true, false];
        for _ in 0..100 {
            let mut test_state = Array1::from_vec(vec![
                Complex64::new(amp, 0.0),
                Complex64::new(amp, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
            ]);
            let _result = adaptive_ec.correct_errors_adaptive(&mut test_state, &syndrome)?;
        }
        let elapsed_ms = start.elapsed().as_secs_f64() * 1000.0;
        results.insert(format!("config_{i}"), elapsed_ms);
    }
    Ok(results)
}
#[cfg(test)]
#[allow(clippy::field_reassign_with_default)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    // 4 -> 8 -> 4 -> 2 gives three weight matrices (one per layer transition).
    #[test]
    fn test_neural_network_creation() {
        let nn = SyndromeClassificationNetwork::new(4, vec![8, 4], 2, 0.01);
        assert_eq!(nn.input_size, 4);
        assert_eq!(nn.output_size, 2);
        assert_eq!(nn.weights.len(), 3);
    }

    // Softmax output must be a probability distribution (sums to 1).
    #[test]
    fn test_neural_network_forward() {
        let nn = SyndromeClassificationNetwork::new(3, vec![4], 2, 0.01);
        let input = Array1::from_vec(vec![1.0, 0.0, 1.0]);
        let output = nn.forward(&input);
        assert_eq!(output.len(), 2);
        assert_abs_diff_eq!(output.sum(), 1.0, epsilon = 1e-6);
    }

    #[test]
    fn test_rl_agent_creation() {
        let agent = ErrorCorrectionAgent::new(4, 0.1, 0.99, 0.1);
        assert_eq!(agent.action_space_size, 4);
        assert!(agent.q_table.is_empty());
    }

    // epsilon = 0.0 forces greedy selection, so the action is deterministic
    // given the (all-zero) Q-row and must be within the action space.
    #[test]
    fn test_rl_agent_action_selection() {
        let mut agent = ErrorCorrectionAgent::new(3, 0.1, 0.99, 0.0);
        let state = "001";
        let action = agent.select_action(state);
        assert!(action < 3);
    }

    // Raw-syndrome extraction maps true -> 1.0 and false -> 0.0 elementwise.
    #[test]
    fn test_feature_extraction() {
        let extractor = FeatureExtractor::new(FeatureExtractionMethod::RawSyndrome);
        let syndrome = vec![true, false, true, false];
        let features = extractor.extract_features(&syndrome);
        assert_eq!(features.len(), 4);
        assert_abs_diff_eq!(features[0], 1.0, epsilon = 1e-10);
        assert_abs_diff_eq!(features[1], 0.0, epsilon = 1e-10);
        assert_abs_diff_eq!(features[2], 1.0, epsilon = 1e-10);
        assert_abs_diff_eq!(features[3], 0.0, epsilon = 1e-10);
    }

    #[test]
    fn test_adaptive_ml_error_correction_creation() {
        let config = AdaptiveMLConfig::default();
        let adaptive_ec = AdaptiveMLErrorCorrection::new(config);
        assert!(adaptive_ec.is_ok());
    }

    // End-to-end smoke test on |00> with an all-clear syndrome.
    #[test]
    fn test_error_correction_application() {
        let config = AdaptiveMLConfig::default();
        let mut adaptive_ec = AdaptiveMLErrorCorrection::new(config)
            .expect("Failed to create AdaptiveMLErrorCorrection");
        let mut state = Array1::from_vec(vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 0.0),
            Complex64::new(0.0, 0.0),
            Complex64::new(0.0, 0.0),
        ]);
        let syndrome = vec![false, false];
        let result = adaptive_ec.correct_errors_adaptive(&mut state, &syndrome);
        assert!(result.is_ok());
        let correction_result = result.expect("Failed to correct errors");
        assert!(correction_result.processing_time_ms >= 0.0);
    }

    // Precision and recall share the same values here because FP == FN == 5.
    #[test]
    fn test_metrics_calculation() {
        let mut metrics = CorrectionMetrics::default();
        metrics.total_corrections = 100;
        metrics.successful_corrections = 90;
        metrics.false_positives = 5;
        metrics.false_negatives = 5;
        assert_abs_diff_eq!(metrics.accuracy(), 0.9, epsilon = 1e-10);
        assert_abs_diff_eq!(metrics.precision(), 90.0 / 95.0, epsilon = 1e-10);
        assert_abs_diff_eq!(metrics.recall(), 90.0 / 95.0, epsilon = 1e-10);
    }

    // Class indices 0..=3 map onto the four ErrorType variants in order.
    #[test]
    fn test_different_error_types() {
        let config = AdaptiveMLConfig::default();
        let adaptive_ec = AdaptiveMLErrorCorrection::new(config)
            .expect("Failed to create AdaptiveMLErrorCorrection");
        assert_eq!(adaptive_ec.class_to_error_type(0), ErrorType::Identity);
        assert_eq!(adaptive_ec.class_to_error_type(1), ErrorType::BitFlip);
        assert_eq!(adaptive_ec.class_to_error_type(2), ErrorType::PhaseFlip);
        assert_eq!(adaptive_ec.class_to_error_type(3), ErrorType::BitPhaseFlip);
    }
}