//! Advanced machine-learning-based quantum error mitigation.
//!
//! Combines deep learning, reinforcement learning, transfer learning, and
//! ensemble methods to predict and correct errors in noisy quantum measurements.

use scirs2_core::ndarray::{Array1, Array2, Array3};
use scirs2_core::random::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};

use crate::circuit_interfaces::{InterfaceCircuit, InterfaceGate, InterfaceGateType};
use crate::error::{Result, SimulatorError};
use scirs2_core::random::prelude::*;

/// Configuration for advanced ML-based error mitigation.
#[derive(Debug, Clone)]
pub struct AdvancedMLMitigationConfig {
    /// Enable deep neural network mitigation models
    pub enable_deep_learning: bool,
    /// Enable reinforcement-learning-based strategy selection
    pub enable_reinforcement_learning: bool,
    /// Enable transfer learning across devices
    pub enable_transfer_learning: bool,
    /// Enable adversarial training
    pub enable_adversarial_training: bool,
    /// Enable ensemble combination of multiple models
    pub enable_ensemble_methods: bool,
    /// Enable online (incremental) model updates
    pub enable_online_learning: bool,
    /// Learning rate for model updates
    pub learning_rate: f64,
    /// Mini-batch size used for training
    pub batch_size: usize,
    /// Maximum number of stored training samples
    pub memory_size: usize,
    /// Exploration rate (epsilon) for the RL agent
    pub exploration_rate: f64,
    /// Blending factor for transfer learning
    pub transfer_alpha: f64,
    /// Number of models in the ensemble
    pub ensemble_size: usize,
}

impl Default for AdvancedMLMitigationConfig {
    fn default() -> Self {
        Self {
            enable_deep_learning: true,
            enable_reinforcement_learning: true,
            enable_transfer_learning: false,
            enable_adversarial_training: false,
            enable_ensemble_methods: true,
            enable_online_learning: true,
            learning_rate: 0.001,
            batch_size: 64,
            memory_size: 10_000,
            exploration_rate: 0.1,
            transfer_alpha: 0.5,
            ensemble_size: 5,
        }
    }
}
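
// Hedged usage sketch (added example, not part of the original API): building a
// customized configuration by overriding a few fields on top of the defaults.
// The specific values below are illustrative assumptions, not recommendations.
#[allow(dead_code)]
fn example_custom_config() -> AdvancedMLMitigationConfig {
    AdvancedMLMitigationConfig {
        // Larger step size trades stability for faster adaptation.
        learning_rate: 0.01,
        // Smaller ensemble to reduce runtime overhead.
        ensemble_size: 3,
        ..Default::default()
    }
}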

/// Feedforward neural network used for learned error mitigation.
#[derive(Debug, Clone)]
pub struct DeepMitigationNetwork {
    /// Layer sizes, from input through output
    pub layers: Vec<usize>,
    /// Weight matrices, one per layer transition
    pub weights: Vec<Array2<f64>>,
    /// Bias vectors, one per layer transition
    pub biases: Vec<Array1<f64>>,
    /// Activation applied after each layer
    pub activation: ActivationFunction,
    /// Recorded training loss per update
    pub loss_history: Vec<f64>,
}

/// Activation functions supported by the mitigation networks.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ActivationFunction {
    ReLU,
    Sigmoid,
    Tanh,
    Swish,
    GELU,
}

/// Q-learning agent that selects among mitigation strategies.
#[derive(Debug, Clone)]
pub struct QLearningMitigationAgent {
    /// Q-values keyed by discretized state, then by action
    pub q_table: HashMap<String, HashMap<MitigationAction, f64>>,
    /// Learning rate for Q-value updates
    pub learning_rate: f64,
    /// Discount factor for future rewards
    pub discount_factor: f64,
    /// Probability of choosing a random action (epsilon)
    pub exploration_rate: f64,
    /// Replay buffer of past experiences
    pub experience_buffer: VecDeque<Experience>,
    /// Aggregate training statistics
    pub stats: RLTrainingStats,
}

/// Error mitigation strategies the agent can choose from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MitigationAction {
    ZeroNoiseExtrapolation,
    VirtualDistillation,
    SymmetryVerification,
    PauliTwirling,
    RandomizedCompiling,
    ClusterExpansion,
    MachineLearningPrediction,
    EnsembleMitigation,
}

/// A single reinforcement-learning experience tuple.
#[derive(Debug, Clone)]
pub struct Experience {
    /// Observed state (feature vector)
    pub state: Array1<f64>,
    /// Action taken in this state
    pub action: MitigationAction,
    /// Reward received
    pub reward: f64,
    /// State observed after taking the action
    pub next_state: Array1<f64>,
    /// Whether the episode terminated
    pub done: bool,
}

/// Aggregate statistics collected while training the RL agent.
#[derive(Debug, Clone, Default)]
pub struct RLTrainingStats {
    /// Number of completed episodes
    pub episodes: usize,
    /// Running average reward
    pub avg_reward: f64,
    /// Fraction of successful mitigations
    pub success_rate: f64,
    /// Current exploration decay factor
    pub exploration_decay: f64,
    /// Loss values tracked for convergence monitoring
    pub loss_convergence: Vec<f64>,
}

/// Transfer-learning model that adapts mitigation across devices.
#[derive(Debug, Clone)]
pub struct TransferLearningModel {
    /// Characteristics of the source device
    pub source_device: DeviceCharacteristics,
    /// Characteristics of the target device
    pub target_device: DeviceCharacteristics,
    /// Shared feature extraction network
    pub feature_extractor: DeepMitigationNetwork,
    /// Device-specific prediction heads
    pub device_heads: HashMap<String, DeepMitigationNetwork>,
    /// Blending factor between source and target knowledge
    pub transfer_alpha: f64,
    /// Statistics from the adaptation process
    pub adaptation_stats: TransferStats,
}

/// Calibration-level description of a quantum device.
#[derive(Debug, Clone)]
pub struct DeviceCharacteristics {
    /// Unique device identifier
    pub device_id: String,
    /// Gate error rates keyed by gate name
    pub gate_errors: HashMap<String, f64>,
    /// Coherence times keyed by metric (e.g. T1, T2)
    pub coherence_times: HashMap<String, f64>,
    /// Qubit connectivity matrix
    pub connectivity: Array2<bool>,
    /// Pairwise noise correlation matrix
    pub noise_correlations: Array2<f64>,
}
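
// Hedged construction sketch (added example): a two-qubit device description
// with full connectivity and uncorrelated noise. All numeric values are
// illustrative assumptions, not real calibration data.
#[allow(dead_code)]
fn example_device_characteristics() -> DeviceCharacteristics {
    let mut gate_errors = HashMap::new();
    gate_errors.insert("CNOT".to_string(), 1e-2);
    gate_errors.insert("Hadamard".to_string(), 1e-3);

    let mut coherence_times = HashMap::new();
    coherence_times.insert("T1".to_string(), 100e-6);
    coherence_times.insert("T2".to_string(), 80e-6);

    DeviceCharacteristics {
        device_id: "example_device".to_string(),
        gate_errors,
        coherence_times,
        // Fully connected two-qubit device.
        connectivity: Array2::from_elem((2, 2), true),
        // Independent noise on each qubit.
        noise_correlations: Array2::eye(2),
    }
}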

/// Performance statistics from transfer-learning adaptation.
#[derive(Debug, Clone, Default)]
pub struct TransferStats {
    /// Loss after adapting to the target device
    pub adaptation_loss: f64,
    /// Performance on the source device
    pub source_performance: f64,
    /// Performance on the target device
    pub target_performance: f64,
    /// Efficiency of the knowledge transfer
    pub transfer_efficiency: f64,
}

/// Ensemble of mitigation models combined according to a strategy.
pub struct EnsembleMitigation {
    /// Member models
    pub models: Vec<Box<dyn MitigationModel>>,
    /// Per-model combination weights
    pub weights: Array1<f64>,
    /// How member predictions are combined
    pub combination_strategy: EnsembleStrategy,
    /// Historical ensemble performance
    pub performance_history: Vec<f64>,
}

/// Strategies for combining ensemble member predictions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EnsembleStrategy {
    /// Weighted average of predictions
    WeightedAverage,
    /// Median of predictions (majority-vote analogue for continuous values)
    MajorityVoting,
    /// Meta-model trained on member outputs
    Stacking,
    /// Pick the best model per input
    DynamicSelection,
    /// Bayesian model averaging
    BayesianAveraging,
}
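
// Hedged sketch (added example) of two combination rules named by
// `EnsembleStrategy` and used later in `apply_ensemble_mitigation`: a weighted
// average, and the median that this module uses as a stand-in for majority
// voting on continuous predictions. Helper names are illustrative.
#[allow(dead_code)]
fn combine_weighted_average(predictions: &[f64], weights: &Array1<f64>) -> f64 {
    predictions
        .iter()
        .zip(weights.iter())
        .map(|(pred, weight)| pred * weight)
        .sum()
}

#[allow(dead_code)]
fn combine_median(predictions: &[f64]) -> f64 {
    if predictions.is_empty() {
        return 0.0;
    }
    let mut sorted = predictions.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    sorted[sorted.len() / 2]
}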

/// Common interface implemented by all mitigation models.
pub trait MitigationModel: Send + Sync {
    /// Produce a mitigated expectation value from raw measurements.
    fn mitigate(&self, measurements: &Array1<f64>, circuit: &InterfaceCircuit) -> Result<f64>;

    /// Update the model from (feature, target) training pairs.
    fn update(&mut self, training_data: &[(Array1<f64>, f64)]) -> Result<()>;

    /// Confidence of the model in its predictions.
    fn confidence(&self) -> f64;

    /// Human-readable model name.
    fn name(&self) -> String;
}
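
// Hedged sketch of a `MitigationModel` implementation (hypothetical type, not
// part of the original code): a trivial model that rescales the mean
// measurement by a fixed factor, usable as a placeholder ensemble member.
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct ScaledMeanModel {
    scale: f64,
    confidence: f64,
}

impl MitigationModel for ScaledMeanModel {
    fn mitigate(&self, measurements: &Array1<f64>, _circuit: &InterfaceCircuit) -> Result<f64> {
        // Apply a constant multiplicative correction to the mean measurement.
        Ok(measurements.mean().unwrap_or(0.0) * self.scale)
    }

    fn update(&mut self, training_data: &[(Array1<f64>, f64)]) -> Result<()> {
        // Nudge the confidence toward 1.0 as more training data is observed.
        self.confidence = (self.confidence + training_data.len() as f64 * 1e-3).min(1.0);
        Ok(())
    }

    fn confidence(&self) -> f64 {
        self.confidence
    }

    fn name(&self) -> String {
        "ScaledMeanModel".to_string()
    }
}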

/// Result of an advanced ML error mitigation run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvancedMLMitigationResult {
    /// Mitigated expectation value
    pub mitigated_value: f64,
    /// Confidence in the mitigated value
    pub confidence: f64,
    /// Strategy or model that produced the result
    pub model_used: String,
    /// Raw (unmitigated) measurements
    pub raw_measurements: Vec<f64>,
    /// Mitigation overhead in seconds
    pub overhead: f64,
    /// Estimated relative error reduction
    pub error_reduction: f64,
    /// Detailed performance metrics
    pub performance_metrics: PerformanceMetrics,
}

/// Accuracy and cost metrics for a mitigation model.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Mean absolute error
    pub mae: f64,
    /// Root mean squared error
    pub rmse: f64,
    /// Coefficient of determination
    pub r_squared: f64,
    /// Systematic bias of predictions
    pub bias: f64,
    /// Variance of prediction errors
    pub variance: f64,
    /// Wall-clock computation time in milliseconds
    pub computation_time_ms: f64,
}
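
// Hedged sketch showing how the fields of `PerformanceMetrics` could be filled
// in from paired predictions and reference values. This helper is an assumption
// added for illustration; the original module leaves most fields at their
// defaults and only records `computation_time_ms`.
#[allow(dead_code)]
fn example_performance_metrics(predictions: &[f64], targets: &[f64]) -> PerformanceMetrics {
    // Pair up predictions with reference values (truncating to the shorter slice).
    let pairs: Vec<(f64, f64)> = predictions
        .iter()
        .zip(targets.iter())
        .map(|(&p, &t)| (p, t))
        .collect();
    let n = pairs.len().max(1) as f64;

    let errors: Vec<f64> = pairs.iter().map(|(p, t)| p - t).collect();
    let mae = errors.iter().map(|e| e.abs()).sum::<f64>() / n;
    let mse = errors.iter().map(|e| e * e).sum::<f64>() / n;
    let bias = errors.iter().sum::<f64>() / n;
    let variance = errors.iter().map(|e| (e - bias).powi(2)).sum::<f64>() / n;

    // R^2 relative to the variance of the reference values.
    let target_mean = pairs.iter().map(|(_, t)| t).sum::<f64>() / n;
    let target_var = pairs.iter().map(|(_, t)| (t - target_mean).powi(2)).sum::<f64>() / n;
    let r_squared = if target_var > 0.0 { 1.0 - mse / target_var } else { 0.0 };

    PerformanceMetrics {
        mae,
        rmse: mse.sqrt(),
        r_squared,
        bias,
        variance,
        computation_time_ms: 0.0,
    }
}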

/// Graph neural network operating on circuit structure.
#[derive(Debug, Clone)]
pub struct GraphMitigationNetwork {
    /// Per-node (qubit) feature matrix
    pub node_features: Array2<f64>,
    /// Per-edge feature tensor
    pub edge_features: Array3<f64>,
    /// Attention weights over nodes
    pub attention_weights: Array2<f64>,
    /// Graph convolution layers
    pub conv_layers: Vec<GraphConvLayer>,
    /// Pooling mode used to summarize the graph
    pub pooling: GraphPooling,
}

/// A single graph convolution layer.
#[derive(Debug, Clone)]
pub struct GraphConvLayer {
    /// Layer weight matrix
    pub weights: Array2<f64>,
    /// Layer bias vector
    pub bias: Array1<f64>,
    /// Activation applied after the convolution
    pub activation: ActivationFunction,
}

/// Pooling strategies for reducing node features to a graph-level vector.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GraphPooling {
    Mean,
    Max,
    Sum,
    Attention,
    Set2Set,
}
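
// Hedged sketch (added example) of how a `GraphPooling` mode could reduce a
// per-node feature matrix to a single graph-level feature vector. Only the
// simple reductions are shown; attention and Set2Set pooling need learned
// parameters and fall back to the mean here. The helper is illustrative and
// not part of the original API.
#[allow(dead_code)]
fn example_graph_pool(node_features: &Array2<f64>, pooling: GraphPooling) -> Array1<f64> {
    let pooled: Vec<f64> = (0..node_features.ncols())
        .map(|j| {
            let column = node_features.column(j);
            match pooling {
                GraphPooling::Sum => column.sum(),
                GraphPooling::Max => column.iter().copied().fold(f64::NEG_INFINITY, f64::max),
                // Attention and Set2Set require trainable weights; use the mean.
                GraphPooling::Mean | GraphPooling::Attention | GraphPooling::Set2Set => {
                    column.mean().unwrap_or(0.0)
                }
            }
        })
        .collect();
    Array1::from_vec(pooled)
}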

/// Main coordinator for advanced ML-based error mitigation.
pub struct AdvancedMLErrorMitigator {
    /// Configuration options
    config: AdvancedMLMitigationConfig,
    /// Deep learning model (if enabled)
    deep_model: Option<DeepMitigationNetwork>,
    /// Reinforcement-learning agent (if enabled)
    rl_agent: Option<QLearningMitigationAgent>,
    /// Transfer-learning model (if enabled)
    transfer_model: Option<TransferLearningModel>,
    /// Ensemble of mitigation models (if enabled)
    ensemble: Option<EnsembleMitigation>,
    /// Graph neural network model (if enabled)
    graph_model: Option<GraphMitigationNetwork>,
    /// Recent (features, target) training pairs
    training_history: VecDeque<(Array1<f64>, f64)>,
    /// Per-model performance tracking
    performance_tracker: PerformanceTracker,
}

/// Tracks accuracy and cost of the different mitigation approaches.
#[derive(Debug, Clone, Default)]
pub struct PerformanceTracker {
    /// Accuracy history per model
    pub accuracy_history: HashMap<String, Vec<f64>>,
    /// Cost history per model
    pub cost_history: HashMap<String, Vec<f64>>,
    /// Error reduction achieved over time
    pub error_reduction_history: Vec<f64>,
    /// Best-performing model per context
    pub best_models: HashMap<String, String>,
}

impl AdvancedMLErrorMitigator {
    /// Create a new mitigator with the given configuration.
    pub fn new(config: AdvancedMLMitigationConfig) -> Result<Self> {
        let mut mitigator = Self {
            config: config.clone(),
            deep_model: None,
            rl_agent: None,
            transfer_model: None,
            ensemble: None,
            graph_model: None,
            training_history: VecDeque::with_capacity(config.memory_size),
            performance_tracker: PerformanceTracker::default(),
        };

        // Initialize optional components according to the configuration.
        if config.enable_deep_learning {
            mitigator.deep_model = Some(mitigator.create_deep_model()?);
        }

        if config.enable_reinforcement_learning {
            mitigator.rl_agent = Some(mitigator.create_rl_agent()?);
        }

        if config.enable_ensemble_methods {
            mitigator.ensemble = Some(mitigator.create_ensemble()?);
        }

        Ok(mitigator)
    }

    /// Run the full mitigation pipeline on a set of raw measurements.
    pub fn mitigate_errors(
        &mut self,
        measurements: &Array1<f64>,
        circuit: &InterfaceCircuit,
    ) -> Result<AdvancedMLMitigationResult> {
        let start_time = std::time::Instant::now();

        // Extract features describing the circuit and the measurement statistics.
        let features = self.extract_features(circuit, measurements)?;

        // Choose a mitigation strategy (RL-based if an agent is available).
        let strategy = self.select_mitigation_strategy(&features)?;

        // Apply the selected strategy.
        let mitigated_value = match strategy {
            MitigationAction::MachineLearningPrediction => {
                self.apply_deep_learning_mitigation(&features, measurements)?
            }
            MitigationAction::EnsembleMitigation => {
                self.apply_ensemble_mitigation(&features, measurements, circuit)?
            }
            _ => {
                self.apply_traditional_mitigation(strategy, measurements, circuit)?
            }
        };

        // Assess the result and update the models online.
        let confidence = self.calculate_confidence(&features, mitigated_value)?;
        let error_reduction = self.estimate_error_reduction(measurements, mitigated_value)?;

        let computation_time = start_time.elapsed().as_millis() as f64;

        self.update_models(&features, mitigated_value)?;

        Ok(AdvancedMLMitigationResult {
            mitigated_value,
            confidence,
            model_used: format!("{strategy:?}"),
            raw_measurements: measurements.to_vec(),
            // Overhead reported in seconds.
            overhead: computation_time / 1000.0,
            error_reduction,
            performance_metrics: PerformanceMetrics {
                computation_time_ms: computation_time,
                ..Default::default()
            },
        })
    }

    /// Create the deep mitigation network with Xavier-style initialization.
    pub fn create_deep_model(&self) -> Result<DeepMitigationNetwork> {
        // 18 input features (matching `extract_features`), three hidden layers,
        // and a single scalar output.
        let layers = vec![18, 128, 64, 32, 1];
        let mut weights = Vec::new();
        let mut biases = Vec::new();

        for i in 0..layers.len() - 1 {
            let fan_in = layers[i];
            let fan_out = layers[i + 1];
            // Xavier/Glorot uniform initialization bound.
            let limit = (6.0 / (fan_in + fan_out) as f64).sqrt();

            let w =
                Array2::from_shape_fn((fan_out, fan_in), |_| thread_rng().gen_range(-limit..limit));
            let b = Array1::zeros(fan_out);

            weights.push(w);
            biases.push(b);
        }

        Ok(DeepMitigationNetwork {
            layers,
            weights,
            biases,
            activation: ActivationFunction::ReLU,
            loss_history: Vec::new(),
        })
    }

    /// Create the Q-learning agent used for strategy selection.
    pub fn create_rl_agent(&self) -> Result<QLearningMitigationAgent> {
        Ok(QLearningMitigationAgent {
            q_table: HashMap::new(),
            learning_rate: self.config.learning_rate,
            discount_factor: 0.95,
            exploration_rate: self.config.exploration_rate,
            experience_buffer: VecDeque::with_capacity(self.config.memory_size),
            stats: RLTrainingStats::default(),
        })
    }

    /// Create an (initially empty) ensemble with uniform weights.
    fn create_ensemble(&self) -> Result<EnsembleMitigation> {
        // Member models can be registered later; the ensemble starts empty.
        let models: Vec<Box<dyn MitigationModel>> = Vec::new();
        let weights = Array1::ones(self.config.ensemble_size) / self.config.ensemble_size as f64;

        Ok(EnsembleMitigation {
            models,
            weights,
            combination_strategy: EnsembleStrategy::WeightedAverage,
            performance_history: Vec::new(),
        })
    }

    /// Extract a fixed-length feature vector from a circuit and its measurements.
    pub fn extract_features(
        &self,
        circuit: &InterfaceCircuit,
        measurements: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut features = Vec::new();

        // Basic circuit properties.
        features.push(circuit.gates.len() as f64);
        features.push(circuit.num_qubits as f64);

        // Gate composition.
        let mut gate_counts = HashMap::new();
        for gate in &circuit.gates {
            *gate_counts
                .entry(format!("{:?}", gate.gate_type))
                .or_insert(0) += 1;
        }

        // Relative frequency of common gate types (guard against empty circuits).
        let total_gates = (circuit.gates.len() as f64).max(1.0);
        for gate_type in [
            "PauliX", "PauliY", "PauliZ", "Hadamard", "CNOT", "CZ", "RX", "RY", "RZ", "Phase",
        ] {
            let count = gate_counts.get(gate_type).unwrap_or(&0);
            features.push(f64::from(*count) / total_gates);
        }

        // Measurement statistics.
        features.push(measurements.mean().unwrap_or(0.0));
        features.push(measurements.std(0.0));
        features.push(measurements.var(0.0));
        features.push(measurements.len() as f64);

        // Structural properties of the circuit.
        features.push(self.calculate_circuit_connectivity(circuit)?);
        features.push(self.calculate_entanglement_estimate(circuit)?);

        Ok(Array1::from_vec(features))
    }

    /// Select a mitigation strategy, using epsilon-greedy Q-learning when enabled.
    pub fn select_mitigation_strategy(
        &mut self,
        features: &Array1<f64>,
    ) -> Result<MitigationAction> {
        if let Some(ref mut agent) = self.rl_agent {
            let state_key = Self::features_to_state_key(features);

            if thread_rng().gen::<f64>() < agent.exploration_rate {
                // Explore: pick a random strategy.
                let actions = [
                    MitigationAction::ZeroNoiseExtrapolation,
                    MitigationAction::VirtualDistillation,
                    MitigationAction::MachineLearningPrediction,
                    MitigationAction::EnsembleMitigation,
                ];
                Ok(actions[thread_rng().gen_range(0..actions.len())])
            } else {
                // Exploit: pick the action with the highest Q-value for this state.
                let q_values = agent.q_table.get(&state_key).cloned().unwrap_or_default();

                let best_action = q_values
                    .iter()
                    .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
                    .map_or(
                        MitigationAction::MachineLearningPrediction,
                        |(action, _)| *action,
                    );

                Ok(best_action)
            }
        } else {
            Ok(MitigationAction::MachineLearningPrediction)
        }
    }

    /// Mitigate errors using the deep learning model's predicted correction factor.
    fn apply_deep_learning_mitigation(
        &self,
        features: &Array1<f64>,
        measurements: &Array1<f64>,
    ) -> Result<f64> {
        if let Some(ref model) = self.deep_model {
            let prediction = Self::forward_pass_static(model, features)?;

            // Interpret the network output as a multiplicative correction factor.
            let correction_factor = prediction[0];
            let mitigated_value = measurements.mean().unwrap_or(0.0) * (1.0 + correction_factor);

            Ok(mitigated_value)
        } else {
            Err(SimulatorError::InvalidConfiguration(
                "Deep learning model not initialized".to_string(),
            ))
        }
    }

    /// Mitigate errors by combining predictions from the ensemble members.
    fn apply_ensemble_mitigation(
        &self,
        _features: &Array1<f64>,
        measurements: &Array1<f64>,
        circuit: &InterfaceCircuit,
    ) -> Result<f64> {
        if let Some(ref ensemble) = self.ensemble {
            let mut predictions = Vec::new();

            for model in &ensemble.models {
                let prediction = model.mitigate(measurements, circuit)?;
                predictions.push(prediction);
            }

            // Fall back to the raw mean if no member models are registered.
            if predictions.is_empty() {
                return Ok(measurements.mean().unwrap_or(0.0));
            }

            let mitigated_value = match ensemble.combination_strategy {
                EnsembleStrategy::WeightedAverage => {
                    let weighted_sum: f64 = predictions
                        .iter()
                        .zip(ensemble.weights.iter())
                        .map(|(pred, weight)| pred * weight)
                        .sum();
                    weighted_sum
                }
                EnsembleStrategy::MajorityVoting => {
                    // Use the median as a majority-vote analogue for continuous values.
                    let mut sorted_predictions = predictions.clone();
                    sorted_predictions
                        .sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                    sorted_predictions[sorted_predictions.len() / 2]
                }
                _ => {
                    // Other strategies default to a simple average.
                    predictions.iter().sum::<f64>() / predictions.len() as f64
                }
            };

            Ok(mitigated_value)
        } else {
            Ok(measurements.mean().unwrap_or(0.0))
        }
    }

    /// Apply a traditional (non-ML) mitigation technique.
    pub fn apply_traditional_mitigation(
        &self,
        strategy: MitigationAction,
        measurements: &Array1<f64>,
        _circuit: &InterfaceCircuit,
    ) -> Result<f64> {
        match strategy {
            MitigationAction::ZeroNoiseExtrapolation => {
                // Richardson-style extrapolation over scaled noise levels.
                let noise_factors = [1.0, 1.5, 2.0];
                let values: Vec<f64> = noise_factors
                    .iter()
                    .zip(measurements.iter())
                    .map(|(factor, &val)| val / factor)
                    .collect();

                // At least two points are needed to extrapolate to zero noise.
                if values.len() < 2 {
                    return Ok(measurements.mean().unwrap_or(0.0));
                }

                let extrapolated = 2.0f64.mul_add(values[0], -values[1]);
                Ok(extrapolated)
            }
            MitigationAction::VirtualDistillation => {
                // Simplified variance-based correction.
                let mean_val = measurements.mean().unwrap_or(0.0);
                let variance = measurements.var(0.0);
                let corrected = mean_val + variance * 0.1;
                Ok(corrected)
            }
            _ => {
                // Remaining strategies fall back to the raw mean.
                Ok(measurements.mean().unwrap_or(0.0))
            }
        }
    }

    /// Run a forward pass through the network (static helper).
    fn forward_pass_static(
        model: &DeepMitigationNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut current = input.clone();

        for (weights, bias) in model.weights.iter().zip(model.biases.iter()) {
            // Affine transform followed by the configured activation
            // (applied to every layer, including the output).
            current = weights.dot(&current) + bias;
            current.mapv_inplace(|x| Self::apply_activation_static(x, model.activation));
        }

        Ok(current)
    }

    /// Apply the given activation function to a scalar (static helper).
    fn apply_activation_static(x: f64, activation: ActivationFunction) -> f64 {
        match activation {
            ActivationFunction::ReLU => x.max(0.0),
            ActivationFunction::Sigmoid => 1.0 / (1.0 + (-x).exp()),
            ActivationFunction::Tanh => x.tanh(),
            ActivationFunction::Swish => x * (1.0 / (1.0 + (-x).exp())),
            // tanh approximation of GELU.
            ActivationFunction::GELU => {
                0.5 * x
                    * (1.0
                        + ((2.0 / std::f64::consts::PI).sqrt()
                            * 0.044_715f64.mul_add(x.powi(3), x))
                        .tanh())
            }
        }
    }

    /// Apply the given activation function to a scalar.
    #[must_use]
    pub fn apply_activation(&self, x: f64, activation: ActivationFunction) -> f64 {
        Self::apply_activation_static(x, activation)
    }

    /// Run a forward pass through the given network.
    pub fn forward_pass(
        &self,
        model: &DeepMitigationNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        Self::forward_pass_static(model, input)
    }

    /// Fraction of multi-qubit gates relative to the number of possible qubit pairs.
    fn calculate_circuit_connectivity(&self, circuit: &InterfaceCircuit) -> Result<f64> {
        // Fewer than two qubits means there are no pairs to connect.
        if circuit.num_qubits < 2 {
            return Ok(0.0);
        }

        let mut connectivity_sum = 0.0;
        let total_possible_connections = (circuit.num_qubits * (circuit.num_qubits - 1)) / 2;

        for gate in &circuit.gates {
            if gate.qubits.len() > 1 {
                connectivity_sum += 1.0;
            }
        }

        Ok(connectivity_sum / total_possible_connections as f64)
    }

    /// Fraction of entangling gates in the circuit.
    fn calculate_entanglement_estimate(&self, circuit: &InterfaceCircuit) -> Result<f64> {
        // An empty circuit generates no entanglement.
        if circuit.gates.is_empty() {
            return Ok(0.0);
        }

        let mut entangling_gates = 0;

        for gate in &circuit.gates {
            match gate.gate_type {
                InterfaceGateType::CNOT
                | InterfaceGateType::CZ
                | InterfaceGateType::CY
                | InterfaceGateType::SWAP
                | InterfaceGateType::ISwap
                | InterfaceGateType::Toffoli => {
                    entangling_gates += 1;
                }
                _ => {}
            }
        }

        Ok(f64::from(entangling_gates) / circuit.gates.len() as f64)
    }

    /// Discretize a feature vector into a hashable state key for the Q-table.
    fn features_to_state_key(features: &Array1<f64>) -> String {
        let discretized: Vec<i32> = features
            .iter()
            .map(|&x| (x * 10.0).round() as i32)
            .collect();
        format!("{discretized:?}")
    }

    /// Heuristic confidence estimate based on feature dispersion.
    fn calculate_confidence(&self, features: &Array1<f64>, _mitigated_value: f64) -> Result<f64> {
        let feature_variance = features.var(0.0);
        let confidence = 1.0 / (1.0 + feature_variance);
        Ok(confidence.clamp(0.0, 1.0))
    }

    /// Rough estimate of the relative error reduction achieved by mitigation.
    fn estimate_error_reduction(&self, original: &Array1<f64>, mitigated: f64) -> Result<f64> {
        let original_mean = original.mean().unwrap_or(0.0);
        let original_variance = original.var(0.0);

        // Without spread in the raw data there is no measurable reduction.
        if original_variance <= f64::EPSILON {
            return Ok(0.0);
        }

        let estimated_improvement = (original_variance.sqrt() - (mitigated - original_mean).abs())
            / original_variance.sqrt();
        Ok(estimated_improvement.clamp(0.0, 1.0))
    }

    /// Record a new training sample and update the learned components.
    fn update_models(&mut self, features: &Array1<f64>, target: f64) -> Result<()> {
        // Keep the training history bounded by the configured memory size.
        if self.training_history.len() >= self.config.memory_size {
            self.training_history.pop_front();
        }
        self.training_history.push_back((features.clone(), target));

        // Refresh the deep model once enough samples are available.
        if self.training_history.len() >= self.config.batch_size {
            self.update_deep_model()?;
        }

        self.update_rl_agent(features, target)?;

        Ok(())
    }

    /// Evaluate the deep model on the most recent batch and track its loss.
    fn update_deep_model(&mut self) -> Result<()> {
        if let Some(ref mut model) = self.deep_model {
            // Take the most recent samples as a mini-batch.
            let batch_size = self.config.batch_size.min(self.training_history.len());
            let batch: Vec<_> = self
                .training_history
                .iter()
                .rev()
                .take(batch_size)
                .collect();

            let mut total_loss = 0.0;

            // Mean squared error over the batch (loss tracking only; no
            // gradient update is performed here).
            for (features, target) in batch {
                let prediction = Self::forward_pass_static(model, features)?;
                let loss = (prediction[0] - target).powi(2);
                total_loss += loss;
            }

            let avg_loss = total_loss / batch_size as f64;
            model.loss_history.push(avg_loss);
        }

        Ok(())
    }

    /// Update the RL agent's statistics and decay its exploration rate.
    fn update_rl_agent(&mut self, features: &Array1<f64>, reward: f64) -> Result<()> {
        if let Some(ref mut agent) = self.rl_agent {
            // Discretized state key (not used by this simplified update).
            let _state_key = Self::features_to_state_key(features);

            // Running average of the reward over all episodes.
            agent.stats.episodes += 1;
            agent.stats.avg_reward = agent
                .stats
                .avg_reward
                .mul_add((agent.stats.episodes - 1) as f64, reward)
                / agent.stats.episodes as f64;

            // Decay exploration while keeping a small residual exploration floor.
            agent.exploration_rate *= 0.995;
            agent.exploration_rate = agent.exploration_rate.max(0.01);
        }

        Ok(())
    }
}
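
// Hedged usage sketch (added example): feeding several measurement batches
// through the mitigator so that `update_models` accumulates training history
// between calls. The circuit, batch values, and loop length are illustrative
// assumptions, not a benchmark.
#[allow(dead_code)]
fn example_online_mitigation_loop() -> Result<()> {
    let config = AdvancedMLMitigationConfig::default();
    let mut mitigator = AdvancedMLErrorMitigator::new(config)?;

    // Small Bell-state style circuit.
    let mut circuit = InterfaceCircuit::new(2, 0);
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));

    for batch in 0..3 {
        // Slightly shifted noisy measurements per batch.
        let offset = 0.01 * batch as f64;
        let measurements = Array1::from_vec(vec![0.48 + offset, 0.52 - offset, 0.50]);
        let result = mitigator.mitigate_errors(&measurements, &circuit)?;
        println!(
            "batch {}: mitigated = {:.4}, confidence = {:.3}",
            batch, result.mitigated_value, result.confidence
        );
    }

    Ok(())
}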

/// Benchmark the advanced ML error mitigation pipeline on a small example circuit.
pub fn benchmark_advanced_ml_error_mitigation() -> Result<()> {
    println!("Benchmarking Advanced ML Error Mitigation...");

    let config = AdvancedMLMitigationConfig::default();
    let mut mitigator = AdvancedMLErrorMitigator::new(config)?;

    // Create a small test circuit.
    let mut circuit = InterfaceCircuit::new(4, 0);
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::RZ(0.5), vec![2]));

    // Simulated noisy measurements.
    let noisy_measurements = Array1::from_vec(vec![0.48, 0.52, 0.47, 0.53, 0.49]);

    let start_time = std::time::Instant::now();

    let result = mitigator.mitigate_errors(&noisy_measurements, &circuit)?;

    let duration = start_time.elapsed();

    println!("✅ Advanced ML Error Mitigation Results:");
    println!("  Mitigated Value: {:.6}", result.mitigated_value);
    println!("  Confidence: {:.4}", result.confidence);
    println!("  Model Used: {}", result.model_used);
    println!("  Error Reduction: {:.4}", result.error_reduction);
    println!("  Computation Time: {:.2}ms", duration.as_secs_f64() * 1000.0);

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_advanced_ml_mitigator_creation() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config);
        assert!(mitigator.is_ok());
    }

    #[test]
    fn test_feature_extraction() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).expect("Failed to create mitigator");

        let mut circuit = InterfaceCircuit::new(2, 0);
        circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
        circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));

        let measurements = Array1::from_vec(vec![0.5, 0.5, 0.5]);
        let features = mitigator.extract_features(&circuit, &measurements);

        assert!(features.is_ok());
        let features = features.expect("Failed to extract features");
        assert!(!features.is_empty());
    }

    #[test]
    fn test_activation_functions() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).expect("Failed to create mitigator");

        assert_eq!(
            mitigator.apply_activation(-1.0, ActivationFunction::ReLU),
            0.0
        );
        assert_eq!(
            mitigator.apply_activation(1.0, ActivationFunction::ReLU),
            1.0
        );

        let sigmoid_result = mitigator.apply_activation(0.0, ActivationFunction::Sigmoid);
        assert!((sigmoid_result - 0.5).abs() < 1e-10);
    }

    #[test]
    fn test_mitigation_strategy_selection() {
        let config = AdvancedMLMitigationConfig::default();
        let mut mitigator =
            AdvancedMLErrorMitigator::new(config).expect("Failed to create mitigator");

        let features = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let strategy = mitigator.select_mitigation_strategy(&features);

        assert!(strategy.is_ok());
    }

    #[test]
    fn test_traditional_mitigation() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).expect("Failed to create mitigator");

        let measurements = Array1::from_vec(vec![0.48, 0.52, 0.49]);
        let circuit = InterfaceCircuit::new(2, 0);

        let result = mitigator.apply_traditional_mitigation(
            MitigationAction::ZeroNoiseExtrapolation,
            &measurements,
            &circuit,
        );

        assert!(result.is_ok());
    }
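
    #[test]
    fn test_deep_model_forward_pass_shape() {
        // Added example test (not in the original suite): the forward pass of a
        // freshly initialized deep model should map an input of the network's
        // declared width to a single scalar output.
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).expect("Failed to create mitigator");

        let model = mitigator
            .create_deep_model()
            .expect("Failed to create deep model");
        let input = Array1::zeros(model.layers[0]);
        let output = mitigator
            .forward_pass(&model, &input)
            .expect("Forward pass failed");

        assert_eq!(output.len(), *model.layers.last().expect("layers is empty"));
    }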
}