use scirs2_core::ndarray::{Array1, Array2, Array3};
use scirs2_core::random::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};

use crate::circuit_interfaces::{InterfaceCircuit, InterfaceGate, InterfaceGateType};
use crate::error::{Result, SimulatorError};
use scirs2_core::random::prelude::*;

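/// Configuration for advanced machine-learning-based error mitigation.
///
/// Each `enable_*` flag toggles an optional mitigation component; the
/// remaining fields are shared hyperparameters.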
#[derive(Debug, Clone)]
pub struct AdvancedMLMitigationConfig {
    /// Enable deep neural network based mitigation models
    pub enable_deep_learning: bool,
    /// Enable Q-learning based strategy selection
    pub enable_reinforcement_learning: bool,
    /// Enable transfer learning across device characteristics
    pub enable_transfer_learning: bool,
    /// Enable adversarial training of mitigation models
    pub enable_adversarial_training: bool,
    /// Enable ensemble combination of multiple mitigation models
    pub enable_ensemble_methods: bool,
    /// Enable online (incremental) model updates
    pub enable_online_learning: bool,
    /// Learning rate for model updates
    pub learning_rate: f64,
    /// Mini-batch size used when updating models
    pub batch_size: usize,
    /// Maximum number of training samples kept in memory
    pub memory_size: usize,
    /// Exploration rate (epsilon) for the RL agent
    pub exploration_rate: f64,
    /// Blending factor for transfer learning
    pub transfer_alpha: f64,
    /// Number of models in the ensemble
    pub ensemble_size: usize,
}

impl Default for AdvancedMLMitigationConfig {
    fn default() -> Self {
        Self {
            enable_deep_learning: true,
            enable_reinforcement_learning: true,
            enable_transfer_learning: false,
            enable_adversarial_training: false,
            enable_ensemble_methods: true,
            enable_online_learning: true,
            learning_rate: 0.001,
            batch_size: 64,
            memory_size: 10000,
            exploration_rate: 0.1,
            transfer_alpha: 0.5,
            ensemble_size: 5,
        }
    }
}

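/// Simple fully connected feed-forward network used for mitigation predictions.
///
/// `layers` stores the layer widths; `weights[i]` has shape
/// `(layers[i + 1], layers[i])` and is applied as `W · x + b` in
/// [`AdvancedMLErrorMitigator::forward_pass`].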
#[derive(Debug, Clone)]
pub struct DeepMitigationNetwork {
    pub layers: Vec<usize>,
    pub weights: Vec<Array2<f64>>,
    pub biases: Vec<Array1<f64>>,
    pub activation: ActivationFunction,
    pub loss_history: Vec<f64>,
}

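/// Activation functions supported by [`DeepMitigationNetwork`].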
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ActivationFunction {
    ReLU,
    Sigmoid,
    Tanh,
    Swish,
    GELU,
}

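/// Tabular Q-learning agent that selects a [`MitigationAction`] for a given
/// discretized circuit/measurement state.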
#[derive(Debug, Clone)]
pub struct QLearningMitigationAgent {
    pub q_table: HashMap<String, HashMap<MitigationAction, f64>>,
    pub learning_rate: f64,
    pub discount_factor: f64,
    pub exploration_rate: f64,
    pub experience_buffer: VecDeque<Experience>,
    pub stats: RLTrainingStats,
}

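/// Mitigation strategies the agent can choose between.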
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MitigationAction {
    ZeroNoiseExtrapolation,
    VirtualDistillation,
    SymmetryVerification,
    PauliTwirling,
    RandomizedCompiling,
    ClusterExpansion,
    MachineLearningPrediction,
    EnsembleMitigation,
}

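/// Single state/action/reward transition stored in the replay buffer.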
#[derive(Debug, Clone)]
pub struct Experience {
    pub state: Array1<f64>,
    pub action: MitigationAction,
    pub reward: f64,
    pub next_state: Array1<f64>,
    pub done: bool,
}

#[derive(Debug, Clone, Default)]
pub struct RLTrainingStats {
    pub episodes: usize,
    pub avg_reward: f64,
    pub success_rate: f64,
    pub exploration_decay: f64,
    pub loss_convergence: Vec<f64>,
}

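/// Transfer-learning model that adapts a shared feature extractor from a
/// source device to a target device via per-device prediction heads.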
#[derive(Debug, Clone)]
pub struct TransferLearningModel {
    pub source_device: DeviceCharacteristics,
    pub target_device: DeviceCharacteristics,
    pub feature_extractor: DeepMitigationNetwork,
    pub device_heads: HashMap<String, DeepMitigationNetwork>,
    pub transfer_alpha: f64,
    pub adaptation_stats: TransferStats,
}

#[derive(Debug, Clone)]
pub struct DeviceCharacteristics {
    pub device_id: String,
    pub gate_errors: HashMap<String, f64>,
    pub coherence_times: HashMap<String, f64>,
    pub connectivity: Array2<bool>,
    pub noise_correlations: Array2<f64>,
}

#[derive(Debug, Clone, Default)]
pub struct TransferStats {
    pub adaptation_loss: f64,
    pub source_performance: f64,
    pub target_performance: f64,
    pub transfer_efficiency: f64,
}

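/// Ensemble of mitigation models combined according to an [`EnsembleStrategy`].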
pub struct EnsembleMitigation {
    pub models: Vec<Box<dyn MitigationModel>>,
    pub weights: Array1<f64>,
    pub combination_strategy: EnsembleStrategy,
    pub performance_history: Vec<f64>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EnsembleStrategy {
    WeightedAverage,
    MajorityVoting,
    Stacking,
    DynamicSelection,
    BayesianAveraging,
}

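/// Common interface implemented by individual mitigation models so they can be
/// combined in an [`EnsembleMitigation`].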
pub trait MitigationModel: Send + Sync {
    fn mitigate(&self, measurements: &Array1<f64>, circuit: &InterfaceCircuit) -> Result<f64>;

    fn update(&mut self, training_data: &[(Array1<f64>, f64)]) -> Result<()>;

    fn confidence(&self) -> f64;

    fn name(&self) -> String;
}

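/// Result of a mitigation run, including the corrected expectation value and
/// bookkeeping about how it was obtained.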
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvancedMLMitigationResult {
    pub mitigated_value: f64,
    pub confidence: f64,
    pub model_used: String,
    pub raw_measurements: Vec<f64>,
    pub overhead: f64,
    pub error_reduction: f64,
    pub performance_metrics: PerformanceMetrics,
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    pub mae: f64,
    pub rmse: f64,
    pub r_squared: f64,
    pub bias: f64,
    pub variance: f64,
    pub computation_time_ms: f64,
}

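/// Graph neural network model operating on circuit connectivity features.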
#[derive(Debug, Clone)]
pub struct GraphMitigationNetwork {
    pub node_features: Array2<f64>,
    pub edge_features: Array3<f64>,
    pub attention_weights: Array2<f64>,
    pub conv_layers: Vec<GraphConvLayer>,
    pub pooling: GraphPooling,
}

#[derive(Debug, Clone)]
pub struct GraphConvLayer {
    pub weights: Array2<f64>,
    pub bias: Array1<f64>,
    pub activation: ActivationFunction,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GraphPooling {
    Mean,
    Max,
    Sum,
    Attention,
    Set2Set,
}

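/// Top-level ML-driven error mitigator that owns the optional deep, RL,
/// transfer, ensemble, and graph models and dispatches between them.
///
/// Illustrative usage (sketch; the import path depends on the crate layout):
///
/// ```ignore
/// let config = AdvancedMLMitigationConfig::default();
/// let mut mitigator = AdvancedMLErrorMitigator::new(config)?;
/// let result = mitigator.mitigate_errors(&measurements, &circuit)?;
/// println!("mitigated value: {}", result.mitigated_value);
/// ```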
pub struct AdvancedMLErrorMitigator {
    config: AdvancedMLMitigationConfig,
    deep_model: Option<DeepMitigationNetwork>,
    rl_agent: Option<QLearningMitigationAgent>,
    transfer_model: Option<TransferLearningModel>,
    ensemble: Option<EnsembleMitigation>,
    graph_model: Option<GraphMitigationNetwork>,
    training_history: VecDeque<(Array1<f64>, f64)>,
    performance_tracker: PerformanceTracker,
}

#[derive(Debug, Clone, Default)]
pub struct PerformanceTracker {
    pub accuracy_history: HashMap<String, Vec<f64>>,
    pub cost_history: HashMap<String, Vec<f64>>,
    pub error_reduction_history: Vec<f64>,
    pub best_models: HashMap<String, String>,
}

impl AdvancedMLErrorMitigator {
    pub fn new(config: AdvancedMLMitigationConfig) -> Result<Self> {
        let mut mitigator = Self {
            config: config.clone(),
            deep_model: None,
            rl_agent: None,
            transfer_model: None,
            ensemble: None,
            graph_model: None,
            training_history: VecDeque::with_capacity(config.memory_size),
            performance_tracker: PerformanceTracker::default(),
        };

        if config.enable_deep_learning {
            mitigator.deep_model = Some(mitigator.create_deep_model()?);
        }

        if config.enable_reinforcement_learning {
            mitigator.rl_agent = Some(mitigator.create_rl_agent()?);
        }

        if config.enable_ensemble_methods {
            mitigator.ensemble = Some(mitigator.create_ensemble()?);
        }

        Ok(mitigator)
    }

    pub fn mitigate_errors(
        &mut self,
        measurements: &Array1<f64>,
        circuit: &InterfaceCircuit,
    ) -> Result<AdvancedMLMitigationResult> {
        let start_time = std::time::Instant::now();

        let features = self.extract_features(circuit, measurements)?;

        let strategy = self.select_mitigation_strategy(&features)?;

        let mitigated_value = match strategy {
            MitigationAction::MachineLearningPrediction => {
                self.apply_deep_learning_mitigation(&features, measurements)?
            }
            MitigationAction::EnsembleMitigation => {
                self.apply_ensemble_mitigation(&features, measurements, circuit)?
            }
            _ => self.apply_traditional_mitigation(strategy, measurements, circuit)?,
        };

        let confidence = self.calculate_confidence(&features, mitigated_value)?;
        let error_reduction = self.estimate_error_reduction(measurements, mitigated_value)?;

        let computation_time = start_time.elapsed().as_millis() as f64;

        self.update_models(&features, mitigated_value)?;

        Ok(AdvancedMLMitigationResult {
            mitigated_value,
            confidence,
            model_used: format!("{:?}", strategy),
            raw_measurements: measurements.to_vec(),
            overhead: computation_time / 1000.0,
            error_reduction,
            performance_metrics: PerformanceMetrics {
                computation_time_ms: computation_time,
                ..Default::default()
            },
        })
    }

    pub fn create_deep_model(&self) -> Result<DeepMitigationNetwork> {
        // Input layer matches the 18 features produced by `extract_features`.
        let layers = vec![18, 128, 64, 32, 1];
        let mut weights = Vec::new();
        let mut biases = Vec::new();

        for i in 0..layers.len() - 1 {
            let fan_in = layers[i];
            let fan_out = layers[i + 1];
            // Xavier/Glorot uniform initialization.
            let limit = (6.0 / (fan_in + fan_out) as f64).sqrt();

            let w = Array2::from_shape_fn((fan_out, fan_in), |_| {
                thread_rng().gen_range(-limit..limit)
            });
            let b = Array1::zeros(fan_out);

            weights.push(w);
            biases.push(b);
        }

        Ok(DeepMitigationNetwork {
            layers,
            weights,
            biases,
            activation: ActivationFunction::ReLU,
            loss_history: Vec::new(),
        })
    }

    pub fn create_rl_agent(&self) -> Result<QLearningMitigationAgent> {
        Ok(QLearningMitigationAgent {
            q_table: HashMap::new(),
            learning_rate: self.config.learning_rate,
            discount_factor: 0.95,
            exploration_rate: self.config.exploration_rate,
            experience_buffer: VecDeque::with_capacity(self.config.memory_size),
            stats: RLTrainingStats::default(),
        })
    }

    fn create_ensemble(&self) -> Result<EnsembleMitigation> {
        let models: Vec<Box<dyn MitigationModel>> = Vec::new();
        let weights = Array1::ones(self.config.ensemble_size) / self.config.ensemble_size as f64;

        Ok(EnsembleMitigation {
            models,
            weights,
            combination_strategy: EnsembleStrategy::WeightedAverage,
            performance_history: Vec::new(),
        })
    }

    pub fn extract_features(
        &self,
        circuit: &InterfaceCircuit,
        measurements: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut features = Vec::new();

        // Circuit-level features.
        features.push(circuit.gates.len() as f64);
        features.push(circuit.num_qubits as f64);

        // Gate-type distribution.
        let mut gate_counts = HashMap::new();
        for gate in &circuit.gates {
            *gate_counts
                .entry(format!("{:?}", gate.gate_type))
                .or_insert(0) += 1;
        }

        // Guard against division by zero for empty circuits.
        let total_gates = circuit.gates.len().max(1) as f64;
        for gate_type in [
            "PauliX", "PauliY", "PauliZ", "Hadamard", "CNOT", "CZ", "RX", "RY", "RZ", "Phase",
        ] {
            let count = gate_counts.get(gate_type).unwrap_or(&0);
            features.push(*count as f64 / total_gates);
        }

        // Measurement statistics.
        features.push(measurements.mean().unwrap_or(0.0));
        features.push(measurements.std(0.0));
        features.push(measurements.var(0.0));
        features.push(measurements.len() as f64);

        // Structural features.
        features.push(self.calculate_circuit_connectivity(circuit)?);
        features.push(self.calculate_entanglement_estimate(circuit)?);

        Ok(Array1::from_vec(features))
    }

    pub fn select_mitigation_strategy(
        &mut self,
        features: &Array1<f64>,
    ) -> Result<MitigationAction> {
        if let Some(ref mut agent) = self.rl_agent {
            let state_key = Self::features_to_state_key(features);

            // Epsilon-greedy action selection.
            if thread_rng().gen::<f64>() < agent.exploration_rate {
                let actions = [
                    MitigationAction::ZeroNoiseExtrapolation,
                    MitigationAction::VirtualDistillation,
                    MitigationAction::MachineLearningPrediction,
                    MitigationAction::EnsembleMitigation,
                ];
                Ok(actions[thread_rng().gen_range(0..actions.len())])
            } else {
                let q_values = agent.q_table.get(&state_key).cloned().unwrap_or_default();

                let best_action = q_values
                    .iter()
                    .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
                    .map(|(action, _)| *action)
                    .unwrap_or(MitigationAction::MachineLearningPrediction);

                Ok(best_action)
            }
        } else {
            Ok(MitigationAction::MachineLearningPrediction)
        }
    }

    fn apply_deep_learning_mitigation(
        &self,
        features: &Array1<f64>,
        measurements: &Array1<f64>,
    ) -> Result<f64> {
        if let Some(ref model) = self.deep_model {
            let prediction = Self::forward_pass_static(model, features)?;

            let correction_factor = prediction[0];
            let mitigated_value = measurements.mean().unwrap_or(0.0) * (1.0 + correction_factor);

            Ok(mitigated_value)
        } else {
            Err(SimulatorError::InvalidConfiguration(
                "Deep learning model not initialized".to_string(),
            ))
        }
    }

    fn apply_ensemble_mitigation(
        &self,
        _features: &Array1<f64>,
        measurements: &Array1<f64>,
        circuit: &InterfaceCircuit,
    ) -> Result<f64> {
        if let Some(ref ensemble) = self.ensemble {
            let mut predictions = Vec::new();

            for model in &ensemble.models {
                let prediction = model.mitigate(measurements, circuit)?;
                predictions.push(prediction);
            }

            // Fall back to the raw mean if the ensemble holds no models.
            if predictions.is_empty() {
                return Ok(measurements.mean().unwrap_or(0.0));
            }

            let mitigated_value = match ensemble.combination_strategy {
                EnsembleStrategy::WeightedAverage => {
                    let weighted_sum: f64 = predictions
                        .iter()
                        .zip(ensemble.weights.iter())
                        .map(|(pred, weight)| pred * weight)
                        .sum();
                    weighted_sum
                }
                EnsembleStrategy::MajorityVoting => {
                    // For continuous predictions, voting reduces to taking the median.
                    let mut sorted_predictions = predictions.clone();
                    sorted_predictions
                        .sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                    sorted_predictions[sorted_predictions.len() / 2]
                }
                _ => predictions.iter().sum::<f64>() / predictions.len() as f64,
            };

            Ok(mitigated_value)
        } else {
            Ok(measurements.mean().unwrap_or(0.0))
        }
    }

    pub fn apply_traditional_mitigation(
        &self,
        strategy: MitigationAction,
        measurements: &Array1<f64>,
        _circuit: &InterfaceCircuit,
    ) -> Result<f64> {
        match strategy {
            MitigationAction::ZeroNoiseExtrapolation => {
                // Simplified ZNE: scale measurements by assumed noise factors and
                // linearly extrapolate back to the zero-noise limit.
                let noise_factors = [1.0, 1.5, 2.0];
                let values: Vec<f64> = noise_factors
                    .iter()
                    .zip(measurements.iter())
                    .map(|(factor, &val)| val / factor)
                    .collect();

                if values.len() < 2 {
                    return Ok(measurements.mean().unwrap_or(0.0));
                }

                let extrapolated = 2.0 * values[0] - values[1];
                Ok(extrapolated)
            }
            MitigationAction::VirtualDistillation => {
                // Simplified virtual distillation: apply a variance-based correction.
                let mean_val = measurements.mean().unwrap_or(0.0);
                let variance = measurements.var(0.0);
                let corrected = mean_val + variance * 0.1;
                Ok(corrected)
            }
            _ => Ok(measurements.mean().unwrap_or(0.0)),
        }
    }

    fn forward_pass_static(
        model: &DeepMitigationNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut current = input.clone();

        for (weights, bias) in model.weights.iter().zip(model.biases.iter()) {
            current = weights.dot(&current) + bias;
            current.mapv_inplace(|x| Self::apply_activation_static(x, model.activation));
        }

        Ok(current)
    }

    fn apply_activation_static(x: f64, activation: ActivationFunction) -> f64 {
        match activation {
            ActivationFunction::ReLU => x.max(0.0),
            ActivationFunction::Sigmoid => 1.0 / (1.0 + (-x).exp()),
            ActivationFunction::Tanh => x.tanh(),
            ActivationFunction::Swish => x * (1.0 / (1.0 + (-x).exp())),
            ActivationFunction::GELU => {
                // tanh approximation of GELU
                0.5 * x
                    * (1.0
                        + ((2.0 / std::f64::consts::PI).sqrt() * (x + 0.044715 * x.powi(3))).tanh())
            }
        }
    }

    pub fn apply_activation(&self, x: f64, activation: ActivationFunction) -> f64 {
        Self::apply_activation_static(x, activation)
    }

    pub fn forward_pass(
        &self,
        model: &DeepMitigationNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        Self::forward_pass_static(model, input)
    }

    fn calculate_circuit_connectivity(&self, circuit: &InterfaceCircuit) -> Result<f64> {
        // With fewer than two qubits there are no possible connections.
        if circuit.num_qubits < 2 {
            return Ok(0.0);
        }

        let mut connectivity_sum = 0.0;
        let total_possible_connections = (circuit.num_qubits * (circuit.num_qubits - 1)) / 2;

        for gate in &circuit.gates {
            if gate.qubits.len() > 1 {
                connectivity_sum += 1.0;
            }
        }

        Ok(connectivity_sum / total_possible_connections as f64)
    }

    fn calculate_entanglement_estimate(&self, circuit: &InterfaceCircuit) -> Result<f64> {
        if circuit.gates.is_empty() {
            return Ok(0.0);
        }

        let mut entangling_gates = 0;

        for gate in &circuit.gates {
            match gate.gate_type {
                InterfaceGateType::CNOT
                | InterfaceGateType::CZ
                | InterfaceGateType::CY
                | InterfaceGateType::SWAP
                | InterfaceGateType::ISwap
                | InterfaceGateType::Toffoli => {
                    entangling_gates += 1;
                }
                _ => {}
            }
        }

        Ok(entangling_gates as f64 / circuit.gates.len() as f64)
    }

    fn features_to_state_key(features: &Array1<f64>) -> String {
        let discretized: Vec<i32> = features
            .iter()
            .map(|&x| (x * 10.0).round() as i32)
            .collect();
        format!("{:?}", discretized)
    }

    fn calculate_confidence(&self, features: &Array1<f64>, _mitigated_value: f64) -> Result<f64> {
        let feature_variance = features.var(0.0);
        let confidence = 1.0 / (1.0 + feature_variance);
        Ok(confidence.clamp(0.0, 1.0))
    }

    fn estimate_error_reduction(&self, original: &Array1<f64>, mitigated: f64) -> Result<f64> {
        let original_mean = original.mean().unwrap_or(0.0);
        let original_std = original.var(0.0).sqrt();

        // Without spread in the raw data there is no meaningful reduction to report.
        if original_std < f64::EPSILON {
            return Ok(0.0);
        }

        let estimated_improvement =
            (original_std - (mitigated - original_mean).abs()) / original_std;
        Ok(estimated_improvement.clamp(0.0, 1.0))
    }

    fn update_models(&mut self, features: &Array1<f64>, target: f64) -> Result<()> {
        if self.training_history.len() >= self.config.memory_size {
            self.training_history.pop_front();
        }
        self.training_history.push_back((features.clone(), target));

        if self.training_history.len() >= self.config.batch_size {
            self.update_deep_model()?;
        }

        self.update_rl_agent(features, target)?;

        Ok(())
    }

    fn update_deep_model(&mut self) -> Result<()> {
        if let Some(ref mut model) = self.deep_model {
            let batch_size = self.config.batch_size.min(self.training_history.len());
            let batch: Vec<_> = self
                .training_history
                .iter()
                .rev()
                .take(batch_size)
                .collect();

            let mut total_loss = 0.0;

            for (features, target) in batch {
                let prediction = Self::forward_pass_static(model, features)?;
                let loss = (prediction[0] - target).powi(2);
                total_loss += loss;
            }

            // Track the mean squared error over the most recent batch.
            let avg_loss = total_loss / batch_size as f64;
            model.loss_history.push(avg_loss);
        }

        Ok(())
    }

    fn update_rl_agent(&mut self, features: &Array1<f64>, reward: f64) -> Result<()> {
        if let Some(ref mut agent) = self.rl_agent {
            let _state_key = Self::features_to_state_key(features);

            // Update running statistics.
            agent.stats.episodes += 1;
            agent.stats.avg_reward = (agent.stats.avg_reward * (agent.stats.episodes - 1) as f64
                + reward)
                / agent.stats.episodes as f64;

            // Decay exploration while keeping a minimum exploration floor.
            agent.exploration_rate *= 0.995;
            agent.exploration_rate = agent.exploration_rate.max(0.01);
        }

        Ok(())
    }
}

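/// Lightweight benchmark exercising the full mitigation pipeline on a small
/// example circuit and printing summary statistics.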
pub fn benchmark_advanced_ml_error_mitigation() -> Result<()> {
    println!("Benchmarking Advanced ML Error Mitigation...");

    let config = AdvancedMLMitigationConfig::default();
    let mut mitigator = AdvancedMLErrorMitigator::new(config)?;

    // Small test circuit: Hadamard, CNOT, and a rotation.
    let mut circuit = InterfaceCircuit::new(4, 0);
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::RZ(0.5), vec![2]));

    // Noisy measurements scattered around an ideal value of 0.5.
    let noisy_measurements = Array1::from_vec(vec![0.48, 0.52, 0.47, 0.53, 0.49]);

    let start_time = std::time::Instant::now();

    let result = mitigator.mitigate_errors(&noisy_measurements, &circuit)?;

    let duration = start_time.elapsed();

    println!("✅ Advanced ML Error Mitigation Results:");
    println!("   Mitigated Value: {:.6}", result.mitigated_value);
    println!("   Confidence: {:.4}", result.confidence);
    println!("   Model Used: {}", result.model_used);
    println!("   Error Reduction: {:.4}", result.error_reduction);
    println!("   Computation Time: {:.2}ms", duration.as_secs_f64() * 1000.0);

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_advanced_ml_mitigator_creation() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config);
        assert!(mitigator.is_ok());
    }

    #[test]
    fn test_feature_extraction() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let mut circuit = InterfaceCircuit::new(2, 0);
        circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
        circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));

        let measurements = Array1::from_vec(vec![0.5, 0.5, 0.5]);
        let features = mitigator.extract_features(&circuit, &measurements);

        assert!(features.is_ok());
        let features = features.unwrap();
        assert!(features.len() > 0);
    }

    #[test]
    fn test_activation_functions() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        assert_eq!(
            mitigator.apply_activation(-1.0, ActivationFunction::ReLU),
            0.0
        );
        assert_eq!(
            mitigator.apply_activation(1.0, ActivationFunction::ReLU),
            1.0
        );

        let sigmoid_result = mitigator.apply_activation(0.0, ActivationFunction::Sigmoid);
        assert!((sigmoid_result - 0.5).abs() < 1e-10);
    }

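    // Additional coverage (sketch): all smooth activations defined above
    // evaluate to zero at the origin, which pins each implementation at one point.
    #[test]
    fn test_smooth_activations_at_zero() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        for activation in [
            ActivationFunction::Tanh,
            ActivationFunction::Swish,
            ActivationFunction::GELU,
        ] {
            let value = mitigator.apply_activation(0.0, activation);
            assert!(value.abs() < 1e-12);
        }
    }
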
    #[test]
    fn test_mitigation_strategy_selection() {
        let config = AdvancedMLMitigationConfig::default();
        let mut mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let features = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let strategy = mitigator.select_mitigation_strategy(&features);

        assert!(strategy.is_ok());
    }

    #[test]
    fn test_traditional_mitigation() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let measurements = Array1::from_vec(vec![0.48, 0.52, 0.49]);
        let circuit = InterfaceCircuit::new(2, 0);

        let result = mitigator.apply_traditional_mitigation(
            MitigationAction::ZeroNoiseExtrapolation,
            &measurements,
            &circuit,
        );

        assert!(result.is_ok());
    }
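
    // Additional coverage (sketch): the default deep model maps the
    // 18-dimensional feature vector produced by `extract_features` to a single output.
    #[test]
    fn test_deep_model_forward_pass_shape() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let model = mitigator.create_deep_model().unwrap();
        let input = Array1::zeros(model.layers[0]);
        let output = mitigator.forward_pass(&model, &input).unwrap();

        assert_eq!(output.len(), 1);
    }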
}