//! Advanced machine-learning-assisted quantum error mitigation.
//!
//! Combines deep learning, reinforcement learning, transfer learning, and
//! ensemble methods to mitigate errors in noisy quantum measurements.

use scirs2_core::ndarray::{Array1, Array2, Array3};
use scirs2_core::random::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};

use crate::circuit_interfaces::{InterfaceCircuit, InterfaceGate, InterfaceGateType};
use crate::error::{Result, SimulatorError};
use scirs2_core::random::prelude::*;

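/// Configuration for the advanced ML error mitigation pipeline.
///
/// Illustrative override of the defaults (a sketch; the particular values
/// shown are arbitrary):
///
/// ```ignore
/// let config = AdvancedMLMitigationConfig {
///     enable_transfer_learning: true,
///     ensemble_size: 8,
///     ..Default::default()
/// };
/// ```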
#[derive(Debug, Clone)]
pub struct AdvancedMLMitigationConfig {
    /// Enable the deep feed-forward mitigation network.
    pub enable_deep_learning: bool,
    /// Enable the Q-learning strategy-selection agent.
    pub enable_reinforcement_learning: bool,
    /// Enable cross-device transfer learning.
    pub enable_transfer_learning: bool,
    /// Enable adversarial training.
    pub enable_adversarial_training: bool,
    /// Enable ensemble combination of mitigation models.
    pub enable_ensemble_methods: bool,
    /// Enable online (incremental) model updates.
    pub enable_online_learning: bool,
    /// Learning rate for model updates.
    pub learning_rate: f64,
    /// Mini-batch size used for training.
    pub batch_size: usize,
    /// Maximum number of stored training samples.
    pub memory_size: usize,
    /// Epsilon-greedy exploration rate for the RL agent.
    pub exploration_rate: f64,
    /// Blending factor for transfer learning.
    pub transfer_alpha: f64,
    /// Number of models in the ensemble.
    pub ensemble_size: usize,
}

impl Default for AdvancedMLMitigationConfig {
    fn default() -> Self {
        Self {
            enable_deep_learning: true,
            enable_reinforcement_learning: true,
            enable_transfer_learning: false,
            enable_adversarial_training: false,
            enable_ensemble_methods: true,
            enable_online_learning: true,
            learning_rate: 0.001,
            batch_size: 64,
            memory_size: 10000,
            exploration_rate: 0.1,
            transfer_alpha: 0.5,
            ensemble_size: 5,
        }
    }
}

/// Feed-forward neural network used for error mitigation.
#[derive(Debug, Clone)]
pub struct DeepMitigationNetwork {
    /// Layer sizes, including input and output layers.
    pub layers: Vec<usize>,
    /// Weight matrices, one per layer transition.
    pub weights: Vec<Array2<f64>>,
    /// Bias vectors, one per layer transition.
    pub biases: Vec<Array1<f64>>,
    /// Activation function applied after each layer.
    pub activation: ActivationFunction,
    /// Training loss recorded per update.
    pub loss_history: Vec<f64>,
}

/// Supported activation functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ActivationFunction {
    ReLU,
    Sigmoid,
    Tanh,
    Swish,
    GELU,
}

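/// Tabular Q-learning agent that selects among mitigation strategies.
///
/// The `learning_rate` and `discount_factor` fields correspond to the
/// standard tabular Q-learning update (shown for reference only; a sketch,
/// not a literal transcription of this module's update logic):
///
/// Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))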
#[derive(Debug, Clone)]
pub struct QLearningMitigationAgent {
    pub q_table: HashMap<String, HashMap<MitigationAction, f64>>,
    pub learning_rate: f64,
    pub discount_factor: f64,
    pub exploration_rate: f64,
    pub experience_buffer: VecDeque<Experience>,
    pub stats: RLTrainingStats,
}

/// Mitigation strategies the agent can choose from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MitigationAction {
    ZeroNoiseExtrapolation,
    VirtualDistillation,
    SymmetryVerification,
    PauliTwirling,
    RandomizedCompiling,
    ClusterExpansion,
    MachineLearningPrediction,
    EnsembleMitigation,
}

/// A single state-action-reward transition stored for replay.
#[derive(Debug, Clone)]
pub struct Experience {
    pub state: Array1<f64>,
    pub action: MitigationAction,
    pub reward: f64,
    pub next_state: Array1<f64>,
    pub done: bool,
}

/// Aggregate statistics collected while training the RL agent.
#[derive(Debug, Clone, Default)]
pub struct RLTrainingStats {
    pub episodes: usize,
    pub avg_reward: f64,
    pub success_rate: f64,
    pub exploration_decay: f64,
    pub loss_convergence: Vec<f64>,
}

/// Transfer-learning model that adapts mitigation knowledge between devices.
#[derive(Debug, Clone)]
pub struct TransferLearningModel {
    pub source_device: DeviceCharacteristics,
    pub target_device: DeviceCharacteristics,
    pub feature_extractor: DeepMitigationNetwork,
    pub device_heads: HashMap<String, DeepMitigationNetwork>,
    pub transfer_alpha: f64,
    pub adaptation_stats: TransferStats,
}

/// Noise and connectivity characteristics of a quantum device.
#[derive(Debug, Clone)]
pub struct DeviceCharacteristics {
    pub device_id: String,
    pub gate_errors: HashMap<String, f64>,
    pub coherence_times: HashMap<String, f64>,
    pub connectivity: Array2<bool>,
    pub noise_correlations: Array2<f64>,
}

/// Statistics describing how well transfer learning adapted to the target device.
#[derive(Debug, Clone, Default)]
pub struct TransferStats {
    pub adaptation_loss: f64,
    pub source_performance: f64,
    pub target_performance: f64,
    pub transfer_efficiency: f64,
}

/// Ensemble of mitigation models combined according to a strategy.
pub struct EnsembleMitigation {
    pub models: Vec<Box<dyn MitigationModel>>,
    pub weights: Array1<f64>,
    pub combination_strategy: EnsembleStrategy,
    pub performance_history: Vec<f64>,
}

/// How ensemble member predictions are combined.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EnsembleStrategy {
    WeightedAverage,
    MajorityVoting,
    Stacking,
    DynamicSelection,
    BayesianAveraging,
}

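/// Common interface implemented by individual mitigation models, e.g. the
/// members of an `EnsembleMitigation`.
///
/// Illustrative implementor (a minimal sketch; `MeanModel` is hypothetical
/// and not part of this crate):
///
/// ```ignore
/// struct MeanModel;
///
/// impl MitigationModel for MeanModel {
///     fn mitigate(&self, measurements: &Array1<f64>, _circuit: &InterfaceCircuit) -> Result<f64> {
///         Ok(measurements.mean().unwrap_or(0.0))
///     }
///     fn update(&mut self, _training_data: &[(Array1<f64>, f64)]) -> Result<()> {
///         Ok(())
///     }
///     fn confidence(&self) -> f64 {
///         0.5
///     }
///     fn name(&self) -> String {
///         "mean".to_string()
///     }
/// }
/// ```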
pub trait MitigationModel: Send + Sync {
    /// Produce a mitigated expectation value from raw measurements.
    fn mitigate(&self, measurements: &Array1<f64>, circuit: &InterfaceCircuit) -> Result<f64>;

    /// Update the model from (feature, target) training pairs.
    fn update(&mut self, training_data: &[(Array1<f64>, f64)]) -> Result<()>;

    /// Confidence of the model in its predictions, in [0, 1].
    fn confidence(&self) -> f64;

    /// Human-readable model name.
    fn name(&self) -> String;
}

/// Result of an advanced ML error mitigation run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvancedMLMitigationResult {
    pub mitigated_value: f64,
    pub confidence: f64,
    pub model_used: String,
    pub raw_measurements: Vec<f64>,
    pub overhead: f64,
    pub error_reduction: f64,
    pub performance_metrics: PerformanceMetrics,
}

/// Standard regression-style performance metrics.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    pub mae: f64,
    pub rmse: f64,
    pub r_squared: f64,
    pub bias: f64,
    pub variance: f64,
    pub computation_time_ms: f64,
}

/// Graph neural network operating on circuit structure.
#[derive(Debug, Clone)]
pub struct GraphMitigationNetwork {
    pub node_features: Array2<f64>,
    pub edge_features: Array3<f64>,
    pub attention_weights: Array2<f64>,
    pub conv_layers: Vec<GraphConvLayer>,
    pub pooling: GraphPooling,
}

/// A single graph convolution layer.
#[derive(Debug, Clone)]
pub struct GraphConvLayer {
    pub weights: Array2<f64>,
    pub bias: Array1<f64>,
    pub activation: ActivationFunction,
}

/// Pooling strategies for aggregating node features.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GraphPooling {
    Mean,
    Max,
    Sum,
    Attention,
    Set2Set,
}

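/// Main coordinator for advanced ML-based error mitigation.
///
/// Illustrative usage (mirrors `benchmark_advanced_ml_error_mitigation` below):
///
/// ```ignore
/// let config = AdvancedMLMitigationConfig::default();
/// let mut mitigator = AdvancedMLErrorMitigator::new(config)?;
///
/// let mut circuit = InterfaceCircuit::new(2, 0);
/// circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
/// circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));
///
/// let measurements = Array1::from_vec(vec![0.48, 0.52, 0.49]);
/// let result = mitigator.mitigate_errors(&measurements, &circuit)?;
/// println!("mitigated value: {:.6}", result.mitigated_value);
/// ```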
pub struct AdvancedMLErrorMitigator {
    config: AdvancedMLMitigationConfig,
    deep_model: Option<DeepMitigationNetwork>,
    rl_agent: Option<QLearningMitigationAgent>,
    transfer_model: Option<TransferLearningModel>,
    ensemble: Option<EnsembleMitigation>,
    graph_model: Option<GraphMitigationNetwork>,
    training_history: VecDeque<(Array1<f64>, f64)>,
    performance_tracker: PerformanceTracker,
}

/// Tracks per-model accuracy, cost, and error-reduction history.
#[derive(Debug, Clone, Default)]
pub struct PerformanceTracker {
    pub accuracy_history: HashMap<String, Vec<f64>>,
    pub cost_history: HashMap<String, Vec<f64>>,
    pub error_reduction_history: Vec<f64>,
    pub best_models: HashMap<String, String>,
}

impl AdvancedMLErrorMitigator {
    /// Create a new mitigator, initializing the models enabled in `config`.
    pub fn new(config: AdvancedMLMitigationConfig) -> Result<Self> {
        let mut mitigator = Self {
            config: config.clone(),
            deep_model: None,
            rl_agent: None,
            transfer_model: None,
            ensemble: None,
            graph_model: None,
            training_history: VecDeque::with_capacity(config.memory_size),
            performance_tracker: PerformanceTracker::default(),
        };

        if config.enable_deep_learning {
            mitigator.deep_model = Some(mitigator.create_deep_model()?);
        }

        if config.enable_reinforcement_learning {
            mitigator.rl_agent = Some(mitigator.create_rl_agent()?);
        }

        if config.enable_ensemble_methods {
            mitigator.ensemble = Some(mitigator.create_ensemble()?);
        }

        Ok(mitigator)
    }

    /// Run the full mitigation pipeline on a set of raw measurements.
    pub fn mitigate_errors(
        &mut self,
        measurements: &Array1<f64>,
        circuit: &InterfaceCircuit,
    ) -> Result<AdvancedMLMitigationResult> {
        let start_time = std::time::Instant::now();

        // Extract circuit and measurement features.
        let features = self.extract_features(circuit, measurements)?;

        // Choose a mitigation strategy (via the RL agent when enabled).
        let strategy = self.select_mitigation_strategy(&features)?;

        let mitigated_value = match strategy {
            MitigationAction::MachineLearningPrediction => {
                self.apply_deep_learning_mitigation(&features, measurements)?
            }
            MitigationAction::EnsembleMitigation => {
                self.apply_ensemble_mitigation(&features, measurements, circuit)?
            }
            _ => self.apply_traditional_mitigation(strategy, measurements, circuit)?,
        };

        let confidence = self.calculate_confidence(&features, mitigated_value)?;
        let error_reduction = self.estimate_error_reduction(measurements, mitigated_value)?;

        let computation_time = start_time.elapsed().as_millis() as f64;

        // Feed the outcome back into the models for online learning.
        self.update_models(&features, mitigated_value)?;

        Ok(AdvancedMLMitigationResult {
            mitigated_value,
            confidence,
            model_used: format!("{strategy:?}"),
            raw_measurements: measurements.to_vec(),
            overhead: computation_time / 1000.0,
            error_reduction,
            performance_metrics: PerformanceMetrics {
                computation_time_ms: computation_time,
                ..Default::default()
            },
        })
    }

    /// Build the default feed-forward network used for mitigation.
    pub fn create_deep_model(&self) -> Result<DeepMitigationNetwork> {
        // 18 input features (see `extract_features`), one scalar output.
        let layers = vec![18, 128, 64, 32, 1];
        let mut weights = Vec::new();
        let mut biases = Vec::new();

        for i in 0..layers.len() - 1 {
            let fan_in = layers[i];
            let fan_out = layers[i + 1];
            // Xavier/Glorot uniform initialization.
            let limit = (6.0 / (fan_in + fan_out) as f64).sqrt();

            let w =
                Array2::from_shape_fn((fan_out, fan_in), |_| thread_rng().gen_range(-limit..limit));
            let b = Array1::zeros(fan_out);

            weights.push(w);
            biases.push(b);
        }

        Ok(DeepMitigationNetwork {
            layers,
            weights,
            biases,
            activation: ActivationFunction::ReLU,
            loss_history: Vec::new(),
        })
    }

    /// Build the Q-learning agent used for strategy selection.
    pub fn create_rl_agent(&self) -> Result<QLearningMitigationAgent> {
        Ok(QLearningMitigationAgent {
            q_table: HashMap::new(),
            learning_rate: self.config.learning_rate,
            discount_factor: 0.95,
            exploration_rate: self.config.exploration_rate,
            experience_buffer: VecDeque::with_capacity(self.config.memory_size),
            stats: RLTrainingStats::default(),
        })
    }

    /// Build an (initially empty) ensemble with uniform weights.
    fn create_ensemble(&self) -> Result<EnsembleMitigation> {
        let models: Vec<Box<dyn MitigationModel>> = Vec::new();
        let weights = Array1::ones(self.config.ensemble_size) / self.config.ensemble_size as f64;

        Ok(EnsembleMitigation {
            models,
            weights,
            combination_strategy: EnsembleStrategy::WeightedAverage,
            performance_history: Vec::new(),
        })
    }

    /// Extract a fixed-length feature vector from the circuit and measurements.
    pub fn extract_features(
        &self,
        circuit: &InterfaceCircuit,
        measurements: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut features = Vec::new();

        // Basic circuit size features.
        features.push(circuit.gates.len() as f64);
        features.push(circuit.num_qubits as f64);

        // Count gates by type.
        let mut gate_counts = HashMap::new();
        for gate in &circuit.gates {
            *gate_counts
                .entry(format!("{:?}", gate.gate_type))
                .or_insert(0) += 1;
        }

        // Fraction of each common gate type (guard against empty circuits).
        let total_gates = (circuit.gates.len() as f64).max(1.0);
        for gate_type in [
            "PauliX", "PauliY", "PauliZ", "Hadamard", "CNOT", "CZ", "RX", "RY", "RZ", "Phase",
        ] {
            let count = gate_counts.get(gate_type).unwrap_or(&0);
            features.push(*count as f64 / total_gates);
        }

        // Measurement statistics.
        features.push(measurements.mean().unwrap_or(0.0));
        features.push(measurements.std(0.0));
        features.push(measurements.var(0.0));
        features.push(measurements.len() as f64);

        // Structural features.
        features.push(self.calculate_circuit_connectivity(circuit)?);
        features.push(self.calculate_entanglement_estimate(circuit)?);

        Ok(Array1::from_vec(features))
    }

    /// Choose a mitigation strategy, using epsilon-greedy Q-learning when enabled.
    pub fn select_mitigation_strategy(
        &mut self,
        features: &Array1<f64>,
    ) -> Result<MitigationAction> {
        if let Some(ref mut agent) = self.rl_agent {
            let state_key = Self::features_to_state_key(features);

            if thread_rng().gen::<f64>() < agent.exploration_rate {
                // Explore: pick a random strategy.
                let actions = [
                    MitigationAction::ZeroNoiseExtrapolation,
                    MitigationAction::VirtualDistillation,
                    MitigationAction::MachineLearningPrediction,
                    MitigationAction::EnsembleMitigation,
                ];
                Ok(actions[thread_rng().gen_range(0..actions.len())])
            } else {
                // Exploit: pick the action with the highest Q-value for this state.
                let q_values = agent.q_table.get(&state_key).cloned().unwrap_or_default();

                let best_action = q_values
                    .iter()
                    .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
                    .map_or(
                        MitigationAction::MachineLearningPrediction,
                        |(action, _)| *action,
                    );

                Ok(best_action)
            }
        } else {
            Ok(MitigationAction::MachineLearningPrediction)
        }
    }

    /// Predict a correction factor with the deep model and apply it to the mean.
    fn apply_deep_learning_mitigation(
        &self,
        features: &Array1<f64>,
        measurements: &Array1<f64>,
    ) -> Result<f64> {
        if let Some(ref model) = self.deep_model {
            let prediction = Self::forward_pass_static(model, features)?;

            let correction_factor = prediction[0];
            let mitigated_value = measurements.mean().unwrap_or(0.0) * (1.0 + correction_factor);

            Ok(mitigated_value)
        } else {
            Err(SimulatorError::InvalidConfiguration(
                "Deep learning model not initialized".to_string(),
            ))
        }
    }

    /// Combine predictions from ensemble members according to the configured strategy.
    fn apply_ensemble_mitigation(
        &self,
        features: &Array1<f64>,
        measurements: &Array1<f64>,
        circuit: &InterfaceCircuit,
    ) -> Result<f64> {
        if let Some(ref ensemble) = self.ensemble {
            // Fall back to the raw mean if no ensemble members are registered yet.
            if ensemble.models.is_empty() {
                return Ok(measurements.mean().unwrap_or(0.0));
            }

            let mut predictions = Vec::new();

            for model in &ensemble.models {
                let prediction = model.mitigate(measurements, circuit)?;
                predictions.push(prediction);
            }

            let mitigated_value = match ensemble.combination_strategy {
                EnsembleStrategy::WeightedAverage => {
                    let weighted_sum: f64 = predictions
                        .iter()
                        .zip(ensemble.weights.iter())
                        .map(|(pred, weight)| pred * weight)
                        .sum();
                    weighted_sum
                }
                EnsembleStrategy::MajorityVoting => {
                    // Use the median prediction as a robust consensus.
                    let mut sorted_predictions = predictions.clone();
                    sorted_predictions
                        .sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                    sorted_predictions[sorted_predictions.len() / 2]
                }
                _ => predictions.iter().sum::<f64>() / predictions.len() as f64,
            };

            Ok(mitigated_value)
        } else {
            Ok(measurements.mean().unwrap_or(0.0))
        }
    }

    /// Apply a classical (non-ML) mitigation technique.
    pub fn apply_traditional_mitigation(
        &self,
        strategy: MitigationAction,
        measurements: &Array1<f64>,
        _circuit: &InterfaceCircuit,
    ) -> Result<f64> {
        match strategy {
            MitigationAction::ZeroNoiseExtrapolation => {
                // Scale measurements by assumed noise factors, then extrapolate to zero noise.
                let noise_factors = [1.0, 1.5, 2.0];
                let values: Vec<f64> = noise_factors
                    .iter()
                    .zip(measurements.iter())
                    .map(|(factor, &val)| val / factor)
                    .collect();

                // Linear (Richardson) extrapolation: 2 * v0 - v1.
                let extrapolated = 2.0f64.mul_add(values[0], -values[1]);
                Ok(extrapolated)
            }
            MitigationAction::VirtualDistillation => {
                // Simple variance-based correction of the mean.
                let mean_val = measurements.mean().unwrap_or(0.0);
                let variance = measurements.var(0.0);
                let corrected = mean_val + variance * 0.1;
                Ok(corrected)
            }
            _ => Ok(measurements.mean().unwrap_or(0.0)),
        }
    }

    /// Run a forward pass through the network (static helper).
    fn forward_pass_static(
        model: &DeepMitigationNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut current = input.clone();

        for (weights, bias) in model.weights.iter().zip(model.biases.iter()) {
            // Affine transformation followed by the activation function.
            current = weights.dot(&current) + bias;
            current.mapv_inplace(|x| Self::apply_activation_static(x, model.activation));
        }

        Ok(current)
    }

    /// Evaluate the given activation function at `x`.
    fn apply_activation_static(x: f64, activation: ActivationFunction) -> f64 {
        match activation {
            ActivationFunction::ReLU => x.max(0.0),
            ActivationFunction::Sigmoid => 1.0 / (1.0 + (-x).exp()),
            ActivationFunction::Tanh => x.tanh(),
            ActivationFunction::Swish => x * (1.0 / (1.0 + (-x).exp())),
            // tanh approximation of GELU.
            ActivationFunction::GELU => {
                0.5 * x
                    * (1.0
                        + ((2.0 / std::f64::consts::PI).sqrt()
                            * 0.044_715f64.mul_add(x.powi(3), x))
                        .tanh())
            }
        }
    }

    /// Evaluate the given activation function at `x`.
    pub fn apply_activation(&self, x: f64, activation: ActivationFunction) -> f64 {
        Self::apply_activation_static(x, activation)
    }

    /// Run a forward pass through `model` on `input`.
    pub fn forward_pass(
        &self,
        model: &DeepMitigationNetwork,
        input: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        Self::forward_pass_static(model, input)
    }

    /// Fraction of multi-qubit gates relative to all possible qubit pairs.
    fn calculate_circuit_connectivity(&self, circuit: &InterfaceCircuit) -> Result<f64> {
        if circuit.num_qubits < 2 {
            return Ok(0.0);
        }

        let mut connectivity_sum = 0.0;
        let total_possible_connections = (circuit.num_qubits * (circuit.num_qubits - 1)) / 2;

        for gate in &circuit.gates {
            if gate.qubits.len() > 1 {
                connectivity_sum += 1.0;
            }
        }

        Ok(connectivity_sum / total_possible_connections as f64)
    }

    /// Fraction of entangling gates in the circuit.
    fn calculate_entanglement_estimate(&self, circuit: &InterfaceCircuit) -> Result<f64> {
        if circuit.gates.is_empty() {
            return Ok(0.0);
        }

        let mut entangling_gates = 0;

        for gate in &circuit.gates {
            match gate.gate_type {
                InterfaceGateType::CNOT
                | InterfaceGateType::CZ
                | InterfaceGateType::CY
                | InterfaceGateType::SWAP
                | InterfaceGateType::ISwap
                | InterfaceGateType::Toffoli => {
                    entangling_gates += 1;
                }
                _ => {}
            }
        }

        Ok(entangling_gates as f64 / circuit.gates.len() as f64)
    }

    /// Discretize the feature vector into a hashable state key for the Q-table.
    fn features_to_state_key(features: &Array1<f64>) -> String {
        let discretized: Vec<i32> = features
            .iter()
            .map(|&x| (x * 10.0).round() as i32)
            .collect();
        format!("{discretized:?}")
    }

    /// Heuristic confidence estimate based on feature variance.
    fn calculate_confidence(&self, features: &Array1<f64>, _mitigated_value: f64) -> Result<f64> {
        let feature_variance = features.var(0.0);
        let confidence = 1.0 / (1.0 + feature_variance);
        Ok(confidence.clamp(0.0, 1.0))
    }

    /// Estimate the relative error reduction achieved by mitigation.
    fn estimate_error_reduction(&self, original: &Array1<f64>, mitigated: f64) -> Result<f64> {
        let original_mean = original.mean().unwrap_or(0.0);
        let original_variance = original.var(0.0);

        let estimated_improvement = (original_variance.sqrt() - (mitigated - original_mean).abs())
            / original_variance.sqrt();
        Ok(estimated_improvement.clamp(0.0, 1.0))
    }

    /// Record the new sample and trigger online updates of the models.
    fn update_models(&mut self, features: &Array1<f64>, target: f64) -> Result<()> {
        // Keep the training history bounded by `memory_size`.
        if self.training_history.len() >= self.config.memory_size {
            self.training_history.pop_front();
        }
        self.training_history.push_back((features.clone(), target));

        if self.training_history.len() >= self.config.batch_size {
            self.update_deep_model()?;
        }

        self.update_rl_agent(features, target)?;

        Ok(())
    }

    /// Evaluate the deep model on the most recent batch and record the loss.
    fn update_deep_model(&mut self) -> Result<()> {
        if let Some(ref mut model) = self.deep_model {
            let batch_size = self.config.batch_size.min(self.training_history.len());
            let batch: Vec<_> = self
                .training_history
                .iter()
                .rev()
                .take(batch_size)
                .collect();

            let mut total_loss = 0.0;

            for (features, target) in batch {
                let prediction = Self::forward_pass_static(model, features)?;
                let loss = (prediction[0] - target).powi(2);
                total_loss += loss;
            }

            let avg_loss = total_loss / batch_size as f64;
            model.loss_history.push(avg_loss);
        }

        Ok(())
    }

    /// Update running RL statistics and decay the exploration rate.
    fn update_rl_agent(&mut self, features: &Array1<f64>, reward: f64) -> Result<()> {
        if let Some(ref mut agent) = self.rl_agent {
            let state_key = Self::features_to_state_key(features);

            // Update the running average reward.
            agent.stats.episodes += 1;
            agent.stats.avg_reward = agent
                .stats
                .avg_reward
                .mul_add((agent.stats.episodes - 1) as f64, reward)
                / agent.stats.episodes as f64;

            // Decay exploration toward a small floor.
            agent.exploration_rate *= 0.995;
            agent.exploration_rate = agent.exploration_rate.max(0.01);
        }

        Ok(())
    }
}

/// Run a small benchmark of the advanced ML error mitigation pipeline.
pub fn benchmark_advanced_ml_error_mitigation() -> Result<()> {
    println!("Benchmarking Advanced ML Error Mitigation...");

    let config = AdvancedMLMitigationConfig::default();
    let mut mitigator = AdvancedMLErrorMitigator::new(config)?;

    // Build a small test circuit.
    let mut circuit = InterfaceCircuit::new(4, 0);
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));
    circuit.add_gate(InterfaceGate::new(InterfaceGateType::RZ(0.5), vec![2]));

    // Simulated noisy measurements scattered around an ideal value of 0.5.
    let noisy_measurements = Array1::from_vec(vec![0.48, 0.52, 0.47, 0.53, 0.49]);

    let start_time = std::time::Instant::now();

    let result = mitigator.mitigate_errors(&noisy_measurements, &circuit)?;

    let duration = start_time.elapsed();

    println!("✅ Advanced ML Error Mitigation Results:");
    println!(" Mitigated Value: {:.6}", result.mitigated_value);
    println!(" Confidence: {:.4}", result.confidence);
    println!(" Model Used: {}", result.model_used);
    println!(" Error Reduction: {:.4}", result.error_reduction);
    println!(" Computation Time: {:.2}ms", duration.as_secs_f64() * 1000.0);

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_advanced_ml_mitigator_creation() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config);
        assert!(mitigator.is_ok());
    }

    #[test]
    fn test_feature_extraction() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let mut circuit = InterfaceCircuit::new(2, 0);
        circuit.add_gate(InterfaceGate::new(InterfaceGateType::Hadamard, vec![0]));
        circuit.add_gate(InterfaceGate::new(InterfaceGateType::CNOT, vec![0, 1]));

        let measurements = Array1::from_vec(vec![0.5, 0.5, 0.5]);
        let features = mitigator.extract_features(&circuit, &measurements);

        assert!(features.is_ok());
        let features = features.unwrap();
        assert!(!features.is_empty());
    }

    #[test]
    fn test_activation_functions() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        assert_eq!(
            mitigator.apply_activation(-1.0, ActivationFunction::ReLU),
            0.0
        );
        assert_eq!(
            mitigator.apply_activation(1.0, ActivationFunction::ReLU),
            1.0
        );

        let sigmoid_result = mitigator.apply_activation(0.0, ActivationFunction::Sigmoid);
        assert!((sigmoid_result - 0.5).abs() < 1e-10);
    }

    #[test]
    fn test_mitigation_strategy_selection() {
        let config = AdvancedMLMitigationConfig::default();
        let mut mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let features = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let strategy = mitigator.select_mitigation_strategy(&features);

        assert!(strategy.is_ok());
    }

    #[test]
    fn test_traditional_mitigation() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let measurements = Array1::from_vec(vec![0.48, 0.52, 0.49]);
        let circuit = InterfaceCircuit::new(2, 0);

        let result = mitigator.apply_traditional_mitigation(
            MitigationAction::ZeroNoiseExtrapolation,
            &measurements,
            &circuit,
        );

        assert!(result.is_ok());
    }
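
    // Added sketch test (assumes the default layer sizes in `create_deep_model`):
    // the network should map the 18-dimensional feature vector to a single
    // scalar output.
    #[test]
    fn test_forward_pass_output_dimension() {
        let config = AdvancedMLMitigationConfig::default();
        let mitigator = AdvancedMLErrorMitigator::new(config).unwrap();

        let model = mitigator.create_deep_model().unwrap();
        let input = Array1::zeros(18);
        let output = mitigator.forward_pass(&model, &input).unwrap();

        assert_eq!(output.len(), 1);
    }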
}