Struct QuantumExplainableAI

pub struct QuantumExplainableAI { /* private fields */ }

Main quantum explainable AI engine
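
A minimal end-to-end sketch, based on the repository example shown below. It assumes QuantumNeuralNetwork, QNNLayerType, ExplanationMethod, and AttributionMethod from this crate plus Array1 from ndarray are in scope, and that the code runs inside a function returning this crate's Result:

// Sketch only: build a small quantum model, attach one explanation method,
// and read the resulting feature attributions.
let layers = vec![
    QNNLayerType::EncodingLayer { num_features: 4 },
    QNNLayerType::VariationalLayer { num_params: 8 },
    QNNLayerType::MeasurementLayer {
        measurement_basis: "computational".to_string(),
    },
];
let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

let method = ExplanationMethod::QuantumFeatureAttribution {
    method: AttributionMethod::IntegratedGradients,
    num_samples: 50,
    baseline: Some(Array1::zeros(4)),
};
let mut xai = QuantumExplainableAI::new(model, vec![method]);

let input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
let explanation = xai.explain(&input)?;

// Only the components produced by the configured methods are populated.
if let Some(ref attributions) = explanation.feature_attributions {
    for (i, &attr) in attributions.iter().enumerate() {
        println!("Feature {i}: {attr:+.4}");
    }
}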

Implementations

impl QuantumExplainableAI

pub fn new(model: QuantumNeuralNetwork, methods: Vec<ExplanationMethod>) -> Self

Create a new quantum explainable AI instance from a quantum neural network model and the list of explanation methods to apply
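
A short constructor sketch (a judgment-call illustration using only names from the example below; `model` is a QuantumNeuralNetwork built beforehand). The methods passed here appear to determine which fields of the returned explanation are populated, since the example checks each field as an Option:

// Sketch: construct the engine with two explanation methods.
// `model` is assumed to be a QuantumNeuralNetwork created with QuantumNeuralNetwork::new.
let methods = vec![
    ExplanationMethod::CircuitVisualization {
        include_measurements: true,
        parameter_sensitivity: true,
    },
    ExplanationMethod::StateAnalysis {
        entanglement_measures: true,
        coherence_analysis: true,
        superposition_analysis: true,
    },
];
let mut xai = QuantumExplainableAI::new(model, methods);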

Examples found in repository:
examples/quantum_explainable_ai.rs (line 119)
53fn feature_attribution_demo() -> Result<()> {
54    // Create quantum model
55    let layers = vec![
56        QNNLayerType::EncodingLayer { num_features: 4 },
57        QNNLayerType::VariationalLayer { num_params: 12 },
58        QNNLayerType::EntanglementLayer {
59            connectivity: "circular".to_string(),
60        },
61        QNNLayerType::VariationalLayer { num_params: 8 },
62        QNNLayerType::MeasurementLayer {
63            measurement_basis: "computational".to_string(),
64        },
65    ];
66
67    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
68
69    println!(
70        "   Created quantum model with {} parameters",
71        model.parameters.len()
72    );
73
74    // Test different attribution methods
75    let attribution_methods = vec![
76        (
77            "Integrated Gradients",
78            ExplanationMethod::QuantumFeatureAttribution {
79                method: AttributionMethod::IntegratedGradients,
80                num_samples: 50,
81                baseline: Some(Array1::zeros(4)),
82            },
83        ),
84        (
85            "Gradient × Input",
86            ExplanationMethod::QuantumFeatureAttribution {
87                method: AttributionMethod::GradientInput,
88                num_samples: 1,
89                baseline: None,
90            },
91        ),
92        (
93            "Gradient SHAP",
94            ExplanationMethod::QuantumFeatureAttribution {
95                method: AttributionMethod::GradientSHAP,
96                num_samples: 30,
97                baseline: None,
98            },
99        ),
100        (
101            "Quantum Attribution",
102            ExplanationMethod::QuantumFeatureAttribution {
103                method: AttributionMethod::QuantumAttribution,
104                num_samples: 25,
105                baseline: None,
106            },
107        ),
108    ];
109
110    // Test input
111    let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
112
113    println!(
114        "\n   Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
115        test_input[0], test_input[1], test_input[2], test_input[3]
116    );
117
118    for (method_name, method) in attribution_methods {
119        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
120
121        // Set background data for gradient SHAP
122        let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
123            0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
124        });
125        xai.set_background_data(background_data);
126
127        let explanation = xai.explain(&test_input)?;
128
129        if let Some(ref attributions) = explanation.feature_attributions {
130            println!("\n   {method_name} Attribution:");
131            for (i, &attr) in attributions.iter().enumerate() {
132                println!(
133                    "     Feature {}: {:+.4} {}",
134                    i,
135                    attr,
136                    if attr.abs() > 0.1 {
137                        if attr > 0.0 {
138                            "(strong positive)"
139                        } else {
140                            "(strong negative)"
141                        }
142                    } else {
143                        "(weak influence)"
144                    }
145                );
146            }
147
148            // Find most important feature
149            let max_idx = attributions
150                .iter()
151                .enumerate()
152                .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
153                .map_or(0, |(i, _)| i);
154
155            println!(
156                "     → Most important feature: Feature {} ({:.4})",
157                max_idx, attributions[max_idx]
158            );
159        }
160    }
161
162    Ok(())
163}
164
165/// Demonstrate circuit analysis and visualization
166fn circuit_analysis_demo() -> Result<()> {
167    let layers = vec![
168        QNNLayerType::EncodingLayer { num_features: 4 },
169        QNNLayerType::VariationalLayer { num_params: 6 },
170        QNNLayerType::EntanglementLayer {
171            connectivity: "full".to_string(),
172        },
173        QNNLayerType::VariationalLayer { num_params: 6 },
174        QNNLayerType::MeasurementLayer {
175            measurement_basis: "Pauli-Z".to_string(),
176        },
177    ];
178
179    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
180
181    let method = ExplanationMethod::CircuitVisualization {
182        include_measurements: true,
183        parameter_sensitivity: true,
184    };
185
186    let mut xai = QuantumExplainableAI::new(model, vec![method]);
187
188    println!("   Analyzing quantum circuit structure and parameter importance...");
189
190    let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
191    let explanation = xai.explain(&test_input)?;
192
193    if let Some(ref circuit) = explanation.circuit_explanation {
194        println!("\n   Circuit Analysis Results:");
195
196        // Parameter importance
197        println!("   Parameter Importance Scores:");
198        for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
199            if importance > 0.5 {
200                println!("     Parameter {i}: {importance:.3} (high importance)");
201            } else if importance > 0.2 {
202                println!("     Parameter {i}: {importance:.3} (medium importance)");
203            }
204        }
205
206        // Layer analysis
207        println!("\n   Layer-wise Analysis:");
208        for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
209            println!(
210                "     Layer {}: {}",
211                i,
212                format_layer_type(&layer_analysis.layer_type)
213            );
214            println!(
215                "       Information gain: {:.3}",
216                layer_analysis.information_gain
217            );
218            println!(
219                "       Entanglement generated: {:.3}",
220                layer_analysis.entanglement_generated
221            );
222
223            if layer_analysis.entanglement_generated > 0.5 {
224                println!("       → Significant entanglement layer");
225            }
226        }
227
228        // Gate contributions
229        println!("\n   Gate Contribution Analysis:");
230        for (i, gate) in circuit.gate_contributions.iter().enumerate().take(5) {
231            println!(
232                "     Gate {}: {} on qubits {:?}",
233                gate.gate_index, gate.gate_type, gate.qubits
234            );
235            println!("       Contribution: {:.3}", gate.contribution);
236
237            if let Some(ref params) = gate.parameters {
238                println!("       Parameters: {:.3}", params[0]);
239            }
240        }
241
242        // Critical path
243        println!("\n   Critical Path (most important parameters):");
244        print!("     ");
245        for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
246            if i > 0 {
247                print!(" → ");
248            }
249            print!("P{param_idx}");
250        }
251        println!();
252
253        println!("   → This path represents the most influential quantum operations");
254    }
255
256    Ok(())
257}
258
259/// Demonstrate quantum state analysis
260fn quantum_state_demo() -> Result<()> {
261    let layers = vec![
262        QNNLayerType::EncodingLayer { num_features: 3 },
263        QNNLayerType::VariationalLayer { num_params: 9 },
264        QNNLayerType::EntanglementLayer {
265            connectivity: "circular".to_string(),
266        },
267        QNNLayerType::MeasurementLayer {
268            measurement_basis: "computational".to_string(),
269        },
270    ];
271
272    let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
273
274    let method = ExplanationMethod::StateAnalysis {
275        entanglement_measures: true,
276        coherence_analysis: true,
277        superposition_analysis: true,
278    };
279
280    let mut xai = QuantumExplainableAI::new(model, vec![method]);
281
282    println!("   Analyzing quantum state properties...");
283
284    // Test different inputs to see state evolution
285    let test_inputs = [
286        Array1::from_vec(vec![0.0, 0.0, 0.0]),
287        Array1::from_vec(vec![1.0, 0.0, 0.0]),
288        Array1::from_vec(vec![0.5, 0.5, 0.5]),
289        Array1::from_vec(vec![1.0, 1.0, 1.0]),
290    ];
291
292    for (i, input) in test_inputs.iter().enumerate() {
293        println!(
294            "\n   Input {}: [{:.1}, {:.1}, {:.1}]",
295            i + 1,
296            input[0],
297            input[1],
298            input[2]
299        );
300
301        let explanation = xai.explain(input)?;
302
303        if let Some(ref state) = explanation.state_properties {
304            println!("     Quantum State Properties:");
305            println!(
306                "     - Entanglement entropy: {:.3}",
307                state.entanglement_entropy
308            );
309
310            // Coherence measures
311            for (measure_name, &value) in &state.coherence_measures {
312                println!("     - {measure_name}: {value:.3}");
313            }
314
315            // Superposition analysis
316            let max_component = state
317                .superposition_components
318                .iter()
319                .copied()
320                .fold(f64::NEG_INFINITY, f64::max);
321            println!("     - Max superposition component: {max_component:.3}");
322
323            // Measurement probabilities
324            let total_prob = state.measurement_probabilities.sum();
325            println!("     - Total measurement probability: {total_prob:.3}");
326
327            // Most likely measurement outcome
328            let most_likely = state
329                .measurement_probabilities
330                .iter()
331                .enumerate()
332                .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
333                .map_or((0, 0.0), |(idx, &prob)| (idx, prob));
334
335            println!(
336                "     - Most likely outcome: state {} with prob {:.3}",
337                most_likely.0, most_likely.1
338            );
339
340            // State fidelities
341            if let Some(highest_fidelity) = state
342                .state_fidelities
343                .values()
344                .copied()
345                .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
346            {
347                println!("     - Highest basis state fidelity: {highest_fidelity:.3}");
348            }
349
350            // Interpretation
351            if state.entanglement_entropy > 0.5 {
352                println!("     → Highly entangled state");
353            } else if state.entanglement_entropy > 0.1 {
354                println!("     → Moderately entangled state");
355            } else {
356                println!("     → Separable or weakly entangled state");
357            }
358        }
359    }
360
361    Ok(())
362}
363
364/// Demonstrate saliency mapping
365fn saliency_mapping_demo() -> Result<()> {
366    let layers = vec![
367        QNNLayerType::EncodingLayer { num_features: 4 },
368        QNNLayerType::VariationalLayer { num_params: 8 },
369        QNNLayerType::MeasurementLayer {
370            measurement_basis: "computational".to_string(),
371        },
372    ];
373
374    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
375
376    // Test different perturbation methods
377    let perturbation_methods = vec![
378        (
379            "Gaussian Noise",
380            PerturbationMethod::Gaussian { sigma: 0.1 },
381        ),
382        (
383            "Quantum Phase",
384            PerturbationMethod::QuantumPhase { magnitude: 0.2 },
385        ),
386        ("Feature Masking", PerturbationMethod::FeatureMasking),
387        (
388            "Parameter Perturbation",
389            PerturbationMethod::ParameterPerturbation { strength: 0.1 },
390        ),
391    ];
392
393    let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);
394
395    println!("   Computing saliency maps with different perturbation methods...");
396    println!(
397        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
398        test_input[0], test_input[1], test_input[2], test_input[3]
399    );
400
401    for (method_name, perturbation_method) in perturbation_methods {
402        let method = ExplanationMethod::SaliencyMapping {
403            perturbation_method,
404            aggregation: AggregationMethod::Mean,
405        };
406
407        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
408        let explanation = xai.explain(&test_input)?;
409
410        if let Some(ref saliency) = explanation.saliency_map {
411            println!("\n   {method_name} Saliency Map:");
412
413            // Analyze saliency for each output
414            for output_idx in 0..saliency.ncols() {
415                println!("     Output {output_idx}:");
416                for input_idx in 0..saliency.nrows() {
417                    let saliency_score = saliency[[input_idx, output_idx]];
418                    if saliency_score > 0.1 {
419                        println!(
420                            "       Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (important)"
421                        );
422                    } else if saliency_score > 0.05 {
423                        println!(
424                            "       Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (moderate)"
425                        );
426                    }
427                }
428            }
429
430            // Find most salient feature-output pair
431            let mut max_saliency = 0.0;
432            let mut max_pair = (0, 0);
433
434            for i in 0..saliency.nrows() {
435                for j in 0..saliency.ncols() {
436                    if saliency[[i, j]] > max_saliency {
437                        max_saliency = saliency[[i, j]];
438                        max_pair = (i, j);
439                    }
440                }
441            }
442
443            println!(
444                "     → Most salient: Feature {} → Output {} ({:.3})",
445                max_pair.0, max_pair.1, max_saliency
446            );
447        }
448    }
449
450    Ok(())
451}
452
453/// Demonstrate Quantum LIME
454fn quantum_lime_demo() -> Result<()> {
455    let layers = vec![
456        QNNLayerType::EncodingLayer { num_features: 4 },
457        QNNLayerType::VariationalLayer { num_params: 10 },
458        QNNLayerType::EntanglementLayer {
459            connectivity: "circular".to_string(),
460        },
461        QNNLayerType::MeasurementLayer {
462            measurement_basis: "computational".to_string(),
463        },
464    ];
465
466    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
467
468    // Test different local models
469    let local_models = vec![
470        ("Linear Regression", LocalModelType::LinearRegression),
471        ("Decision Tree", LocalModelType::DecisionTree),
472        ("Quantum Linear", LocalModelType::QuantumLinear),
473    ];
474
475    let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);
476
477    println!("   Quantum LIME: Local Interpretable Model-agnostic Explanations");
478    println!(
479        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
480        test_input[0], test_input[1], test_input[2], test_input[3]
481    );
482
483    for (model_name, local_model) in local_models {
484        let method = ExplanationMethod::QuantumLIME {
485            num_perturbations: 100,
486            kernel_width: 0.5,
487            local_model,
488        };
489
490        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
491        let explanation = xai.explain(&test_input)?;
492
493        if let Some(ref attributions) = explanation.feature_attributions {
494            println!("\n   LIME with {model_name}:");
495
496            for (i, &attr) in attributions.iter().enumerate() {
497                let impact = if attr.abs() > 0.3 {
498                    "high"
499                } else if attr.abs() > 0.1 {
500                    "medium"
501                } else {
502                    "low"
503                };
504
505                println!("     Feature {i}: {attr:+.3} ({impact} impact)");
506            }
507
508            // Local model interpretation
509            match model_name {
510                "Linear Regression" => {
511                    println!("     → Linear relationship approximation in local region");
512                }
513                "Decision Tree" => {
514                    println!("     → Rule-based approximation with thresholds");
515                }
516                "Quantum Linear" => {
517                    println!("     → Quantum-aware linear approximation");
518                }
519                _ => {}
520            }
521
522            // Compute local fidelity (simplified)
523            let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
524            println!("     → Local explanation complexity: {local_complexity:.3}");
525        }
526    }
527
528    Ok(())
529}
530
531/// Demonstrate Quantum SHAP
532fn quantum_shap_demo() -> Result<()> {
533    let layers = vec![
534        QNNLayerType::EncodingLayer { num_features: 3 },
535        QNNLayerType::VariationalLayer { num_params: 6 },
536        QNNLayerType::MeasurementLayer {
537            measurement_basis: "Pauli-Z".to_string(),
538        },
539    ];
540
541    let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
542
543    let method = ExplanationMethod::QuantumSHAP {
544        num_coalitions: 100,
545        background_samples: 20,
546    };
547
548    let mut xai = QuantumExplainableAI::new(model, vec![method]);
549
550    // Set background data for SHAP
551    let background_data = Array2::from_shape_fn((50, 3), |(i, j)| {
552        0.3f64.mul_add(((i + j) as f64 * 0.1).sin(), 0.5)
553    });
554    xai.set_background_data(background_data);
555
556    println!("   Quantum SHAP: SHapley Additive exPlanations");
557
558    // Test multiple inputs
559    let test_inputs = [
560        Array1::from_vec(vec![0.1, 0.5, 0.9]),
561        Array1::from_vec(vec![0.8, 0.3, 0.6]),
562        Array1::from_vec(vec![0.4, 0.7, 0.2]),
563    ];
564
565    for (i, input) in test_inputs.iter().enumerate() {
566        println!(
567            "\n   Input {}: [{:.1}, {:.1}, {:.1}]",
568            i + 1,
569            input[0],
570            input[1],
571            input[2]
572        );
573
574        let explanation = xai.explain(input)?;
575
576        if let Some(ref shap_values) = explanation.feature_attributions {
577            println!("     SHAP Values:");
578
579            let mut total_shap = 0.0;
580            for (j, &value) in shap_values.iter().enumerate() {
581                total_shap += value;
582                println!("     - Feature {j}: {value:+.4}");
583            }
584
585            println!("     - Sum of SHAP values: {total_shap:.4}");
586
587            // Feature ranking
588            let mut indexed_shap: Vec<(usize, f64)> = shap_values
589                .iter()
590                .enumerate()
591                .map(|(idx, &val)| (idx, val.abs()))
592                .collect();
593            indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
594
595            println!("     Feature importance ranking:");
596            for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
597                let original_value = shap_values[*feature_idx];
598                println!(
599                    "     {}. Feature {}: {:.4} (|{:.4}|)",
600                    rank + 1,
601                    feature_idx,
602                    original_value,
603                    abs_value
604                );
605            }
606
607            // SHAP properties
608            println!(
609                "     → SHAP values satisfy efficiency property (sum to prediction difference)"
610            );
611            println!("     → Each value represents feature's average marginal contribution");
612        }
613    }
614
615    Ok(())
616}
617
618/// Demonstrate Layer-wise Relevance Propagation
619fn quantum_lrp_demo() -> Result<()> {
620    let layers = vec![
621        QNNLayerType::EncodingLayer { num_features: 4 },
622        QNNLayerType::VariationalLayer { num_params: 8 },
623        QNNLayerType::VariationalLayer { num_params: 6 },
624        QNNLayerType::MeasurementLayer {
625            measurement_basis: "computational".to_string(),
626        },
627    ];
628
629    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
630
631    // Test different LRP rules
632    let lrp_rules = vec![
633        ("Epsilon Rule", LRPRule::Epsilon),
634        ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
635        (
636            "Alpha-Beta Rule",
637            LRPRule::AlphaBeta {
638                alpha: 2.0,
639                beta: 1.0,
640            },
641        ),
642        ("Quantum Rule", LRPRule::QuantumRule),
643    ];
644
645    let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);
646
647    println!("   Layer-wise Relevance Propagation for Quantum Circuits");
648    println!(
649        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
650        test_input[0], test_input[1], test_input[2], test_input[3]
651    );
652
653    for (rule_name, lrp_rule) in lrp_rules {
654        let method = ExplanationMethod::QuantumLRP {
655            propagation_rule: lrp_rule,
656            epsilon: 1e-6,
657        };
658
659        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
660        let explanation = xai.explain(&test_input)?;
661
662        if let Some(ref relevance) = explanation.feature_attributions {
663            println!("\n   LRP with {rule_name}:");
664
665            let total_relevance = relevance.sum();
666
667            for (i, &rel) in relevance.iter().enumerate() {
668                let percentage = if total_relevance.abs() > 1e-10 {
669                    rel / total_relevance * 100.0
670                } else {
671                    0.0
672                };
673
674                println!("     Feature {i}: {rel:.4} ({percentage:.1}% of total relevance)");
675            }
676
677            println!("     Total relevance: {total_relevance:.4}");
678
679            // Rule-specific interpretation
680            match rule_name {
681                "Epsilon Rule" => {
682                    println!("     → Distributes relevance proportionally to activations");
683                }
684                "Gamma Rule" => {
685                    println!("     → Emphasizes positive contributions");
686                }
687                "Alpha-Beta Rule" => {
688                    println!("     → Separates positive and negative contributions");
689                }
690                "Quantum Rule" => {
691                    println!("     → Accounts for quantum superposition and entanglement");
692                }
693                _ => {}
694            }
695        }
696    }
697
698    Ok(())
699}
700
701/// Comprehensive explanation demonstration
702fn comprehensive_explanation_demo() -> Result<()> {
703    let layers = vec![
704        QNNLayerType::EncodingLayer { num_features: 4 },
705        QNNLayerType::VariationalLayer { num_params: 12 },
706        QNNLayerType::EntanglementLayer {
707            connectivity: "full".to_string(),
708        },
709        QNNLayerType::VariationalLayer { num_params: 8 },
710        QNNLayerType::MeasurementLayer {
711            measurement_basis: "computational".to_string(),
712        },
713    ];
714
715    let model = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
716
717    // Use comprehensive explanation methods
718    let methods = vec![
719        ExplanationMethod::QuantumFeatureAttribution {
720            method: AttributionMethod::IntegratedGradients,
721            num_samples: 30,
722            baseline: Some(Array1::zeros(4)),
723        },
724        ExplanationMethod::CircuitVisualization {
725            include_measurements: true,
726            parameter_sensitivity: true,
727        },
728        ExplanationMethod::StateAnalysis {
729            entanglement_measures: true,
730            coherence_analysis: true,
731            superposition_analysis: true,
732        },
733        ExplanationMethod::ConceptActivation {
734            concept_datasets: vec!["pattern_A".to_string(), "pattern_B".to_string()],
735            activation_threshold: 0.3,
736        },
737    ];
738
739    let mut xai = QuantumExplainableAI::new(model, methods);
740
741    // Add concept vectors
742    xai.add_concept(
743        "pattern_A".to_string(),
744        Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
745    );
746    xai.add_concept(
747        "pattern_B".to_string(),
748        Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
749    );
750
751    // Set background data
752    let background_data = Array2::from_shape_fn((30, 4), |(i, j)| {
753        0.4f64.mul_add(((i * j) as f64 * 0.15).sin(), 0.3)
754    });
755    xai.set_background_data(background_data);
756
757    println!("   Comprehensive Quantum Model Explanation");
758
759    // Test input representing a specific pattern
760    let test_input = Array1::from_vec(vec![0.9, 0.1, 0.8, 0.2]); // Similar to pattern_A
761
762    println!(
763        "\n   Analyzing input: [{:.1}, {:.1}, {:.1}, {:.1}]",
764        test_input[0], test_input[1], test_input[2], test_input[3]
765    );
766
767    let explanation = xai.explain(&test_input)?;
768
769    // Display comprehensive results
770    println!("\n   === COMPREHENSIVE EXPLANATION RESULTS ===");
771
772    // Feature attributions
773    if let Some(ref attributions) = explanation.feature_attributions {
774        println!("\n   Feature Attributions:");
775        for (i, &attr) in attributions.iter().enumerate() {
776            println!("   - Feature {i}: {attr:+.3}");
777        }
778    }
779
780    // Circuit analysis summary
781    if let Some(ref circuit) = explanation.circuit_explanation {
782        println!("\n   Circuit Analysis Summary:");
783        let avg_importance = circuit.parameter_importance.mean().unwrap_or(0.0);
784        println!("   - Average parameter importance: {avg_importance:.3}");
785        println!(
786            "   - Number of analyzed layers: {}",
787            circuit.layer_analysis.len()
788        );
789        println!("   - Critical path length: {}", circuit.critical_path.len());
790    }
791
792    // Quantum state properties
793    if let Some(ref state) = explanation.state_properties {
794        println!("\n   Quantum State Properties:");
795        println!(
796            "   - Entanglement entropy: {:.3}",
797            state.entanglement_entropy
798        );
799        println!(
800            "   - Coherence measures: {} types",
801            state.coherence_measures.len()
802        );
803
804        let max_measurement_prob = state
805            .measurement_probabilities
806            .iter()
807            .copied()
808            .fold(f64::NEG_INFINITY, f64::max);
809        println!("   - Max measurement probability: {max_measurement_prob:.3}");
810    }
811
812    // Concept activations
813    if let Some(ref concepts) = explanation.concept_activations {
814        println!("\n   Concept Activations:");
815        for (concept, &activation) in concepts {
816            let similarity = if activation > 0.7 {
817                "high"
818            } else if activation > 0.3 {
819                "medium"
820            } else {
821                "low"
822            };
823            println!("   - {concept}: {activation:.3} ({similarity} similarity)");
824        }
825    }
826
827    // Confidence scores
828    println!("\n   Explanation Confidence Scores:");
829    for (component, &confidence) in &explanation.confidence_scores {
830        println!("   - {component}: {confidence:.3}");
831    }
832
833    // Textual explanation
834    println!("\n   Generated Explanation:");
835    println!("{}", explanation.textual_explanation);
836
837    // Summary insights
838    println!("\n   === KEY INSIGHTS ===");
839
840    if let Some(ref attributions) = explanation.feature_attributions {
841        let max_attr_idx = attributions
842            .iter()
843            .enumerate()
844            .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
845            .map_or(0, |(i, _)| i);
846
847        println!(
848            "   • Most influential feature: Feature {} ({:.3})",
849            max_attr_idx, attributions[max_attr_idx]
850        );
851    }
852
853    if let Some(ref state) = explanation.state_properties {
854        if state.entanglement_entropy > 0.5 {
855            println!("   • Model creates significant quantum entanglement");
856        }
857
858        let coherence_level = state
859            .coherence_measures
860            .values()
861            .copied()
862            .fold(0.0, f64::max);
863        if coherence_level > 0.5 {
864            println!("   • High quantum coherence detected");
865        }
866    }
867
868    if let Some(ref concepts) = explanation.concept_activations {
869        if let Some((best_concept, &max_activation)) =
870            concepts.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
871        {
872            if max_activation > 0.5 {
873                println!("   • Input strongly matches concept: {best_concept}");
874            }
875        }
876    }
877
878    println!("   • Explanation provides multi-faceted interpretation of quantum model behavior");
879
880    Ok(())
881}

pub fn set_background_data(&mut self, data: Array2<f64>)

Set the background data used by explanation methods that need it, such as gradient SHAP and Quantum SHAP
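
A minimal sketch of providing background data, taken from the repository example below. The assumed shape is (num_background_samples, num_input_features), with the column count matching the model's input size:

// Sketch: 20 background samples for a 4-feature model.
// Shape assumption: (num_samples, num_features).
// `xai` is a QuantumExplainableAI constructed with new().
let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
    0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
});
xai.set_background_data(background_data);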

Examples found in repository:
examples/quantum_explainable_ai.rs (line 125)
118    for (method_name, method) in attribution_methods {
119        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
120
121        // Set background data for gradient SHAP
122        let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
123            0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
124        });
125        xai.set_background_data(background_data);
126
127        let explanation = xai.explain(&test_input)?;
874            }
875        }
876    }
877
878    println!("   • Explanation provides multi-faceted interpretation of quantum model behavior");
879
880    Ok(())
881}
Source

pub fn add_concept(&mut self, name: String, vector: Array1<f64>)

Add a named concept vector for concept-activation analysis
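A condensed sketch distilled from the repository example below; it assumes the same imports and types shown there (QuantumNeuralNetwork, QNNLayerType, ExplanationMethod, ndarray::Array1) and a caller that returns Result<()>; the helper name is illustrative:

use ndarray::Array1;

fn register_concepts() -> Result<()> {
    // Small model whose explanations will include concept activations.
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];
    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    // Request concept-activation explanations for the named concept.
    let methods = vec![ExplanationMethod::ConceptActivation {
        concept_datasets: vec!["pattern_A".to_string()],
        activation_threshold: 0.3,
    }];
    let mut xai = QuantumExplainableAI::new(model, methods);

    // Register the concept vector under the same name used above.
    xai.add_concept(
        "pattern_A".to_string(),
        Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
    );
    Ok(())
}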

Examples found in repository?
examples/quantum_explainable_ai.rs (lines 742-745)
739    let mut xai = QuantumExplainableAI::new(model, methods);
740
741    // Add concept vectors
742    xai.add_concept(
743        "pattern_A".to_string(),
744        Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
745    );
746    xai.add_concept(
747        "pattern_B".to_string(),
748        Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
749    );
Source

pub fn explain(&mut self, input: &Array1<f64>) -> Result<ExplanationResult>

Generate a comprehensive explanation for an input
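A condensed sketch distilled from the repository examples below (same imports and a Result<()> caller assumed; the helper name is illustrative). Which fields of the returned ExplanationResult are populated depends on the methods the instance was configured with:

use ndarray::Array1;

fn explain_one_input(xai: &mut QuantumExplainableAI) -> Result<()> {
    let input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);

    // Run every configured explanation method on this input.
    let explanation = xai.explain(&input)?;

    // Components are optional; only those produced by the configured
    // methods are present.
    if let Some(ref attributions) = explanation.feature_attributions {
        for (i, &attr) in attributions.iter().enumerate() {
            println!("Feature {i}: {attr:+.3}");
        }
    }

    // Human-readable summary of the explanation.
    println!("{}", explanation.textual_explanation);
    Ok(())
}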

Examples found in repository?
examples/quantum_explainable_ai.rs (line 127)
53fn feature_attribution_demo() -> Result<()> {
54    // Create quantum model
55    let layers = vec![
56        QNNLayerType::EncodingLayer { num_features: 4 },
57        QNNLayerType::VariationalLayer { num_params: 12 },
58        QNNLayerType::EntanglementLayer {
59            connectivity: "circular".to_string(),
60        },
61        QNNLayerType::VariationalLayer { num_params: 8 },
62        QNNLayerType::MeasurementLayer {
63            measurement_basis: "computational".to_string(),
64        },
65    ];
66
67    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
68
69    println!(
70        "   Created quantum model with {} parameters",
71        model.parameters.len()
72    );
73
74    // Test different attribution methods
75    let attribution_methods = vec![
76        (
77            "Integrated Gradients",
78            ExplanationMethod::QuantumFeatureAttribution {
79                method: AttributionMethod::IntegratedGradients,
80                num_samples: 50,
81                baseline: Some(Array1::zeros(4)),
82            },
83        ),
84        (
85            "Gradient × Input",
86            ExplanationMethod::QuantumFeatureAttribution {
87                method: AttributionMethod::GradientInput,
88                num_samples: 1,
89                baseline: None,
90            },
91        ),
92        (
93            "Gradient SHAP",
94            ExplanationMethod::QuantumFeatureAttribution {
95                method: AttributionMethod::GradientSHAP,
96                num_samples: 30,
97                baseline: None,
98            },
99        ),
100        (
101            "Quantum Attribution",
102            ExplanationMethod::QuantumFeatureAttribution {
103                method: AttributionMethod::QuantumAttribution,
104                num_samples: 25,
105                baseline: None,
106            },
107        ),
108    ];
109
110    // Test input
111    let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
112
113    println!(
114        "\n   Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
115        test_input[0], test_input[1], test_input[2], test_input[3]
116    );
117
118    for (method_name, method) in attribution_methods {
119        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
120
121        // Set background data for gradient SHAP
122        let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
123            0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
124        });
125        xai.set_background_data(background_data);
126
127        let explanation = xai.explain(&test_input)?;
128
129        if let Some(ref attributions) = explanation.feature_attributions {
130            println!("\n   {method_name} Attribution:");
131            for (i, &attr) in attributions.iter().enumerate() {
132                println!(
133                    "     Feature {}: {:+.4} {}",
134                    i,
135                    attr,
136                    if attr.abs() > 0.1 {
137                        if attr > 0.0 {
138                            "(strong positive)"
139                        } else {
140                            "(strong negative)"
141                        }
142                    } else {
143                        "(weak influence)"
144                    }
145                );
146            }
147
148            // Find most important feature
149            let max_idx = attributions
150                .iter()
151                .enumerate()
152                .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
153                .map_or(0, |(i, _)| i);
154
155            println!(
156                "     → Most important feature: Feature {} ({:.4})",
157                max_idx, attributions[max_idx]
158            );
159        }
160    }
161
162    Ok(())
163}
164
165/// Demonstrate circuit analysis and visualization
166fn circuit_analysis_demo() -> Result<()> {
167    let layers = vec![
168        QNNLayerType::EncodingLayer { num_features: 4 },
169        QNNLayerType::VariationalLayer { num_params: 6 },
170        QNNLayerType::EntanglementLayer {
171            connectivity: "full".to_string(),
172        },
173        QNNLayerType::VariationalLayer { num_params: 6 },
174        QNNLayerType::MeasurementLayer {
175            measurement_basis: "Pauli-Z".to_string(),
176        },
177    ];
178
179    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
180
181    let method = ExplanationMethod::CircuitVisualization {
182        include_measurements: true,
183        parameter_sensitivity: true,
184    };
185
186    let mut xai = QuantumExplainableAI::new(model, vec![method]);
187
188    println!("   Analyzing quantum circuit structure and parameter importance...");
189
190    let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
191    let explanation = xai.explain(&test_input)?;
192
193    if let Some(ref circuit) = explanation.circuit_explanation {
194        println!("\n   Circuit Analysis Results:");
195
196        // Parameter importance
197        println!("   Parameter Importance Scores:");
198        for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
199            if importance > 0.5 {
200                println!("     Parameter {i}: {importance:.3} (high importance)");
201            } else if importance > 0.2 {
202                println!("     Parameter {i}: {importance:.3} (medium importance)");
203            }
204        }
205
206        // Layer analysis
207        println!("\n   Layer-wise Analysis:");
208        for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
209            println!(
210                "     Layer {}: {}",
211                i,
212                format_layer_type(&layer_analysis.layer_type)
213            );
214            println!(
215                "       Information gain: {:.3}",
216                layer_analysis.information_gain
217            );
218            println!(
219                "       Entanglement generated: {:.3}",
220                layer_analysis.entanglement_generated
221            );
222
223            if layer_analysis.entanglement_generated > 0.5 {
224                println!("       → Significant entanglement layer");
225            }
226        }
227
228        // Gate contributions
229        println!("\n   Gate Contribution Analysis:");
230        for (i, gate) in circuit.gate_contributions.iter().enumerate().take(5) {
231            println!(
232                "     Gate {}: {} on qubits {:?}",
233                gate.gate_index, gate.gate_type, gate.qubits
234            );
235            println!("       Contribution: {:.3}", gate.contribution);
236
237            if let Some(ref params) = gate.parameters {
238                println!("       Parameters: {:.3}", params[0]);
239            }
240        }
241
242        // Critical path
243        println!("\n   Critical Path (most important parameters):");
244        print!("     ");
245        for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
246            if i > 0 {
247                print!(" → ");
248            }
249            print!("P{param_idx}");
250        }
251        println!();
252
253        println!("   → This path represents the most influential quantum operations");
254    }
255
256    Ok(())
257}
258
259/// Demonstrate quantum state analysis
260fn quantum_state_demo() -> Result<()> {
261    let layers = vec![
262        QNNLayerType::EncodingLayer { num_features: 3 },
263        QNNLayerType::VariationalLayer { num_params: 9 },
264        QNNLayerType::EntanglementLayer {
265            connectivity: "circular".to_string(),
266        },
267        QNNLayerType::MeasurementLayer {
268            measurement_basis: "computational".to_string(),
269        },
270    ];
271
272    let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
273
274    let method = ExplanationMethod::StateAnalysis {
275        entanglement_measures: true,
276        coherence_analysis: true,
277        superposition_analysis: true,
278    };
279
280    let mut xai = QuantumExplainableAI::new(model, vec![method]);
281
282    println!("   Analyzing quantum state properties...");
283
284    // Test different inputs to see state evolution
285    let test_inputs = [
286        Array1::from_vec(vec![0.0, 0.0, 0.0]),
287        Array1::from_vec(vec![1.0, 0.0, 0.0]),
288        Array1::from_vec(vec![0.5, 0.5, 0.5]),
289        Array1::from_vec(vec![1.0, 1.0, 1.0]),
290    ];
291
292    for (i, input) in test_inputs.iter().enumerate() {
293        println!(
294            "\n   Input {}: [{:.1}, {:.1}, {:.1}]",
295            i + 1,
296            input[0],
297            input[1],
298            input[2]
299        );
300
301        let explanation = xai.explain(input)?;
302
303        if let Some(ref state) = explanation.state_properties {
304            println!("     Quantum State Properties:");
305            println!(
306                "     - Entanglement entropy: {:.3}",
307                state.entanglement_entropy
308            );
309
310            // Coherence measures
311            for (measure_name, &value) in &state.coherence_measures {
312                println!("     - {measure_name}: {value:.3}");
313            }
314
315            // Superposition analysis
316            let max_component = state
317                .superposition_components
318                .iter()
319                .copied()
320                .fold(f64::NEG_INFINITY, f64::max);
321            println!("     - Max superposition component: {max_component:.3}");
322
323            // Measurement probabilities
324            let total_prob = state.measurement_probabilities.sum();
325            println!("     - Total measurement probability: {total_prob:.3}");
326
327            // Most likely measurement outcome
328            let most_likely = state
329                .measurement_probabilities
330                .iter()
331                .enumerate()
332                .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
333                .map_or((0, 0.0), |(idx, &prob)| (idx, prob));
334
335            println!(
336                "     - Most likely outcome: state {} with prob {:.3}",
337                most_likely.0, most_likely.1
338            );
339
340            // State fidelities
341            if let Some(highest_fidelity) = state
342                .state_fidelities
343                .values()
344                .copied()
345                .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
346            {
347                println!("     - Highest basis state fidelity: {highest_fidelity:.3}");
348            }
349
350            // Interpretation
351            if state.entanglement_entropy > 0.5 {
352                println!("     → Highly entangled state");
353            } else if state.entanglement_entropy > 0.1 {
354                println!("     → Moderately entangled state");
355            } else {
356                println!("     → Separable or weakly entangled state");
357            }
358        }
359    }
360
361    Ok(())
362}
363
364/// Demonstrate saliency mapping
365fn saliency_mapping_demo() -> Result<()> {
366    let layers = vec![
367        QNNLayerType::EncodingLayer { num_features: 4 },
368        QNNLayerType::VariationalLayer { num_params: 8 },
369        QNNLayerType::MeasurementLayer {
370            measurement_basis: "computational".to_string(),
371        },
372    ];
373
374    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
375
376    // Test different perturbation methods
377    let perturbation_methods = vec![
378        (
379            "Gaussian Noise",
380            PerturbationMethod::Gaussian { sigma: 0.1 },
381        ),
382        (
383            "Quantum Phase",
384            PerturbationMethod::QuantumPhase { magnitude: 0.2 },
385        ),
386        ("Feature Masking", PerturbationMethod::FeatureMasking),
387        (
388            "Parameter Perturbation",
389            PerturbationMethod::ParameterPerturbation { strength: 0.1 },
390        ),
391    ];
392
393    let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);
394
395    println!("   Computing saliency maps with different perturbation methods...");
396    println!(
397        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
398        test_input[0], test_input[1], test_input[2], test_input[3]
399    );
400
401    for (method_name, perturbation_method) in perturbation_methods {
402        let method = ExplanationMethod::SaliencyMapping {
403            perturbation_method,
404            aggregation: AggregationMethod::Mean,
405        };
406
407        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
408        let explanation = xai.explain(&test_input)?;
409
410        if let Some(ref saliency) = explanation.saliency_map {
411            println!("\n   {method_name} Saliency Map:");
412
413            // Analyze saliency for each output
414            for output_idx in 0..saliency.ncols() {
415                println!("     Output {output_idx}:");
416                for input_idx in 0..saliency.nrows() {
417                    let saliency_score = saliency[[input_idx, output_idx]];
418                    if saliency_score > 0.1 {
419                        println!(
420                            "       Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (important)"
421                        );
422                    } else if saliency_score > 0.05 {
423                        println!(
424                            "       Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (moderate)"
425                        );
426                    }
427                }
428            }
429
430            // Find most salient feature-output pair
431            let mut max_saliency = 0.0;
432            let mut max_pair = (0, 0);
433
434            for i in 0..saliency.nrows() {
435                for j in 0..saliency.ncols() {
436                    if saliency[[i, j]] > max_saliency {
437                        max_saliency = saliency[[i, j]];
438                        max_pair = (i, j);
439                    }
440                }
441            }
442
443            println!(
444                "     → Most salient: Feature {} → Output {} ({:.3})",
445                max_pair.0, max_pair.1, max_saliency
446            );
447        }
448    }
449
450    Ok(())
451}
452
453/// Demonstrate Quantum LIME
454fn quantum_lime_demo() -> Result<()> {
455    let layers = vec![
456        QNNLayerType::EncodingLayer { num_features: 4 },
457        QNNLayerType::VariationalLayer { num_params: 10 },
458        QNNLayerType::EntanglementLayer {
459            connectivity: "circular".to_string(),
460        },
461        QNNLayerType::MeasurementLayer {
462            measurement_basis: "computational".to_string(),
463        },
464    ];
465
466    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
467
468    // Test different local models
469    let local_models = vec![
470        ("Linear Regression", LocalModelType::LinearRegression),
471        ("Decision Tree", LocalModelType::DecisionTree),
472        ("Quantum Linear", LocalModelType::QuantumLinear),
473    ];
474
475    let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);
476
477    println!("   Quantum LIME: Local Interpretable Model-agnostic Explanations");
478    println!(
479        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
480        test_input[0], test_input[1], test_input[2], test_input[3]
481    );
482
483    for (model_name, local_model) in local_models {
484        let method = ExplanationMethod::QuantumLIME {
485            num_perturbations: 100,
486            kernel_width: 0.5,
487            local_model,
488        };
489
490        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
491        let explanation = xai.explain(&test_input)?;
492
493        if let Some(ref attributions) = explanation.feature_attributions {
494            println!("\n   LIME with {model_name}:");
495
496            for (i, &attr) in attributions.iter().enumerate() {
497                let impact = if attr.abs() > 0.3 {
498                    "high"
499                } else if attr.abs() > 0.1 {
500                    "medium"
501                } else {
502                    "low"
503                };
504
505                println!("     Feature {i}: {attr:+.3} ({impact} impact)");
506            }
507
508            // Local model interpretation
509            match model_name {
510                "Linear Regression" => {
511                    println!("     → Linear relationship approximation in local region");
512                }
513                "Decision Tree" => {
514                    println!("     → Rule-based approximation with thresholds");
515                }
516                "Quantum Linear" => {
517                    println!("     → Quantum-aware linear approximation");
518                }
519                _ => {}
520            }
521
522            // Compute local fidelity (simplified)
523            let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
524            println!("     → Local explanation complexity: {local_complexity:.3}");
525        }
526    }
527
528    Ok(())
529}
530
531/// Demonstrate Quantum SHAP
532fn quantum_shap_demo() -> Result<()> {
533    let layers = vec![
534        QNNLayerType::EncodingLayer { num_features: 3 },
535        QNNLayerType::VariationalLayer { num_params: 6 },
536        QNNLayerType::MeasurementLayer {
537            measurement_basis: "Pauli-Z".to_string(),
538        },
539    ];
540
541    let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
542
543    let method = ExplanationMethod::QuantumSHAP {
544        num_coalitions: 100,
545        background_samples: 20,
546    };
547
548    let mut xai = QuantumExplainableAI::new(model, vec![method]);
549
550    // Set background data for SHAP
551    let background_data = Array2::from_shape_fn((50, 3), |(i, j)| {
552        0.3f64.mul_add(((i + j) as f64 * 0.1).sin(), 0.5)
553    });
554    xai.set_background_data(background_data);
555
556    println!("   Quantum SHAP: SHapley Additive exPlanations");
557
558    // Test multiple inputs
559    let test_inputs = [
560        Array1::from_vec(vec![0.1, 0.5, 0.9]),
561        Array1::from_vec(vec![0.8, 0.3, 0.6]),
562        Array1::from_vec(vec![0.4, 0.7, 0.2]),
563    ];
564
565    for (i, input) in test_inputs.iter().enumerate() {
566        println!(
567            "\n   Input {}: [{:.1}, {:.1}, {:.1}]",
568            i + 1,
569            input[0],
570            input[1],
571            input[2]
572        );
573
574        let explanation = xai.explain(input)?;
575
576        if let Some(ref shap_values) = explanation.feature_attributions {
577            println!("     SHAP Values:");
578
579            let mut total_shap = 0.0;
580            for (j, &value) in shap_values.iter().enumerate() {
581                total_shap += value;
582                println!("     - Feature {j}: {value:+.4}");
583            }
584
585            println!("     - Sum of SHAP values: {total_shap:.4}");
586
587            // Feature ranking
588            let mut indexed_shap: Vec<(usize, f64)> = shap_values
589                .iter()
590                .enumerate()
591                .map(|(idx, &val)| (idx, val.abs()))
592                .collect();
593            indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
594
595            println!("     Feature importance ranking:");
596            for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
597                let original_value = shap_values[*feature_idx];
598                println!(
599                    "     {}. Feature {}: {:.4} (|{:.4}|)",
600                    rank + 1,
601                    feature_idx,
602                    original_value,
603                    abs_value
604                );
605            }
606
607            // SHAP properties
608            println!(
609                "     → SHAP values satisfy efficiency property (sum to prediction difference)"
610            );
611            println!("     → Each value represents feature's average marginal contribution");
612        }
613    }
614
615    Ok(())
616}
617
618/// Demonstrate Layer-wise Relevance Propagation
619fn quantum_lrp_demo() -> Result<()> {
620    let layers = vec![
621        QNNLayerType::EncodingLayer { num_features: 4 },
622        QNNLayerType::VariationalLayer { num_params: 8 },
623        QNNLayerType::VariationalLayer { num_params: 6 },
624        QNNLayerType::MeasurementLayer {
625            measurement_basis: "computational".to_string(),
626        },
627    ];
628
629    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
630
631    // Test different LRP rules
632    let lrp_rules = vec![
633        ("Epsilon Rule", LRPRule::Epsilon),
634        ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
635        (
636            "Alpha-Beta Rule",
637            LRPRule::AlphaBeta {
638                alpha: 2.0,
639                beta: 1.0,
640            },
641        ),
642        ("Quantum Rule", LRPRule::QuantumRule),
643    ];
644
645    let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);
646
647    println!("   Layer-wise Relevance Propagation for Quantum Circuits");
648    println!(
649        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
650        test_input[0], test_input[1], test_input[2], test_input[3]
651    );
652
653    for (rule_name, lrp_rule) in lrp_rules {
654        let method = ExplanationMethod::QuantumLRP {
655            propagation_rule: lrp_rule,
656            epsilon: 1e-6,
657        };
658
659        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
660        let explanation = xai.explain(&test_input)?;
661
662        if let Some(ref relevance) = explanation.feature_attributions {
663            println!("\n   LRP with {rule_name}:");
664
665            let total_relevance = relevance.sum();
666
667            for (i, &rel) in relevance.iter().enumerate() {
668                let percentage = if total_relevance.abs() > 1e-10 {
669                    rel / total_relevance * 100.0
670                } else {
671                    0.0
672                };
673
674                println!("     Feature {i}: {rel:.4} ({percentage:.1}% of total relevance)");
675            }
676
677            println!("     Total relevance: {total_relevance:.4}");
678
679            // Rule-specific interpretation
680            match rule_name {
681                "Epsilon Rule" => {
682                    println!("     → Distributes relevance proportionally to activations");
683                }
684                "Gamma Rule" => {
685                    println!("     → Emphasizes positive contributions");
686                }
687                "Alpha-Beta Rule" => {
688                    println!("     → Separates positive and negative contributions");
689                }
690                "Quantum Rule" => {
691                    println!("     → Accounts for quantum superposition and entanglement");
692                }
693                _ => {}
694            }
695        }
696    }
697
698    Ok(())
699}

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointable object with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

Source§

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).
Source§

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V