quantum_explainable_ai/
quantum_explainable_ai.rs

#![allow(
    clippy::pedantic,
    clippy::unnecessary_wraps,
    clippy::needless_range_loop,
    clippy::useless_vec,
    clippy::needless_collect,
    clippy::too_many_arguments
)]
//! Quantum Explainable AI Example
//!
//! This example demonstrates various explainability and interpretability methods
//! for quantum neural networks, including feature attribution, circuit analysis,
//! quantum state analysis, and concept activation vectors.

use quantrs2_ml::prelude::*;
use quantrs2_ml::qnn::QNNLayerType;
use scirs2_core::ndarray::{Array1, Array2};

fn main() -> Result<()> {
    println!("=== Quantum Explainable AI Demo ===\n");

    // Step 1: Feature attribution methods
    println!("1. Quantum Feature Attribution...");
    feature_attribution_demo()?;

    // Step 2: Circuit visualization and analysis
    println!("\n2. Circuit Visualization and Analysis...");
    circuit_analysis_demo()?;

    // Step 3: Quantum state analysis
    println!("\n3. Quantum State Analysis...");
    quantum_state_demo()?;

    // Step 4: Saliency mapping
    println!("\n4. Quantum Saliency Mapping...");
    saliency_mapping_demo()?;

    // Step 5: Quantum LIME explanations
    println!("\n5. Quantum LIME (Local Interpretable Model-agnostic Explanations)...");
    quantum_lime_demo()?;

    // Step 6: Quantum SHAP values
    println!("\n6. Quantum SHAP (SHapley Additive exPlanations)...");
    quantum_shap_demo()?;

    // Step 7: Layer-wise relevance propagation
    println!("\n7. Layer-wise Relevance Propagation...");
    quantum_lrp_demo()?;

    // Step 8: Comprehensive explanation
    println!("\n8. Comprehensive Explanation Analysis...");
    comprehensive_explanation_demo()?;

    println!("\n=== Quantum Explainable AI Demo Complete ===");

    Ok(())
}

/// Demonstrate quantum feature attribution methods
fn feature_attribution_demo() -> Result<()> {
    // Create quantum model
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 12 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    println!(
        "   Created quantum model with {} parameters",
        model.parameters.len()
    );

    // Test different attribution methods
    let attribution_methods = vec![
        (
            "Integrated Gradients",
            ExplanationMethod::QuantumFeatureAttribution {
                method: AttributionMethod::IntegratedGradients,
                num_samples: 50,
                baseline: Some(Array1::zeros(4)),
            },
        ),
        (
            "Gradient × Input",
            ExplanationMethod::QuantumFeatureAttribution {
                method: AttributionMethod::GradientInput,
                num_samples: 1,
                baseline: None,
            },
        ),
        (
            "Gradient SHAP",
            ExplanationMethod::QuantumFeatureAttribution {
                method: AttributionMethod::GradientSHAP,
                num_samples: 30,
                baseline: None,
            },
        ),
        (
            "Quantum Attribution",
            ExplanationMethod::QuantumFeatureAttribution {
                method: AttributionMethod::QuantumAttribution,
                num_samples: 25,
                baseline: None,
            },
        ),
    ];

    // Test input
    let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);

    println!(
        "\n   Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
        test_input[0], test_input[1], test_input[2], test_input[3]
    );

    for (method_name, method) in attribution_methods {
        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);

        // Set background data for gradient SHAP
        let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
            0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
        });
        xai.set_background_data(background_data);

        let explanation = xai.explain(&test_input)?;

        if let Some(ref attributions) = explanation.feature_attributions {
            println!("\n   {method_name} Attribution:");
            for (i, &attr) in attributions.iter().enumerate() {
                println!(
                    "     Feature {}: {:+.4} {}",
                    i,
                    attr,
                    if attr.abs() > 0.1 {
                        if attr > 0.0 {
                            "(strong positive)"
                        } else {
                            "(strong negative)"
                        }
                    } else {
                        "(weak influence)"
                    }
                );
            }

            // Find most important feature
            let max_idx = attributions
                .iter()
                .enumerate()
                .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
                .map_or(0, |(i, _)| i);

            println!(
                "     → Most important feature: Feature {} ({:.4})",
                max_idx, attributions[max_idx]
            );
        }
    }

    Ok(())
}
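
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// Integrated Gradients attributes feature i of input x by integrating the
// model's partial derivative along the straight-line path from a baseline x'
// to x:  IG_i(x) = (x_i - x'_i) · ∫₀¹ ∂f(x' + α(x - x'))/∂x_i dα.
// The hedged sketch below approximates this for any scalar function with a
// midpoint Riemann sum and central finite differences; the library's
// `AttributionMethod::IntegratedGradients` plays the analogous role for QNNs.
#[allow(dead_code)]
fn integrated_gradients_sketch(
    f: &dyn Fn(&Array1<f64>) -> f64,
    x: &Array1<f64>,
    baseline: &Array1<f64>,
    num_samples: usize,
) -> Array1<f64> {
    let eps = 1e-5;
    let mut avg_grad = Array1::<f64>::zeros(x.len());
    for k in 0..num_samples {
        // Midpoint of the k-th segment along the interpolation path.
        let alpha = (k as f64 + 0.5) / num_samples as f64;
        let point = baseline + &(alpha * (x - baseline));
        for i in 0..x.len() {
            let (mut plus, mut minus) = (point.clone(), point.clone());
            plus[i] += eps;
            minus[i] -= eps;
            // Central finite-difference estimate of ∂f/∂x_i at `point`.
            avg_grad[i] += (f(&plus) - f(&minus)) / (2.0 * eps) / num_samples as f64;
        }
    }
    avg_grad * (x - baseline)
}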

/// Demonstrate circuit analysis and visualization
fn circuit_analysis_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 6 },
        QNNLayerType::EntanglementLayer {
            connectivity: "full".to_string(),
        },
        QNNLayerType::VariationalLayer { num_params: 6 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "Pauli-Z".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    let method = ExplanationMethod::CircuitVisualization {
        include_measurements: true,
        parameter_sensitivity: true,
    };

    let mut xai = QuantumExplainableAI::new(model, vec![method]);

    println!("   Analyzing quantum circuit structure and parameter importance...");

    let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
    let explanation = xai.explain(&test_input)?;

    if let Some(ref circuit) = explanation.circuit_explanation {
        println!("\n   Circuit Analysis Results:");

        // Parameter importance
        println!("   Parameter Importance Scores:");
        for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
            if importance > 0.5 {
                println!("     Parameter {i}: {importance:.3} (high importance)");
            } else if importance > 0.2 {
                println!("     Parameter {i}: {importance:.3} (medium importance)");
            }
        }

        // Layer analysis
        println!("\n   Layer-wise Analysis:");
        for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
            println!(
                "     Layer {}: {}",
                i,
                format_layer_type(&layer_analysis.layer_type)
            );
            println!(
                "       Information gain: {:.3}",
                layer_analysis.information_gain
            );
            println!(
                "       Entanglement generated: {:.3}",
                layer_analysis.entanglement_generated
            );

            if layer_analysis.entanglement_generated > 0.5 {
                println!("       → Significant entanglement layer");
            }
        }

        // Gate contributions
        println!("\n   Gate Contribution Analysis:");
        for gate in circuit.gate_contributions.iter().take(5) {
            println!(
                "     Gate {}: {} on qubits {:?}",
                gate.gate_index, gate.gate_type, gate.qubits
            );
            println!("       Contribution: {:.3}", gate.contribution);

            if let Some(ref params) = gate.parameters {
                println!("       First parameter: {:.3}", params[0]);
            }
        }

        // Critical path
        println!("\n   Critical Path (most important parameters):");
        print!("     ");
        for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
            if i > 0 {
                print!(" → ");
            }
            print!("P{param_idx}");
        }
        println!();

        println!("   → This path represents the most influential quantum operations");
    }

    Ok(())
}
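
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// Parameter sensitivity for variational gates is commonly obtained from the
// parameter-shift rule. For a gate generated as exp(-iθP/2) with P² = I, the
// derivative of an expectation value ⟨O⟩(θ) is exact:
//   ∂⟨O⟩/∂θ = (⟨O⟩(θ + π/2) - ⟨O⟩(θ - π/2)) / 2.
// A minimal sketch for any black-box expectation function `expval`:
#[allow(dead_code)]
fn parameter_shift_sketch(
    expval: &dyn Fn(&Array1<f64>) -> f64,
    params: &Array1<f64>,
    idx: usize,
) -> f64 {
    let shift = std::f64::consts::FRAC_PI_2;
    let (mut plus, mut minus) = (params.clone(), params.clone());
    plus[idx] += shift;
    minus[idx] -= shift;
    // Two circuit evaluations give the exact derivative, no finite differences.
    (expval(&plus) - expval(&minus)) / 2.0
}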

/// Demonstrate quantum state analysis
fn quantum_state_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 3 },
        QNNLayerType::VariationalLayer { num_params: 9 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;

    let method = ExplanationMethod::StateAnalysis {
        entanglement_measures: true,
        coherence_analysis: true,
        superposition_analysis: true,
    };

    let mut xai = QuantumExplainableAI::new(model, vec![method]);

    println!("   Analyzing quantum state properties...");

    // Test different inputs to see state evolution
    let test_inputs = [
        Array1::from_vec(vec![0.0, 0.0, 0.0]),
        Array1::from_vec(vec![1.0, 0.0, 0.0]),
        Array1::from_vec(vec![0.5, 0.5, 0.5]),
        Array1::from_vec(vec![1.0, 1.0, 1.0]),
    ];

    for (i, input) in test_inputs.iter().enumerate() {
        println!(
            "\n   Input {}: [{:.1}, {:.1}, {:.1}]",
            i + 1,
            input[0],
            input[1],
            input[2]
        );

        let explanation = xai.explain(input)?;

        if let Some(ref state) = explanation.state_properties {
            println!("     Quantum State Properties:");
            println!(
                "     - Entanglement entropy: {:.3}",
                state.entanglement_entropy
            );

            // Coherence measures
            for (measure_name, &value) in &state.coherence_measures {
                println!("     - {measure_name}: {value:.3}");
            }

            // Superposition analysis
            let max_component = state
                .superposition_components
                .iter()
                .copied()
                .fold(f64::NEG_INFINITY, f64::max);
            println!("     - Max superposition component: {max_component:.3}");

            // Measurement probabilities
            let total_prob = state.measurement_probabilities.sum();
            println!("     - Total measurement probability: {total_prob:.3}");

            // Most likely measurement outcome
            let most_likely = state
                .measurement_probabilities
                .iter()
                .enumerate()
                .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
                .map_or((0, 0.0), |(idx, &prob)| (idx, prob));

            println!(
                "     - Most likely outcome: state {} with prob {:.3}",
                most_likely.0, most_likely.1
            );

            // State fidelities
            if let Some(highest_fidelity) = state
                .state_fidelities
                .values()
                .copied()
                .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
            {
                println!("     - Highest basis state fidelity: {highest_fidelity:.3}");
            }

            // Interpretation
            if state.entanglement_entropy > 0.5 {
                println!("     → Highly entangled state");
            } else if state.entanglement_entropy > 0.1 {
                println!("     → Moderately entangled state");
            } else {
                println!("     → Separable or weakly entangled state");
            }
        }
    }

    Ok(())
}
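
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// The entanglement entropy printed above is the von Neumann entropy of a
// reduced state: S(ρ_A) = -Σ_k λ_k ln λ_k, where the λ_k are the eigenvalues
// of ρ_A (for a pure bipartite state, the squared Schmidt coefficients).
// Given those eigenvalues, the entropy itself is a one-liner:
#[allow(dead_code)]
fn von_neumann_entropy_sketch(eigenvalues: &Array1<f64>) -> f64 {
    eigenvalues
        .iter()
        .filter(|&&p| p > 1e-12) // by convention, 0 · ln 0 = 0
        .map(|&p| -p * p.ln())
        .sum()
}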

/// Demonstrate saliency mapping
fn saliency_mapping_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    // Test different perturbation methods
    let perturbation_methods = vec![
        (
            "Gaussian Noise",
            PerturbationMethod::Gaussian { sigma: 0.1 },
        ),
        (
            "Quantum Phase",
            PerturbationMethod::QuantumPhase { magnitude: 0.2 },
        ),
        ("Feature Masking", PerturbationMethod::FeatureMasking),
        (
            "Parameter Perturbation",
            PerturbationMethod::ParameterPerturbation { strength: 0.1 },
        ),
    ];

    let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);

    println!("   Computing saliency maps with different perturbation methods...");
    println!(
        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
        test_input[0], test_input[1], test_input[2], test_input[3]
    );

    for (method_name, perturbation_method) in perturbation_methods {
        let method = ExplanationMethod::SaliencyMapping {
            perturbation_method,
            aggregation: AggregationMethod::Mean,
        };

        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
        let explanation = xai.explain(&test_input)?;

        if let Some(ref saliency) = explanation.saliency_map {
            println!("\n   {method_name} Saliency Map:");

            // Analyze saliency for each output
            for output_idx in 0..saliency.ncols() {
                println!("     Output {output_idx}:");
                for input_idx in 0..saliency.nrows() {
                    let saliency_score = saliency[[input_idx, output_idx]];
                    if saliency_score > 0.1 {
                        println!(
                            "       Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (important)"
                        );
                    } else if saliency_score > 0.05 {
                        println!(
                            "       Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (moderate)"
                        );
                    }
                }
            }

            // Find most salient feature-output pair
            let mut max_saliency = 0.0;
            let mut max_pair = (0, 0);

            for i in 0..saliency.nrows() {
                for j in 0..saliency.ncols() {
                    if saliency[[i, j]] > max_saliency {
                        max_saliency = saliency[[i, j]];
                        max_pair = (i, j);
                    }
                }
            }

            println!(
                "     → Most salient: Feature {} → Output {} ({:.3})",
                max_pair.0, max_pair.1, max_saliency
            );
        }
    }

    Ok(())
}
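
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// Perturbation-based saliency scores each (feature, output) pair by how far
// output j moves when feature i alone is nudged, aggregated over many draws:
//   S[i, j] = mean_δ |f_j(x with x_i + δ) - f_j(x)|.
// A dependency-free sketch using deterministic offsets in [-σ, σ] as a
// stand-in for Gaussian noise (a real implementation would sample δ ~ N(0, σ²)):
#[allow(dead_code)]
fn perturbation_saliency_sketch(
    f: &dyn Fn(&Array1<f64>) -> Array1<f64>,
    x: &Array1<f64>,
    sigma: f64,
    num_draws: usize,
) -> Array2<f64> {
    let base = f(x);
    let mut saliency = Array2::<f64>::zeros((x.len(), base.len()));
    for i in 0..x.len() {
        for k in 0..num_draws {
            // Deterministic pseudo-random offset keeps the sketch self-contained.
            let delta = sigma * ((k as f64 + 1.0) * 12.9898).sin();
            let mut perturbed = x.clone();
            perturbed[i] += delta;
            let out = f(&perturbed);
            for j in 0..base.len() {
                // Mean absolute output change, matching AggregationMethod::Mean.
                saliency[[i, j]] += (out[j] - base[j]).abs() / num_draws as f64;
            }
        }
    }
    saliency
}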

/// Demonstrate Quantum LIME
fn quantum_lime_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 10 },
        QNNLayerType::EntanglementLayer {
            connectivity: "circular".to_string(),
        },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    // Test different local models
    let local_models = vec![
        ("Linear Regression", LocalModelType::LinearRegression),
        ("Decision Tree", LocalModelType::DecisionTree),
        ("Quantum Linear", LocalModelType::QuantumLinear),
    ];

    let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);

    println!("   Quantum LIME: Local Interpretable Model-agnostic Explanations");
    println!(
        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
        test_input[0], test_input[1], test_input[2], test_input[3]
    );

    for (model_name, local_model) in local_models {
        let method = ExplanationMethod::QuantumLIME {
            num_perturbations: 100,
            kernel_width: 0.5,
            local_model,
        };

        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
        let explanation = xai.explain(&test_input)?;

        if let Some(ref attributions) = explanation.feature_attributions {
            println!("\n   LIME with {model_name}:");

            for (i, &attr) in attributions.iter().enumerate() {
                let impact = if attr.abs() > 0.3 {
                    "high"
                } else if attr.abs() > 0.1 {
                    "medium"
                } else {
                    "low"
                };

                println!("     Feature {i}: {attr:+.3} ({impact} impact)");
            }

            // Local model interpretation
            match model_name {
                "Linear Regression" => {
                    println!("     → Linear relationship approximation in local region");
                }
                "Decision Tree" => {
                    println!("     → Rule-based approximation with thresholds");
                }
                "Quantum Linear" => {
                    println!("     → Quantum-aware linear approximation");
                }
                _ => {}
            }

            // Summarize explanation complexity as the total attribution magnitude
            let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
            println!("     → Local explanation complexity: {local_complexity:.3}");
        }
    }

    Ok(())
}
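
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// LIME fits its local surrogate on perturbed samples z weighted by their
// proximity to the explained instance x, typically with an exponential
// kernel π_x(z) = exp(-D(x, z)² / σ²); σ corresponds to the `kernel_width`
// argument above. A minimal sketch of that weighting:
#[allow(dead_code)]
fn lime_kernel_weight_sketch(x: &Array1<f64>, z: &Array1<f64>, kernel_width: f64) -> f64 {
    // Squared Euclidean distance between the instance and the perturbed sample.
    let dist_sq: f64 = x.iter().zip(z.iter()).map(|(a, b)| (a - b).powi(2)).sum();
    (-dist_sq / kernel_width.powi(2)).exp()
}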

/// Demonstrate Quantum SHAP
fn quantum_shap_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 3 },
        QNNLayerType::VariationalLayer { num_params: 6 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "Pauli-Z".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;

    let method = ExplanationMethod::QuantumSHAP {
        num_coalitions: 100,
        background_samples: 20,
    };

    let mut xai = QuantumExplainableAI::new(model, vec![method]);

    // Set background data for SHAP
    let background_data = Array2::from_shape_fn((50, 3), |(i, j)| {
        0.3f64.mul_add(((i + j) as f64 * 0.1).sin(), 0.5)
    });
    xai.set_background_data(background_data);

    println!("   Quantum SHAP: SHapley Additive exPlanations");

    // Test multiple inputs
    let test_inputs = [
        Array1::from_vec(vec![0.1, 0.5, 0.9]),
        Array1::from_vec(vec![0.8, 0.3, 0.6]),
        Array1::from_vec(vec![0.4, 0.7, 0.2]),
    ];

    for (i, input) in test_inputs.iter().enumerate() {
        println!(
            "\n   Input {}: [{:.1}, {:.1}, {:.1}]",
            i + 1,
            input[0],
            input[1],
            input[2]
        );

        let explanation = xai.explain(input)?;

        if let Some(ref shap_values) = explanation.feature_attributions {
            println!("     SHAP Values:");

            let mut total_shap = 0.0;
            for (j, &value) in shap_values.iter().enumerate() {
                total_shap += value;
                println!("     - Feature {j}: {value:+.4}");
            }

            println!("     - Sum of SHAP values: {total_shap:.4}");

            // Feature ranking
            let mut indexed_shap: Vec<(usize, f64)> = shap_values
                .iter()
                .enumerate()
                .map(|(idx, &val)| (idx, val.abs()))
                .collect();
            indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());

            println!("     Feature importance ranking:");
            for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
                let original_value = shap_values[*feature_idx];
                println!(
                    "     {}. Feature {}: {:.4} (|{:.4}|)",
                    rank + 1,
                    feature_idx,
                    original_value,
                    abs_value
                );
            }

            // SHAP properties
            println!(
                "     → SHAP values satisfy the efficiency property: they sum to the difference between the prediction and the background expectation"
            );
            println!("     → Each value is the feature's average marginal contribution across coalitions");
        }
    }

    Ok(())
}
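
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// For a handful of features the Shapley value has a closed form over all
// coalitions S that exclude feature i:
//   φ_i = Σ_{S ⊆ N\{i}} |S|! (n - |S| - 1)! / n! · [v(S ∪ {i}) - v(S)],
// where v maps a coalition (here, a bitmask of "present" features) to the
// model's payoff. QuantumSHAP approximates this sum by sampling
// `num_coalitions` coalitions and imputing absent features from the
// background data; the exact enumeration below is feasible for small n:
#[allow(dead_code)]
fn exact_shapley_sketch(v: &dyn Fn(u32) -> f64, n: u32, i: u32) -> f64 {
    let factorial = |k: u32| -> f64 { (1..=k).map(f64::from).product() };
    let mut phi = 0.0;
    for s in 0..(1u32 << n) {
        if s & (1 << i) != 0 {
            continue; // only coalitions without feature i
        }
        let size = s.count_ones();
        // Probability that S is exactly the set preceding i in a random order.
        let weight = factorial(size) * factorial(n - size - 1) / factorial(n);
        phi += weight * (v(s | (1 << i)) - v(s));
    }
    phi
}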

/// Demonstrate Layer-wise Relevance Propagation
fn quantum_lrp_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::VariationalLayer { num_params: 6 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

    // Test different LRP rules
    let lrp_rules = vec![
        ("Epsilon Rule", LRPRule::Epsilon),
        ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
        (
            "Alpha-Beta Rule",
            LRPRule::AlphaBeta {
                alpha: 2.0,
                beta: 1.0,
            },
        ),
        ("Quantum Rule", LRPRule::QuantumRule),
    ];

    let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);

    println!("   Layer-wise Relevance Propagation for Quantum Circuits");
    println!(
        "   Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
        test_input[0], test_input[1], test_input[2], test_input[3]
    );

    for (rule_name, lrp_rule) in lrp_rules {
        let method = ExplanationMethod::QuantumLRP {
            propagation_rule: lrp_rule,
            epsilon: 1e-6,
        };

        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
        let explanation = xai.explain(&test_input)?;

        if let Some(ref relevance) = explanation.feature_attributions {
            println!("\n   LRP with {rule_name}:");

            let total_relevance = relevance.sum();

            for (i, &rel) in relevance.iter().enumerate() {
                let percentage = if total_relevance.abs() > 1e-10 {
                    rel / total_relevance * 100.0
                } else {
                    0.0
                };

                println!("     Feature {i}: {rel:.4} ({percentage:.1}% of total relevance)");
            }

            println!("     Total relevance: {total_relevance:.4}");

            // Rule-specific interpretation
            match rule_name {
                "Epsilon Rule" => {
                    println!("     → Distributes relevance proportionally to activations");
                }
                "Gamma Rule" => {
                    println!("     → Emphasizes positive contributions");
                }
                "Alpha-Beta Rule" => {
                    println!("     → Separates positive and negative contributions");
                }
                "Quantum Rule" => {
                    println!("     → Accounts for quantum superposition and entanglement");
                }
                _ => {}
            }
        }
    }

    Ok(())
}
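
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// The epsilon rule redistributes the relevance R_j of each unit j in a layer
// back to its inputs in proportion to their contributions z_ij = a_i · w_ij:
//   R_i = Σ_j z_ij / (Σ_k z_kj + ε · sign(Σ_k z_kj)) · R_j,
// where ε (the `epsilon: 1e-6` above) stabilizes near-zero denominators.
// A minimal sketch for one classical dense layer:
#[allow(dead_code)]
fn lrp_epsilon_sketch(
    activations: &Array1<f64>,
    weights: &Array2<f64>, // weights[[i, j]] connects input i to unit j
    relevance_out: &Array1<f64>,
    epsilon: f64,
) -> Array1<f64> {
    let mut relevance_in = Array1::<f64>::zeros(activations.len());
    for j in 0..relevance_out.len() {
        // Total pre-activation of unit j.
        let z: f64 = (0..activations.len())
            .map(|k| activations[k] * weights[[k, j]])
            .sum();
        let denom = z + epsilon * z.signum();
        for i in 0..activations.len() {
            relevance_in[i] += activations[i] * weights[[i, j]] / denom * relevance_out[j];
        }
    }
    relevance_in
}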

/// Comprehensive explanation demonstration
fn comprehensive_explanation_demo() -> Result<()> {
    let layers = vec![
        QNNLayerType::EncodingLayer { num_features: 4 },
        QNNLayerType::VariationalLayer { num_params: 12 },
        QNNLayerType::EntanglementLayer {
            connectivity: "full".to_string(),
        },
        QNNLayerType::VariationalLayer { num_params: 8 },
        QNNLayerType::MeasurementLayer {
            measurement_basis: "computational".to_string(),
        },
    ];

    let model = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;

    // Use comprehensive explanation methods
    let methods = vec![
        ExplanationMethod::QuantumFeatureAttribution {
            method: AttributionMethod::IntegratedGradients,
            num_samples: 30,
            baseline: Some(Array1::zeros(4)),
        },
        ExplanationMethod::CircuitVisualization {
            include_measurements: true,
            parameter_sensitivity: true,
        },
        ExplanationMethod::StateAnalysis {
            entanglement_measures: true,
            coherence_analysis: true,
            superposition_analysis: true,
        },
        ExplanationMethod::ConceptActivation {
            concept_datasets: vec!["pattern_A".to_string(), "pattern_B".to_string()],
            activation_threshold: 0.3,
        },
    ];

    let mut xai = QuantumExplainableAI::new(model, methods);

    // Add concept vectors
    xai.add_concept(
        "pattern_A".to_string(),
        Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
    );
    xai.add_concept(
        "pattern_B".to_string(),
        Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
    );

    // Set background data
    let background_data = Array2::from_shape_fn((30, 4), |(i, j)| {
        0.4f64.mul_add(((i * j) as f64 * 0.15).sin(), 0.3)
    });
    xai.set_background_data(background_data);

    println!("   Comprehensive Quantum Model Explanation");

    // Test input representing a specific pattern
    let test_input = Array1::from_vec(vec![0.9, 0.1, 0.8, 0.2]); // Similar to pattern_A

    println!(
        "\n   Analyzing input: [{:.1}, {:.1}, {:.1}, {:.1}]",
        test_input[0], test_input[1], test_input[2], test_input[3]
    );

    let explanation = xai.explain(&test_input)?;

    // Display comprehensive results
    println!("\n   === COMPREHENSIVE EXPLANATION RESULTS ===");

    // Feature attributions
    if let Some(ref attributions) = explanation.feature_attributions {
        println!("\n   Feature Attributions:");
        for (i, &attr) in attributions.iter().enumerate() {
            println!("   - Feature {i}: {attr:+.3}");
        }
    }

    // Circuit analysis summary
    if let Some(ref circuit) = explanation.circuit_explanation {
        println!("\n   Circuit Analysis Summary:");
        let avg_importance = circuit.parameter_importance.mean().unwrap_or(0.0);
        println!("   - Average parameter importance: {avg_importance:.3}");
        println!(
            "   - Number of analyzed layers: {}",
            circuit.layer_analysis.len()
        );
        println!("   - Critical path length: {}", circuit.critical_path.len());
    }

    // Quantum state properties
    if let Some(ref state) = explanation.state_properties {
        println!("\n   Quantum State Properties:");
        println!(
            "   - Entanglement entropy: {:.3}",
            state.entanglement_entropy
        );
        println!(
            "   - Coherence measures: {} types",
            state.coherence_measures.len()
        );

        let max_measurement_prob = state
            .measurement_probabilities
            .iter()
            .copied()
            .fold(f64::NEG_INFINITY, f64::max);
        println!("   - Max measurement probability: {max_measurement_prob:.3}");
    }

    // Concept activations
    if let Some(ref concepts) = explanation.concept_activations {
        println!("\n   Concept Activations:");
        for (concept, &activation) in concepts {
            let similarity = if activation > 0.7 {
                "high"
            } else if activation > 0.3 {
                "medium"
            } else {
                "low"
            };
            println!("   - {concept}: {activation:.3} ({similarity} similarity)");
        }
    }

    // Confidence scores
    println!("\n   Explanation Confidence Scores:");
    for (component, &confidence) in &explanation.confidence_scores {
        println!("   - {component}: {confidence:.3}");
    }

    // Textual explanation
    println!("\n   Generated Explanation:");
    println!("{}", explanation.textual_explanation);

    // Summary insights
    println!("\n   === KEY INSIGHTS ===");

    if let Some(ref attributions) = explanation.feature_attributions {
        let max_attr_idx = attributions
            .iter()
            .enumerate()
            .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
            .map_or(0, |(i, _)| i);

        println!(
            "   • Most influential feature: Feature {} ({:.3})",
            max_attr_idx, attributions[max_attr_idx]
        );
    }

    if let Some(ref state) = explanation.state_properties {
        if state.entanglement_entropy > 0.5 {
            println!("   • Model creates significant quantum entanglement");
        }

        let coherence_level = state
            .coherence_measures
            .values()
            .copied()
            .fold(0.0, f64::max);
        if coherence_level > 0.5 {
            println!("   • High quantum coherence detected");
        }
    }

    if let Some(ref concepts) = explanation.concept_activations {
        if let Some((best_concept, &max_activation)) =
            concepts.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
        {
            if max_activation > 0.5 {
                println!("   • Input strongly matches concept: {best_concept}");
            }
        }
    }

    println!("   • Explanation provides multi-faceted interpretation of quantum model behavior");

    Ok(())
}
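
// --- Illustrative sketch (not part of the quantrs2_ml API) ---
// Concept activation compares a representation of the input against each
// stored concept vector; cosine similarity is a common choice, with the
// `activation_threshold` above deciding when a concept counts as active:
//   cos(r, c) = ⟨r, c⟩ / (‖r‖ · ‖c‖).
#[allow(dead_code)]
fn concept_activation_sketch(representation: &Array1<f64>, concept: &Array1<f64>) -> f64 {
    let dot: f64 = representation
        .iter()
        .zip(concept.iter())
        .map(|(a, b)| a * b)
        .sum();
    let norm_r = representation.iter().map(|a| a * a).sum::<f64>().sqrt();
    let norm_c = concept.iter().map(|b| b * b).sum::<f64>().sqrt();
    if norm_r * norm_c < 1e-12 {
        0.0 // degenerate vectors carry no concept signal
    } else {
        dot / (norm_r * norm_c)
    }
}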

/// Helper function to format layer type for display
fn format_layer_type(layer_type: &QNNLayerType) -> String {
    match layer_type {
        QNNLayerType::EncodingLayer { num_features } => {
            format!("Encoding Layer ({num_features} features)")
        }
        QNNLayerType::VariationalLayer { num_params } => {
            format!("Variational Layer ({num_params} parameters)")
        }
        QNNLayerType::EntanglementLayer { connectivity } => {
            format!("Entanglement Layer ({connectivity})")
        }
        QNNLayerType::MeasurementLayer { measurement_basis } => {
            format!("Measurement Layer ({measurement_basis})")
        }
    }
}