pub struct QuantumExplainableAI { /* private fields */ }
Main quantum explainable AI engine
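A minimal end-to-end sketch, assuming the crate's QNN and explanation types are in scope (import paths are omitted and may differ) and that the snippet runs inside a function returning Result<()>; the values mirror the repository example shown below:

use ndarray::Array1;

// Build a small quantum model (constructor and layer types as used in the repository example).
let layers = vec![
    QNNLayerType::EncodingLayer { num_features: 4 },
    QNNLayerType::VariationalLayer { num_params: 8 },
    QNNLayerType::MeasurementLayer {
        measurement_basis: "computational".to_string(),
    },
];
let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

// Pick one or more explanation methods.
let methods = vec![ExplanationMethod::QuantumFeatureAttribution {
    method: AttributionMethod::IntegratedGradients,
    num_samples: 50,
    baseline: Some(Array1::zeros(4)),
}];

// Create the engine and explain a single input.
let mut xai = QuantumExplainableAI::new(model, methods);
let explanation = xai.explain(&Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]))?;

if let Some(ref attributions) = explanation.feature_attributions {
    println!("Feature attributions: {:?}", attributions);
}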
Implementations
impl QuantumExplainableAI
pub fn new(model: QuantumNeuralNetwork, methods: Vec<ExplanationMethod>) -> Self
Create a new quantum explainable AI instance
Examples found in repository
examples/quantum_explainable_ai.rs (line 119)
53fn feature_attribution_demo() -> Result<()> {
54 // Create quantum model
55 let layers = vec![
56 QNNLayerType::EncodingLayer { num_features: 4 },
57 QNNLayerType::VariationalLayer { num_params: 12 },
58 QNNLayerType::EntanglementLayer {
59 connectivity: "circular".to_string(),
60 },
61 QNNLayerType::VariationalLayer { num_params: 8 },
62 QNNLayerType::MeasurementLayer {
63 measurement_basis: "computational".to_string(),
64 },
65 ];
66
67 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
68
69 println!(
70 " Created quantum model with {} parameters",
71 model.parameters.len()
72 );
73
74 // Test different attribution methods
75 let attribution_methods = vec![
76 (
77 "Integrated Gradients",
78 ExplanationMethod::QuantumFeatureAttribution {
79 method: AttributionMethod::IntegratedGradients,
80 num_samples: 50,
81 baseline: Some(Array1::zeros(4)),
82 },
83 ),
84 (
85 "Gradient × Input",
86 ExplanationMethod::QuantumFeatureAttribution {
87 method: AttributionMethod::GradientInput,
88 num_samples: 1,
89 baseline: None,
90 },
91 ),
92 (
93 "Gradient SHAP",
94 ExplanationMethod::QuantumFeatureAttribution {
95 method: AttributionMethod::GradientSHAP,
96 num_samples: 30,
97 baseline: None,
98 },
99 ),
100 (
101 "Quantum Attribution",
102 ExplanationMethod::QuantumFeatureAttribution {
103 method: AttributionMethod::QuantumAttribution,
104 num_samples: 25,
105 baseline: None,
106 },
107 ),
108 ];
109
110 // Test input
111 let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
112
113 println!(
114 "\n Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
115 test_input[0], test_input[1], test_input[2], test_input[3]
116 );
117
118 for (method_name, method) in attribution_methods {
119 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
120
121 // Set background data for gradient SHAP
122 let background_data =
123 Array2::from_shape_fn((20, 4), |(_, j)| 0.5 + 0.3 * (j as f64 * 0.2).sin());
124 xai.set_background_data(background_data);
125
126 let explanation = xai.explain(&test_input)?;
127
128 if let Some(ref attributions) = explanation.feature_attributions {
129 println!("\n {} Attribution:", method_name);
130 for (i, &attr) in attributions.iter().enumerate() {
131 println!(
132 " Feature {}: {:+.4} {}",
133 i,
134 attr,
135 if attr.abs() > 0.1 {
136 if attr > 0.0 {
137 "(strong positive)"
138 } else {
139 "(strong negative)"
140 }
141 } else {
142 "(weak influence)"
143 }
144 );
145 }
146
147 // Find most important feature
148 let max_idx = attributions
149 .iter()
150 .enumerate()
151 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
152 .map(|(i, _)| i)
153 .unwrap_or(0);
154
155 println!(
156 " → Most important feature: Feature {} ({:.4})",
157 max_idx, attributions[max_idx]
158 );
159 }
160 }
161
162 Ok(())
163}
164
165/// Demonstrate circuit analysis and visualization
166fn circuit_analysis_demo() -> Result<()> {
167 let layers = vec![
168 QNNLayerType::EncodingLayer { num_features: 4 },
169 QNNLayerType::VariationalLayer { num_params: 6 },
170 QNNLayerType::EntanglementLayer {
171 connectivity: "full".to_string(),
172 },
173 QNNLayerType::VariationalLayer { num_params: 6 },
174 QNNLayerType::MeasurementLayer {
175 measurement_basis: "Pauli-Z".to_string(),
176 },
177 ];
178
179 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
180
181 let method = ExplanationMethod::CircuitVisualization {
182 include_measurements: true,
183 parameter_sensitivity: true,
184 };
185
186 let mut xai = QuantumExplainableAI::new(model, vec![method]);
187
188 println!(" Analyzing quantum circuit structure and parameter importance...");
189
190 let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
191 let explanation = xai.explain(&test_input)?;
192
193 if let Some(ref circuit) = explanation.circuit_explanation {
194 println!("\n Circuit Analysis Results:");
195
196 // Parameter importance
197 println!(" Parameter Importance Scores:");
198 for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
199 if importance > 0.5 {
200 println!(" Parameter {}: {:.3} (high importance)", i, importance);
201 } else if importance > 0.2 {
202 println!(
203 " Parameter {}: {:.3} (medium importance)",
204 i, importance
205 );
206 }
207 }
208
209 // Layer analysis
210 println!("\n Layer-wise Analysis:");
211 for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
212 println!(
213 " Layer {}: {}",
214 i,
215 format_layer_type(&layer_analysis.layer_type)
216 );
217 println!(
218 " Information gain: {:.3}",
219 layer_analysis.information_gain
220 );
221 println!(
222 " Entanglement generated: {:.3}",
223 layer_analysis.entanglement_generated
224 );
225
226 if layer_analysis.entanglement_generated > 0.5 {
227 println!(" → Significant entanglement layer");
228 }
229 }
230
231 // Gate contributions
232 println!("\n Gate Contribution Analysis:");
233 for (i, gate) in circuit.gate_contributions.iter().enumerate().take(5) {
234 println!(
235 " Gate {}: {} on qubits {:?}",
236 gate.gate_index, gate.gate_type, gate.qubits
237 );
238 println!(" Contribution: {:.3}", gate.contribution);
239
240 if let Some(ref params) = gate.parameters {
241 println!(" Parameters: {:.3}", params[0]);
242 }
243 }
244
245 // Critical path
246 println!("\n Critical Path (most important parameters):");
247 print!(" ");
248        for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
249 if i > 0 {
250 print!(" → ");
251 }
252 print!("P{}", param_idx);
253 }
254 println!();
255
256 println!(" → This path represents the most influential quantum operations");
257 }
258
259 Ok(())
260}
261
262/// Demonstrate quantum state analysis
263fn quantum_state_demo() -> Result<()> {
264 let layers = vec![
265 QNNLayerType::EncodingLayer { num_features: 3 },
266 QNNLayerType::VariationalLayer { num_params: 9 },
267 QNNLayerType::EntanglementLayer {
268 connectivity: "circular".to_string(),
269 },
270 QNNLayerType::MeasurementLayer {
271 measurement_basis: "computational".to_string(),
272 },
273 ];
274
275 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
276
277 let method = ExplanationMethod::StateAnalysis {
278 entanglement_measures: true,
279 coherence_analysis: true,
280 superposition_analysis: true,
281 };
282
283 let mut xai = QuantumExplainableAI::new(model, vec![method]);
284
285 println!(" Analyzing quantum state properties...");
286
287 // Test different inputs to see state evolution
288 let test_inputs = vec![
289 Array1::from_vec(vec![0.0, 0.0, 0.0]),
290 Array1::from_vec(vec![1.0, 0.0, 0.0]),
291 Array1::from_vec(vec![0.5, 0.5, 0.5]),
292 Array1::from_vec(vec![1.0, 1.0, 1.0]),
293 ];
294
295 for (i, input) in test_inputs.iter().enumerate() {
296 println!(
297 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
298 i + 1,
299 input[0],
300 input[1],
301 input[2]
302 );
303
304 let explanation = xai.explain(input)?;
305
306 if let Some(ref state) = explanation.state_properties {
307 println!(" Quantum State Properties:");
308 println!(
309 " - Entanglement entropy: {:.3}",
310 state.entanglement_entropy
311 );
312
313 // Coherence measures
314 for (measure_name, &value) in &state.coherence_measures {
315 println!(" - {}: {:.3}", measure_name, value);
316 }
317
318 // Superposition analysis
319 let max_component = state
320 .superposition_components
321 .iter()
322 .cloned()
323 .fold(f64::NEG_INFINITY, f64::max);
324 println!(" - Max superposition component: {:.3}", max_component);
325
326 // Measurement probabilities
327 let total_prob = state.measurement_probabilities.sum();
328 println!(" - Total measurement probability: {:.3}", total_prob);
329
330 // Most likely measurement outcome
331 let most_likely = state
332 .measurement_probabilities
333 .iter()
334 .enumerate()
335 .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
336 .map(|(idx, &prob)| (idx, prob))
337 .unwrap_or((0, 0.0));
338
339 println!(
340 " - Most likely outcome: state {} with prob {:.3}",
341 most_likely.0, most_likely.1
342 );
343
344 // State fidelities
345 if let Some(highest_fidelity) = state
346 .state_fidelities
347 .values()
348 .cloned()
349 .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
350 {
351 println!(
352 " - Highest basis state fidelity: {:.3}",
353 highest_fidelity
354 );
355 }
356
357 // Interpretation
358 if state.entanglement_entropy > 0.5 {
359 println!(" → Highly entangled state");
360 } else if state.entanglement_entropy > 0.1 {
361 println!(" → Moderately entangled state");
362 } else {
363 println!(" → Separable or weakly entangled state");
364 }
365 }
366 }
367
368 Ok(())
369}
370
371/// Demonstrate saliency mapping
372fn saliency_mapping_demo() -> Result<()> {
373 let layers = vec![
374 QNNLayerType::EncodingLayer { num_features: 4 },
375 QNNLayerType::VariationalLayer { num_params: 8 },
376 QNNLayerType::MeasurementLayer {
377 measurement_basis: "computational".to_string(),
378 },
379 ];
380
381 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
382
383 // Test different perturbation methods
384 let perturbation_methods = vec![
385 (
386 "Gaussian Noise",
387 PerturbationMethod::Gaussian { sigma: 0.1 },
388 ),
389 (
390 "Quantum Phase",
391 PerturbationMethod::QuantumPhase { magnitude: 0.2 },
392 ),
393 ("Feature Masking", PerturbationMethod::FeatureMasking),
394 (
395 "Parameter Perturbation",
396 PerturbationMethod::ParameterPerturbation { strength: 0.1 },
397 ),
398 ];
399
400 let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);
401
402 println!(" Computing saliency maps with different perturbation methods...");
403 println!(
404 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
405 test_input[0], test_input[1], test_input[2], test_input[3]
406 );
407
408 for (method_name, perturbation_method) in perturbation_methods {
409 let method = ExplanationMethod::SaliencyMapping {
410 perturbation_method,
411 aggregation: AggregationMethod::Mean,
412 };
413
414 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
415 let explanation = xai.explain(&test_input)?;
416
417 if let Some(ref saliency) = explanation.saliency_map {
418 println!("\n {} Saliency Map:", method_name);
419
420 // Analyze saliency for each output
421 for output_idx in 0..saliency.ncols() {
422 println!(" Output {}:", output_idx);
423 for input_idx in 0..saliency.nrows() {
424 let saliency_score = saliency[[input_idx, output_idx]];
425 if saliency_score > 0.1 {
426 println!(
427 " Feature {} → Output {}: {:.3} (important)",
428 input_idx, output_idx, saliency_score
429 );
430 } else if saliency_score > 0.05 {
431 println!(
432 " Feature {} → Output {}: {:.3} (moderate)",
433 input_idx, output_idx, saliency_score
434 );
435 }
436 }
437 }
438
439 // Find most salient feature-output pair
440 let mut max_saliency = 0.0;
441 let mut max_pair = (0, 0);
442
443 for i in 0..saliency.nrows() {
444 for j in 0..saliency.ncols() {
445 if saliency[[i, j]] > max_saliency {
446 max_saliency = saliency[[i, j]];
447 max_pair = (i, j);
448 }
449 }
450 }
451
452 println!(
453 " → Most salient: Feature {} → Output {} ({:.3})",
454 max_pair.0, max_pair.1, max_saliency
455 );
456 }
457 }
458
459 Ok(())
460}
461
462/// Demonstrate Quantum LIME
463fn quantum_lime_demo() -> Result<()> {
464 let layers = vec![
465 QNNLayerType::EncodingLayer { num_features: 4 },
466 QNNLayerType::VariationalLayer { num_params: 10 },
467 QNNLayerType::EntanglementLayer {
468 connectivity: "circular".to_string(),
469 },
470 QNNLayerType::MeasurementLayer {
471 measurement_basis: "computational".to_string(),
472 },
473 ];
474
475 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
476
477 // Test different local models
478 let local_models = vec![
479 ("Linear Regression", LocalModelType::LinearRegression),
480 ("Decision Tree", LocalModelType::DecisionTree),
481 ("Quantum Linear", LocalModelType::QuantumLinear),
482 ];
483
484 let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);
485
486 println!(" Quantum LIME: Local Interpretable Model-agnostic Explanations");
487 println!(
488 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
489 test_input[0], test_input[1], test_input[2], test_input[3]
490 );
491
492 for (model_name, local_model) in local_models {
493 let method = ExplanationMethod::QuantumLIME {
494 num_perturbations: 100,
495 kernel_width: 0.5,
496 local_model,
497 };
498
499 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
500 let explanation = xai.explain(&test_input)?;
501
502 if let Some(ref attributions) = explanation.feature_attributions {
503 println!("\n LIME with {}:", model_name);
504
505 for (i, &attr) in attributions.iter().enumerate() {
506 let impact = if attr.abs() > 0.3 {
507 "high"
508 } else if attr.abs() > 0.1 {
509 "medium"
510 } else {
511 "low"
512 };
513
514 println!(" Feature {}: {:+.3} ({} impact)", i, attr, impact);
515 }
516
517 // Local model interpretation
518 match model_name {
519 "Linear Regression" => {
520 println!(" → Linear relationship approximation in local region");
521 }
522 "Decision Tree" => {
523 println!(" → Rule-based approximation with thresholds");
524 }
525 "Quantum Linear" => {
526 println!(" → Quantum-aware linear approximation");
527 }
528 _ => {}
529 }
530
531 // Compute local fidelity (simplified)
532 let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
533 println!(
534 " → Local explanation complexity: {:.3}",
535 local_complexity
536 );
537 }
538 }
539
540 Ok(())
541}
542
543/// Demonstrate Quantum SHAP
544fn quantum_shap_demo() -> Result<()> {
545 let layers = vec![
546 QNNLayerType::EncodingLayer { num_features: 3 },
547 QNNLayerType::VariationalLayer { num_params: 6 },
548 QNNLayerType::MeasurementLayer {
549 measurement_basis: "Pauli-Z".to_string(),
550 },
551 ];
552
553 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
554
555 let method = ExplanationMethod::QuantumSHAP {
556 num_coalitions: 100,
557 background_samples: 20,
558 };
559
560 let mut xai = QuantumExplainableAI::new(model, vec![method]);
561
562 // Set background data for SHAP
563 let background_data =
564 Array2::from_shape_fn((50, 3), |(i, j)| 0.5 + 0.3 * ((i + j) as f64 * 0.1).sin());
565 xai.set_background_data(background_data);
566
567 println!(" Quantum SHAP: SHapley Additive exPlanations");
568
569 // Test multiple inputs
570 let test_inputs = vec![
571 Array1::from_vec(vec![0.1, 0.5, 0.9]),
572 Array1::from_vec(vec![0.8, 0.3, 0.6]),
573 Array1::from_vec(vec![0.4, 0.7, 0.2]),
574 ];
575
576 for (i, input) in test_inputs.iter().enumerate() {
577 println!(
578 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
579 i + 1,
580 input[0],
581 input[1],
582 input[2]
583 );
584
585 let explanation = xai.explain(input)?;
586
587 if let Some(ref shap_values) = explanation.feature_attributions {
588 println!(" SHAP Values:");
589
590 let mut total_shap = 0.0;
591 for (j, &value) in shap_values.iter().enumerate() {
592 total_shap += value;
593 println!(" - Feature {}: {:+.4}", j, value);
594 }
595
596 println!(" - Sum of SHAP values: {:.4}", total_shap);
597
598 // Feature ranking
599 let mut indexed_shap: Vec<(usize, f64)> = shap_values
600 .iter()
601 .enumerate()
602 .map(|(idx, &val)| (idx, val.abs()))
603 .collect();
604 indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
605
606 println!(" Feature importance ranking:");
607 for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
608 let original_value = shap_values[*feature_idx];
609 println!(
610 " {}. Feature {}: {:.4} (|{:.4}|)",
611 rank + 1,
612 feature_idx,
613 original_value,
614 abs_value
615 );
616 }
617
618 // SHAP properties
619 println!(
620 " → SHAP values satisfy efficiency property (sum to prediction difference)"
621 );
622 println!(" → Each value represents feature's average marginal contribution");
623 }
624 }
625
626 Ok(())
627}
628
629/// Demonstrate Layer-wise Relevance Propagation
630fn quantum_lrp_demo() -> Result<()> {
631 let layers = vec![
632 QNNLayerType::EncodingLayer { num_features: 4 },
633 QNNLayerType::VariationalLayer { num_params: 8 },
634 QNNLayerType::VariationalLayer { num_params: 6 },
635 QNNLayerType::MeasurementLayer {
636 measurement_basis: "computational".to_string(),
637 },
638 ];
639
640 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
641
642 // Test different LRP rules
643 let lrp_rules = vec![
644 ("Epsilon Rule", LRPRule::Epsilon),
645 ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
646 (
647 "Alpha-Beta Rule",
648 LRPRule::AlphaBeta {
649 alpha: 2.0,
650 beta: 1.0,
651 },
652 ),
653 ("Quantum Rule", LRPRule::QuantumRule),
654 ];
655
656 let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);
657
658 println!(" Layer-wise Relevance Propagation for Quantum Circuits");
659 println!(
660 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
661 test_input[0], test_input[1], test_input[2], test_input[3]
662 );
663
664 for (rule_name, lrp_rule) in lrp_rules {
665 let method = ExplanationMethod::QuantumLRP {
666 propagation_rule: lrp_rule,
667 epsilon: 1e-6,
668 };
669
670 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
671 let explanation = xai.explain(&test_input)?;
672
673 if let Some(ref relevance) = explanation.feature_attributions {
674 println!("\n LRP with {}:", rule_name);
675
676 let total_relevance = relevance.sum();
677
678 for (i, &rel) in relevance.iter().enumerate() {
679 let percentage = if total_relevance.abs() > 1e-10 {
680 rel / total_relevance * 100.0
681 } else {
682 0.0
683 };
684
685 println!(
686 " Feature {}: {:.4} ({:.1}% of total relevance)",
687 i, rel, percentage
688 );
689 }
690
691 println!(" Total relevance: {:.4}", total_relevance);
692
693 // Rule-specific interpretation
694 match rule_name {
695 "Epsilon Rule" => {
696 println!(" → Distributes relevance proportionally to activations");
697 }
698 "Gamma Rule" => {
699 println!(" → Emphasizes positive contributions");
700 }
701 "Alpha-Beta Rule" => {
702 println!(" → Separates positive and negative contributions");
703 }
704 "Quantum Rule" => {
705 println!(" → Accounts for quantum superposition and entanglement");
706 }
707 _ => {}
708 }
709 }
710 }
711
712 Ok(())
713}
714
715/// Comprehensive explanation demonstration
716fn comprehensive_explanation_demo() -> Result<()> {
717 let layers = vec![
718 QNNLayerType::EncodingLayer { num_features: 4 },
719 QNNLayerType::VariationalLayer { num_params: 12 },
720 QNNLayerType::EntanglementLayer {
721 connectivity: "full".to_string(),
722 },
723 QNNLayerType::VariationalLayer { num_params: 8 },
724 QNNLayerType::MeasurementLayer {
725 measurement_basis: "computational".to_string(),
726 },
727 ];
728
729 let model = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
730
731 // Use comprehensive explanation methods
732 let methods = vec![
733 ExplanationMethod::QuantumFeatureAttribution {
734 method: AttributionMethod::IntegratedGradients,
735 num_samples: 30,
736 baseline: Some(Array1::zeros(4)),
737 },
738 ExplanationMethod::CircuitVisualization {
739 include_measurements: true,
740 parameter_sensitivity: true,
741 },
742 ExplanationMethod::StateAnalysis {
743 entanglement_measures: true,
744 coherence_analysis: true,
745 superposition_analysis: true,
746 },
747 ExplanationMethod::ConceptActivation {
748 concept_datasets: vec!["pattern_A".to_string(), "pattern_B".to_string()],
749 activation_threshold: 0.3,
750 },
751 ];
752
753 let mut xai = QuantumExplainableAI::new(model, methods);
754
755 // Add concept vectors
756 xai.add_concept(
757 "pattern_A".to_string(),
758 Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
759 );
760 xai.add_concept(
761 "pattern_B".to_string(),
762 Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
763 );
764
765 // Set background data
766 let background_data =
767 Array2::from_shape_fn((30, 4), |(i, j)| 0.3 + 0.4 * ((i * j) as f64 * 0.15).sin());
768 xai.set_background_data(background_data);
769
770 println!(" Comprehensive Quantum Model Explanation");
771
772 // Test input representing a specific pattern
773 let test_input = Array1::from_vec(vec![0.9, 0.1, 0.8, 0.2]); // Similar to pattern_A
774
775 println!(
776 "\n Analyzing input: [{:.1}, {:.1}, {:.1}, {:.1}]",
777 test_input[0], test_input[1], test_input[2], test_input[3]
778 );
779
780 let explanation = xai.explain(&test_input)?;
781
782 // Display comprehensive results
783 println!("\n === COMPREHENSIVE EXPLANATION RESULTS ===");
784
785 // Feature attributions
786 if let Some(ref attributions) = explanation.feature_attributions {
787 println!("\n Feature Attributions:");
788 for (i, &attr) in attributions.iter().enumerate() {
789 println!(" - Feature {}: {:+.3}", i, attr);
790 }
791 }
792
793 // Circuit analysis summary
794 if let Some(ref circuit) = explanation.circuit_explanation {
795 println!("\n Circuit Analysis Summary:");
796 let avg_importance = circuit.parameter_importance.mean().unwrap_or(0.0);
797 println!(" - Average parameter importance: {:.3}", avg_importance);
798 println!(
799 " - Number of analyzed layers: {}",
800 circuit.layer_analysis.len()
801 );
802 println!(" - Critical path length: {}", circuit.critical_path.len());
803 }
804
805 // Quantum state properties
806 if let Some(ref state) = explanation.state_properties {
807 println!("\n Quantum State Properties:");
808 println!(
809 " - Entanglement entropy: {:.3}",
810 state.entanglement_entropy
811 );
812 println!(
813 " - Coherence measures: {} types",
814 state.coherence_measures.len()
815 );
816
817 let max_measurement_prob = state
818 .measurement_probabilities
819 .iter()
820 .cloned()
821 .fold(f64::NEG_INFINITY, f64::max);
822 println!(
823 " - Max measurement probability: {:.3}",
824 max_measurement_prob
825 );
826 }
827
828 // Concept activations
829 if let Some(ref concepts) = explanation.concept_activations {
830 println!("\n Concept Activations:");
831 for (concept, &activation) in concepts {
832 let similarity = if activation > 0.7 {
833 "high"
834 } else if activation > 0.3 {
835 "medium"
836 } else {
837 "low"
838 };
839 println!(
840 " - {}: {:.3} ({} similarity)",
841 concept, activation, similarity
842 );
843 }
844 }
845
846 // Confidence scores
847 println!("\n Explanation Confidence Scores:");
848 for (component, &confidence) in &explanation.confidence_scores {
849 println!(" - {}: {:.3}", component, confidence);
850 }
851
852 // Textual explanation
853 println!("\n Generated Explanation:");
854 println!("{}", explanation.textual_explanation);
855
856 // Summary insights
857 println!("\n === KEY INSIGHTS ===");
858
859 if let Some(ref attributions) = explanation.feature_attributions {
860 let max_attr_idx = attributions
861 .iter()
862 .enumerate()
863 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
864 .map(|(i, _)| i)
865 .unwrap_or(0);
866
867 println!(
868 " • Most influential feature: Feature {} ({:.3})",
869 max_attr_idx, attributions[max_attr_idx]
870 );
871 }
872
873 if let Some(ref state) = explanation.state_properties {
874 if state.entanglement_entropy > 0.5 {
875 println!(" • Model creates significant quantum entanglement");
876 }
877
878 let coherence_level = state
879 .coherence_measures
880 .values()
881 .cloned()
882 .fold(0.0, f64::max);
883 if coherence_level > 0.5 {
884 println!(" • High quantum coherence detected");
885 }
886 }
887
888 if let Some(ref concepts) = explanation.concept_activations {
889 if let Some((best_concept, &max_activation)) =
890 concepts.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
891 {
892 if max_activation > 0.5 {
893 println!(" • Input strongly matches concept: {}", best_concept);
894 }
895 }
896 }
897
898 println!(" • Explanation provides multi-faceted interpretation of quantum model behavior");
899
900 Ok(())
901}
pub fn set_background_data(&mut self, data: Array2<f64>)
Set background data used as a reference distribution by explanation methods that require it (e.g. Gradient SHAP and Quantum SHAP in the examples below)
Examples found in repository
examples/quantum_explainable_ai.rs (line 124)
118    for (method_name, method) in attribution_methods {
119        let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
120
121        // Set background data for gradient SHAP
122        let background_data =
123            Array2::from_shape_fn((20, 4), |(_, j)| 0.5 + 0.3 * (j as f64 * 0.2).sin());
124        xai.set_background_data(background_data);
125
126        let explanation = xai.explain(&test_input)?;
pub fn add_concept(&mut self, name: String, vector: Array1<f64>)
Add a named concept vector for concept-activation analysis (in the repository example, the names match the concept_datasets entries of ExplanationMethod::ConceptActivation)
Examples found in repository
examples/quantum_explainable_ai.rs (lines 756-759)
753    let mut xai = QuantumExplainableAI::new(model, methods);
754
755    // Add concept vectors
756    xai.add_concept(
757        "pattern_A".to_string(),
758        Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
759    );
760    xai.add_concept(
761        "pattern_B".to_string(),
762        Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
763    );
pub fn explain(&mut self, input: &Array1<f64>) -> Result<ExplanationResult>
Generate a comprehensive explanation for the given input
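Each configured ExplanationMethod populates the corresponding optional field of the returned ExplanationResult. A minimal sketch of inspecting the result, using the field names as they appear in the repository example below (illustrative, not an exhaustive list of fields):

let input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
let explanation = xai.explain(&input)?;

// Attribution-style methods (feature attribution, LIME, SHAP, LRP) fill feature_attributions.
if let Some(ref attributions) = explanation.feature_attributions {
    for (i, &attr) in attributions.iter().enumerate() {
        println!("Feature {}: {:+.4}", i, attr);
    }
}

// ExplanationMethod::StateAnalysis fills state_properties.
if let Some(ref state) = explanation.state_properties {
    println!("Entanglement entropy: {:.3}", state.entanglement_entropy);
}

// Confidence scores and a generated textual summary (printed unconditionally in the repository example).
for (component, &confidence) in &explanation.confidence_scores {
    println!("{}: {:.3}", component, confidence);
}
println!("{}", explanation.textual_explanation);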
Examples found in repository
examples/quantum_explainable_ai.rs (line 126)
53fn feature_attribution_demo() -> Result<()> {
54 // Create quantum model
55 let layers = vec![
56 QNNLayerType::EncodingLayer { num_features: 4 },
57 QNNLayerType::VariationalLayer { num_params: 12 },
58 QNNLayerType::EntanglementLayer {
59 connectivity: "circular".to_string(),
60 },
61 QNNLayerType::VariationalLayer { num_params: 8 },
62 QNNLayerType::MeasurementLayer {
63 measurement_basis: "computational".to_string(),
64 },
65 ];
66
67 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
68
69 println!(
70 " Created quantum model with {} parameters",
71 model.parameters.len()
72 );
73
74 // Test different attribution methods
75 let attribution_methods = vec![
76 (
77 "Integrated Gradients",
78 ExplanationMethod::QuantumFeatureAttribution {
79 method: AttributionMethod::IntegratedGradients,
80 num_samples: 50,
81 baseline: Some(Array1::zeros(4)),
82 },
83 ),
84 (
85 "Gradient × Input",
86 ExplanationMethod::QuantumFeatureAttribution {
87 method: AttributionMethod::GradientInput,
88 num_samples: 1,
89 baseline: None,
90 },
91 ),
92 (
93 "Gradient SHAP",
94 ExplanationMethod::QuantumFeatureAttribution {
95 method: AttributionMethod::GradientSHAP,
96 num_samples: 30,
97 baseline: None,
98 },
99 ),
100 (
101 "Quantum Attribution",
102 ExplanationMethod::QuantumFeatureAttribution {
103 method: AttributionMethod::QuantumAttribution,
104 num_samples: 25,
105 baseline: None,
106 },
107 ),
108 ];
109
110 // Test input
111 let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
112
113 println!(
114 "\n Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
115 test_input[0], test_input[1], test_input[2], test_input[3]
116 );
117
118 for (method_name, method) in attribution_methods {
119 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
120
121 // Set background data for gradient SHAP
122 let background_data =
123 Array2::from_shape_fn((20, 4), |(_, j)| 0.5 + 0.3 * (j as f64 * 0.2).sin());
124 xai.set_background_data(background_data);
125
126 let explanation = xai.explain(&test_input)?;
127
128 if let Some(ref attributions) = explanation.feature_attributions {
129 println!("\n {} Attribution:", method_name);
130 for (i, &attr) in attributions.iter().enumerate() {
131 println!(
132 " Feature {}: {:+.4} {}",
133 i,
134 attr,
135 if attr.abs() > 0.1 {
136 if attr > 0.0 {
137 "(strong positive)"
138 } else {
139 "(strong negative)"
140 }
141 } else {
142 "(weak influence)"
143 }
144 );
145 }
146
147 // Find most important feature
148 let max_idx = attributions
149 .iter()
150 .enumerate()
151 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
152 .map(|(i, _)| i)
153 .unwrap_or(0);
154
155 println!(
156 " → Most important feature: Feature {} ({:.4})",
157 max_idx, attributions[max_idx]
158 );
159 }
160 }
161
162 Ok(())
163}
164
165/// Demonstrate circuit analysis and visualization
166fn circuit_analysis_demo() -> Result<()> {
167 let layers = vec![
168 QNNLayerType::EncodingLayer { num_features: 4 },
169 QNNLayerType::VariationalLayer { num_params: 6 },
170 QNNLayerType::EntanglementLayer {
171 connectivity: "full".to_string(),
172 },
173 QNNLayerType::VariationalLayer { num_params: 6 },
174 QNNLayerType::MeasurementLayer {
175 measurement_basis: "Pauli-Z".to_string(),
176 },
177 ];
178
179 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
180
181 let method = ExplanationMethod::CircuitVisualization {
182 include_measurements: true,
183 parameter_sensitivity: true,
184 };
185
186 let mut xai = QuantumExplainableAI::new(model, vec![method]);
187
188 println!(" Analyzing quantum circuit structure and parameter importance...");
189
190 let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
191 let explanation = xai.explain(&test_input)?;
192
193 if let Some(ref circuit) = explanation.circuit_explanation {
194 println!("\n Circuit Analysis Results:");
195
196 // Parameter importance
197 println!(" Parameter Importance Scores:");
198 for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
199 if importance > 0.5 {
200 println!(" Parameter {}: {:.3} (high importance)", i, importance);
201 } else if importance > 0.2 {
202 println!(
203 " Parameter {}: {:.3} (medium importance)",
204 i, importance
205 );
206 }
207 }
208
209 // Layer analysis
210 println!("\n Layer-wise Analysis:");
211 for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
212 println!(
213 " Layer {}: {}",
214 i,
215 format_layer_type(&layer_analysis.layer_type)
216 );
217 println!(
218 " Information gain: {:.3}",
219 layer_analysis.information_gain
220 );
221 println!(
222 " Entanglement generated: {:.3}",
223 layer_analysis.entanglement_generated
224 );
225
226 if layer_analysis.entanglement_generated > 0.5 {
227 println!(" → Significant entanglement layer");
228 }
229 }
230
231 // Gate contributions
232 println!("\n Gate Contribution Analysis:");
233 for (i, gate) in circuit.gate_contributions.iter().enumerate().take(5) {
234 println!(
235 " Gate {}: {} on qubits {:?}",
236 gate.gate_index, gate.gate_type, gate.qubits
237 );
238 println!(" Contribution: {:.3}", gate.contribution);
239
240 if let Some(ref params) = gate.parameters {
241 println!(" Parameters: {:.3}", params[0]);
242 }
243 }
244
245 // Critical path
246 println!("\n Critical Path (most important parameters):");
247 print!(" ");
248 for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
249 if i > 0 {
250 print!(" → ");
251 }
252 print!("P{}", param_idx);
253 }
254 println!();
255
256 println!(" → This path represents the most influential quantum operations");
257 }
258
259 Ok(())
260}
261
262/// Demonstrate quantum state analysis
263fn quantum_state_demo() -> Result<()> {
264 let layers = vec![
265 QNNLayerType::EncodingLayer { num_features: 3 },
266 QNNLayerType::VariationalLayer { num_params: 9 },
267 QNNLayerType::EntanglementLayer {
268 connectivity: "circular".to_string(),
269 },
270 QNNLayerType::MeasurementLayer {
271 measurement_basis: "computational".to_string(),
272 },
273 ];
274
275 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
276
277 let method = ExplanationMethod::StateAnalysis {
278 entanglement_measures: true,
279 coherence_analysis: true,
280 superposition_analysis: true,
281 };
282
283 let mut xai = QuantumExplainableAI::new(model, vec![method]);
284
285 println!(" Analyzing quantum state properties...");
286
287 // Test different inputs to see state evolution
288 let test_inputs = vec![
289 Array1::from_vec(vec![0.0, 0.0, 0.0]),
290 Array1::from_vec(vec![1.0, 0.0, 0.0]),
291 Array1::from_vec(vec![0.5, 0.5, 0.5]),
292 Array1::from_vec(vec![1.0, 1.0, 1.0]),
293 ];
294
295 for (i, input) in test_inputs.iter().enumerate() {
296 println!(
297 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
298 i + 1,
299 input[0],
300 input[1],
301 input[2]
302 );
303
304 let explanation = xai.explain(input)?;
305
306 if let Some(ref state) = explanation.state_properties {
307 println!(" Quantum State Properties:");
308 println!(
309 " - Entanglement entropy: {:.3}",
310 state.entanglement_entropy
311 );
312
313 // Coherence measures
314 for (measure_name, &value) in &state.coherence_measures {
315 println!(" - {}: {:.3}", measure_name, value);
316 }
317
318 // Superposition analysis
319 let max_component = state
320 .superposition_components
321 .iter()
322 .cloned()
323 .fold(f64::NEG_INFINITY, f64::max);
324 println!(" - Max superposition component: {:.3}", max_component);
325
326 // Measurement probabilities
327 let total_prob = state.measurement_probabilities.sum();
328 println!(" - Total measurement probability: {:.3}", total_prob);
329
330 // Most likely measurement outcome
331 let most_likely = state
332 .measurement_probabilities
333 .iter()
334 .enumerate()
335 .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
336 .map(|(idx, &prob)| (idx, prob))
337 .unwrap_or((0, 0.0));
338
339 println!(
340 " - Most likely outcome: state {} with prob {:.3}",
341 most_likely.0, most_likely.1
342 );
343
344 // State fidelities
345 if let Some(highest_fidelity) = state
346 .state_fidelities
347 .values()
348 .cloned()
349 .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
350 {
351 println!(
352 " - Highest basis state fidelity: {:.3}",
353 highest_fidelity
354 );
355 }
356
357 // Interpretation
358 if state.entanglement_entropy > 0.5 {
359 println!(" → Highly entangled state");
360 } else if state.entanglement_entropy > 0.1 {
361 println!(" → Moderately entangled state");
362 } else {
363 println!(" → Separable or weakly entangled state");
364 }
365 }
366 }
367
368 Ok(())
369}
370
371/// Demonstrate saliency mapping
372fn saliency_mapping_demo() -> Result<()> {
373 let layers = vec![
374 QNNLayerType::EncodingLayer { num_features: 4 },
375 QNNLayerType::VariationalLayer { num_params: 8 },
376 QNNLayerType::MeasurementLayer {
377 measurement_basis: "computational".to_string(),
378 },
379 ];
380
381 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
382
383 // Test different perturbation methods
384 let perturbation_methods = vec![
385 (
386 "Gaussian Noise",
387 PerturbationMethod::Gaussian { sigma: 0.1 },
388 ),
389 (
390 "Quantum Phase",
391 PerturbationMethod::QuantumPhase { magnitude: 0.2 },
392 ),
393 ("Feature Masking", PerturbationMethod::FeatureMasking),
394 (
395 "Parameter Perturbation",
396 PerturbationMethod::ParameterPerturbation { strength: 0.1 },
397 ),
398 ];
399
400 let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);
401
402 println!(" Computing saliency maps with different perturbation methods...");
403 println!(
404 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
405 test_input[0], test_input[1], test_input[2], test_input[3]
406 );
407
408 for (method_name, perturbation_method) in perturbation_methods {
409 let method = ExplanationMethod::SaliencyMapping {
410 perturbation_method,
411 aggregation: AggregationMethod::Mean,
412 };
413
414 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
415 let explanation = xai.explain(&test_input)?;
416
417 if let Some(ref saliency) = explanation.saliency_map {
418 println!("\n {} Saliency Map:", method_name);
419
420 // Analyze saliency for each output
421 for output_idx in 0..saliency.ncols() {
422 println!(" Output {}:", output_idx);
423 for input_idx in 0..saliency.nrows() {
424 let saliency_score = saliency[[input_idx, output_idx]];
425 if saliency_score > 0.1 {
426 println!(
427 " Feature {} → Output {}: {:.3} (important)",
428 input_idx, output_idx, saliency_score
429 );
430 } else if saliency_score > 0.05 {
431 println!(
432 " Feature {} → Output {}: {:.3} (moderate)",
433 input_idx, output_idx, saliency_score
434 );
435 }
436 }
437 }
438
439 // Find most salient feature-output pair
440 let mut max_saliency = 0.0;
441 let mut max_pair = (0, 0);
442
443 for i in 0..saliency.nrows() {
444 for j in 0..saliency.ncols() {
445 if saliency[[i, j]] > max_saliency {
446 max_saliency = saliency[[i, j]];
447 max_pair = (i, j);
448 }
449 }
450 }
451
452 println!(
453 " → Most salient: Feature {} → Output {} ({:.3})",
454 max_pair.0, max_pair.1, max_saliency
455 );
456 }
457 }
458
459 Ok(())
460}
461
462/// Demonstrate Quantum LIME
463fn quantum_lime_demo() -> Result<()> {
464 let layers = vec![
465 QNNLayerType::EncodingLayer { num_features: 4 },
466 QNNLayerType::VariationalLayer { num_params: 10 },
467 QNNLayerType::EntanglementLayer {
468 connectivity: "circular".to_string(),
469 },
470 QNNLayerType::MeasurementLayer {
471 measurement_basis: "computational".to_string(),
472 },
473 ];
474
475 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
476
477 // Test different local models
478 let local_models = vec![
479 ("Linear Regression", LocalModelType::LinearRegression),
480 ("Decision Tree", LocalModelType::DecisionTree),
481 ("Quantum Linear", LocalModelType::QuantumLinear),
482 ];
483
484 let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);
485
486 println!(" Quantum LIME: Local Interpretable Model-agnostic Explanations");
487 println!(
488 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
489 test_input[0], test_input[1], test_input[2], test_input[3]
490 );
491
492 for (model_name, local_model) in local_models {
493 let method = ExplanationMethod::QuantumLIME {
494 num_perturbations: 100,
495 kernel_width: 0.5,
496 local_model,
497 };
498
499 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
500 let explanation = xai.explain(&test_input)?;
501
502 if let Some(ref attributions) = explanation.feature_attributions {
503 println!("\n LIME with {}:", model_name);
504
505 for (i, &attr) in attributions.iter().enumerate() {
506 let impact = if attr.abs() > 0.3 {
507 "high"
508 } else if attr.abs() > 0.1 {
509 "medium"
510 } else {
511 "low"
512 };
513
514 println!(" Feature {}: {:+.3} ({} impact)", i, attr, impact);
515 }
516
517 // Local model interpretation
518 match model_name {
519 "Linear Regression" => {
520 println!(" → Linear relationship approximation in local region");
521 }
522 "Decision Tree" => {
523 println!(" → Rule-based approximation with thresholds");
524 }
525 "Quantum Linear" => {
526 println!(" → Quantum-aware linear approximation");
527 }
528 _ => {}
529 }
530
531 // Compute local fidelity (simplified)
532 let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
533 println!(
534 " → Local explanation complexity: {:.3}",
535 local_complexity
536 );
537 }
538 }
539
540 Ok(())
541}
542
543/// Demonstrate Quantum SHAP
544fn quantum_shap_demo() -> Result<()> {
545 let layers = vec![
546 QNNLayerType::EncodingLayer { num_features: 3 },
547 QNNLayerType::VariationalLayer { num_params: 6 },
548 QNNLayerType::MeasurementLayer {
549 measurement_basis: "Pauli-Z".to_string(),
550 },
551 ];
552
553 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
554
555 let method = ExplanationMethod::QuantumSHAP {
556 num_coalitions: 100,
557 background_samples: 20,
558 };
559
560 let mut xai = QuantumExplainableAI::new(model, vec![method]);
561
562 // Set background data for SHAP
563 let background_data =
564 Array2::from_shape_fn((50, 3), |(i, j)| 0.5 + 0.3 * ((i + j) as f64 * 0.1).sin());
565 xai.set_background_data(background_data);
566
567 println!(" Quantum SHAP: SHapley Additive exPlanations");
568
569 // Test multiple inputs
570 let test_inputs = vec![
571 Array1::from_vec(vec![0.1, 0.5, 0.9]),
572 Array1::from_vec(vec![0.8, 0.3, 0.6]),
573 Array1::from_vec(vec![0.4, 0.7, 0.2]),
574 ];
575
576 for (i, input) in test_inputs.iter().enumerate() {
577 println!(
578 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
579 i + 1,
580 input[0],
581 input[1],
582 input[2]
583 );
584
585 let explanation = xai.explain(input)?;
586
587 if let Some(ref shap_values) = explanation.feature_attributions {
588 println!(" SHAP Values:");
589
590 let mut total_shap = 0.0;
591 for (j, &value) in shap_values.iter().enumerate() {
592 total_shap += value;
593 println!(" - Feature {}: {:+.4}", j, value);
594 }
595
596 println!(" - Sum of SHAP values: {:.4}", total_shap);
597
598 // Feature ranking
599 let mut indexed_shap: Vec<(usize, f64)> = shap_values
600 .iter()
601 .enumerate()
602 .map(|(idx, &val)| (idx, val.abs()))
603 .collect();
604 indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
605
606 println!(" Feature importance ranking:");
607 for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
608 let original_value = shap_values[*feature_idx];
609 println!(
610 " {}. Feature {}: {:.4} (|{:.4}|)",
611 rank + 1,
612 feature_idx,
613 original_value,
614 abs_value
615 );
616 }
617
618 // SHAP properties
619 println!(
620 " → SHAP values satisfy efficiency property (sum to prediction difference)"
621 );
622 println!(" → Each value represents feature's average marginal contribution");
623 }
624 }
625
626 Ok(())
627}
628
629/// Demonstrate Layer-wise Relevance Propagation
630fn quantum_lrp_demo() -> Result<()> {
631 let layers = vec![
632 QNNLayerType::EncodingLayer { num_features: 4 },
633 QNNLayerType::VariationalLayer { num_params: 8 },
634 QNNLayerType::VariationalLayer { num_params: 6 },
635 QNNLayerType::MeasurementLayer {
636 measurement_basis: "computational".to_string(),
637 },
638 ];
639
640 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
641
642 // Test different LRP rules
643 let lrp_rules = vec![
644 ("Epsilon Rule", LRPRule::Epsilon),
645 ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
646 (
647 "Alpha-Beta Rule",
648 LRPRule::AlphaBeta {
649 alpha: 2.0,
650 beta: 1.0,
651 },
652 ),
653 ("Quantum Rule", LRPRule::QuantumRule),
654 ];
655
656 let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);
657
658 println!(" Layer-wise Relevance Propagation for Quantum Circuits");
659 println!(
660 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
661 test_input[0], test_input[1], test_input[2], test_input[3]
662 );
663
664 for (rule_name, lrp_rule) in lrp_rules {
665 let method = ExplanationMethod::QuantumLRP {
666 propagation_rule: lrp_rule,
667 epsilon: 1e-6,
668 };
669
670 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
671 let explanation = xai.explain(&test_input)?;
672
673 if let Some(ref relevance) = explanation.feature_attributions {
674 println!("\n LRP with {}:", rule_name);
675
676 let total_relevance = relevance.sum();
677
678 for (i, &rel) in relevance.iter().enumerate() {
679 let percentage = if total_relevance.abs() > 1e-10 {
680 rel / total_relevance * 100.0
681 } else {
682 0.0
683 };
684
685 println!(
686 " Feature {}: {:.4} ({:.1}% of total relevance)",
687 i, rel, percentage
688 );
689 }
690
691 println!(" Total relevance: {:.4}", total_relevance);
692
693 // Rule-specific interpretation
694 match rule_name {
695 "Epsilon Rule" => {
696 println!(" → Distributes relevance proportionally to activations");
697 }
698 "Gamma Rule" => {
699 println!(" → Emphasizes positive contributions");
700 }
701 "Alpha-Beta Rule" => {
702 println!(" → Separates positive and negative contributions");
703 }
704 "Quantum Rule" => {
705 println!(" → Accounts for quantum superposition and entanglement");
706 }
707 _ => {}
708 }
709 }
710 }
711
712 Ok(())
713}
714
715/// Comprehensive explanation demonstration
716fn comprehensive_explanation_demo() -> Result<()> {
717 let layers = vec![
718 QNNLayerType::EncodingLayer { num_features: 4 },
719 QNNLayerType::VariationalLayer { num_params: 12 },
720 QNNLayerType::EntanglementLayer {
721 connectivity: "full".to_string(),
722 },
723 QNNLayerType::VariationalLayer { num_params: 8 },
724 QNNLayerType::MeasurementLayer {
725 measurement_basis: "computational".to_string(),
726 },
727 ];
728
729 let model = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
730
731 // Use comprehensive explanation methods
732 let methods = vec![
733 ExplanationMethod::QuantumFeatureAttribution {
734 method: AttributionMethod::IntegratedGradients,
735 num_samples: 30,
736 baseline: Some(Array1::zeros(4)),
737 },
738 ExplanationMethod::CircuitVisualization {
739 include_measurements: true,
740 parameter_sensitivity: true,
741 },
742 ExplanationMethod::StateAnalysis {
743 entanglement_measures: true,
744 coherence_analysis: true,
745 superposition_analysis: true,
746 },
747 ExplanationMethod::ConceptActivation {
748 concept_datasets: vec!["pattern_A".to_string(), "pattern_B".to_string()],
749 activation_threshold: 0.3,
750 },
751 ];
752
753 let mut xai = QuantumExplainableAI::new(model, methods);
754
755 // Add concept vectors
756 xai.add_concept(
757 "pattern_A".to_string(),
758 Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
759 );
760 xai.add_concept(
761 "pattern_B".to_string(),
762 Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
763 );
764
765 // Set background data
766 let background_data =
767 Array2::from_shape_fn((30, 4), |(i, j)| 0.3 + 0.4 * ((i * j) as f64 * 0.15).sin());
768 xai.set_background_data(background_data);
769
770 println!(" Comprehensive Quantum Model Explanation");
771
772 // Test input representing a specific pattern
773 let test_input = Array1::from_vec(vec![0.9, 0.1, 0.8, 0.2]); // Similar to pattern_A
774
775 println!(
776 "\n Analyzing input: [{:.1}, {:.1}, {:.1}, {:.1}]",
777 test_input[0], test_input[1], test_input[2], test_input[3]
778 );
779
780 let explanation = xai.explain(&test_input)?;
781
782 // Display comprehensive results
783 println!("\n === COMPREHENSIVE EXPLANATION RESULTS ===");
784
785 // Feature attributions
786 if let Some(ref attributions) = explanation.feature_attributions {
787 println!("\n Feature Attributions:");
788 for (i, &attr) in attributions.iter().enumerate() {
789 println!(" - Feature {}: {:+.3}", i, attr);
790 }
791 }
792
793 // Circuit analysis summary
794 if let Some(ref circuit) = explanation.circuit_explanation {
795 println!("\n Circuit Analysis Summary:");
796 let avg_importance = circuit.parameter_importance.mean().unwrap_or(0.0);
797 println!(" - Average parameter importance: {:.3}", avg_importance);
798 println!(
799 " - Number of analyzed layers: {}",
800 circuit.layer_analysis.len()
801 );
802 println!(" - Critical path length: {}", circuit.critical_path.len());
803 }
804
805 // Quantum state properties
806 if let Some(ref state) = explanation.state_properties {
807 println!("\n Quantum State Properties:");
808 println!(
809 " - Entanglement entropy: {:.3}",
810 state.entanglement_entropy
811 );
812 println!(
813 " - Coherence measures: {} types",
814 state.coherence_measures.len()
815 );
816
817 let max_measurement_prob = state
818 .measurement_probabilities
819 .iter()
820 .cloned()
821 .fold(f64::NEG_INFINITY, f64::max);
822 println!(
823 " - Max measurement probability: {:.3}",
824 max_measurement_prob
825 );
826 }
827
828 // Concept activations
829 if let Some(ref concepts) = explanation.concept_activations {
830 println!("\n Concept Activations:");
831 for (concept, &activation) in concepts {
832 let similarity = if activation > 0.7 {
833 "high"
834 } else if activation > 0.3 {
835 "medium"
836 } else {
837 "low"
838 };
839 println!(
840 " - {}: {:.3} ({} similarity)",
841 concept, activation, similarity
842 );
843 }
844 }
845
846 // Confidence scores
847 println!("\n Explanation Confidence Scores:");
848 for (component, &confidence) in &explanation.confidence_scores {
849 println!(" - {}: {:.3}", component, confidence);
850 }
851
852 // Textual explanation
853 println!("\n Generated Explanation:");
854 println!("{}", explanation.textual_explanation);
855
856 // Summary insights
857 println!("\n === KEY INSIGHTS ===");
858
859 if let Some(ref attributions) = explanation.feature_attributions {
860 let max_attr_idx = attributions
861 .iter()
862 .enumerate()
863 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
864 .map(|(i, _)| i)
865 .unwrap_or(0);
866
867 println!(
868 " • Most influential feature: Feature {} ({:.3})",
869 max_attr_idx, attributions[max_attr_idx]
870 );
871 }
872
873 if let Some(ref state) = explanation.state_properties {
874 if state.entanglement_entropy > 0.5 {
875 println!(" • Model creates significant quantum entanglement");
876 }
877
878 let coherence_level = state
879 .coherence_measures
880 .values()
881 .cloned()
882 .fold(0.0, f64::max);
883 if coherence_level > 0.5 {
884 println!(" • High quantum coherence detected");
885 }
886 }
887
888 if let Some(ref concepts) = explanation.concept_activations {
889 if let Some((best_concept, &max_activation)) =
890 concepts.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
891 {
892 if max_activation > 0.5 {
893 println!(" • Input strongly matches concept: {}", best_concept);
894 }
895 }
896 }
897
898 println!(" • Explanation provides multi-faceted interpretation of quantum model behavior");
899
900 Ok(())
901}
Auto Trait Implementations§
impl Freeze for QuantumExplainableAI
impl RefUnwindSafe for QuantumExplainableAI
impl Send for QuantumExplainableAI
impl Sync for QuantumExplainableAI
impl Unpin for QuantumExplainableAI
impl UnwindSafe for QuantumExplainableAI
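Because Send and Sync are implemented while explain takes &mut self, concurrent use requires interior mutability. A minimal sketch under that assumption (the Arc<Mutex<_>> pattern and the helper function below are illustrative, not part of this crate):

use std::sync::{Arc, Mutex};
use std::thread;
use ndarray::Array1;

// `xai` is assumed to be a fully configured QuantumExplainableAI (see the examples above).
fn explain_from_threads(xai: QuantumExplainableAI, inputs: Vec<Array1<f64>>) {
    // Send + Sync make the engine shareable; the Mutex serializes &mut self access.
    let shared = Arc::new(Mutex::new(xai));
    let handles: Vec<_> = inputs
        .into_iter()
        .map(|input| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                let mut engine = shared.lock().unwrap();
                // Result is discarded to keep the sketch self-contained.
                let _ = engine.explain(&input);
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}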
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<SS, SP> SupersetOf<SS> for SP where
SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SP where
SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
Source§fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.