pub struct QuantumExplainableAI { /* private fields */ }
Main quantum explainable AI engine
Implementations

impl QuantumExplainableAI
pub fn new(model: QuantumNeuralNetwork, methods: Vec<ExplanationMethod>) -> Self
Create a new quantum explainable AI instance
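A minimal construction sketch, assuming the crate's model and explanation types (QuantumNeuralNetwork, QNNLayerType, ExplanationMethod, AttributionMethod) plus ndarray's Array1 are in scope, and that the code runs inside a function returning Result<()>:

// Build a small quantum model: encoding, one variational layer, measurement.
let layers = vec![
    QNNLayerType::EncodingLayer { num_features: 4 },
    QNNLayerType::VariationalLayer { num_params: 8 },
    QNNLayerType::MeasurementLayer {
        measurement_basis: "computational".to_string(),
    },
];
let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;

// Explain its predictions with integrated gradients against a zero baseline.
let methods = vec![ExplanationMethod::QuantumFeatureAttribution {
    method: AttributionMethod::IntegratedGradients,
    num_samples: 50,
    baseline: Some(Array1::zeros(4)),
}];
let mut xai = QuantumExplainableAI::new(model, methods);
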
Examples found in repository
examples/quantum_explainable_ai.rs (line 120)
54 fn feature_attribution_demo() -> Result<()> {
55 // Create quantum model
56 let layers = vec![
57 QNNLayerType::EncodingLayer { num_features: 4 },
58 QNNLayerType::VariationalLayer { num_params: 12 },
59 QNNLayerType::EntanglementLayer {
60 connectivity: "circular".to_string(),
61 },
62 QNNLayerType::VariationalLayer { num_params: 8 },
63 QNNLayerType::MeasurementLayer {
64 measurement_basis: "computational".to_string(),
65 },
66 ];
67
68 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
69
70 println!(
71 " Created quantum model with {} parameters",
72 model.parameters.len()
73 );
74
75 // Test different attribution methods
76 let attribution_methods = vec![
77 (
78 "Integrated Gradients",
79 ExplanationMethod::QuantumFeatureAttribution {
80 method: AttributionMethod::IntegratedGradients,
81 num_samples: 50,
82 baseline: Some(Array1::zeros(4)),
83 },
84 ),
85 (
86 "Gradient × Input",
87 ExplanationMethod::QuantumFeatureAttribution {
88 method: AttributionMethod::GradientInput,
89 num_samples: 1,
90 baseline: None,
91 },
92 ),
93 (
94 "Gradient SHAP",
95 ExplanationMethod::QuantumFeatureAttribution {
96 method: AttributionMethod::GradientSHAP,
97 num_samples: 30,
98 baseline: None,
99 },
100 ),
101 (
102 "Quantum Attribution",
103 ExplanationMethod::QuantumFeatureAttribution {
104 method: AttributionMethod::QuantumAttribution,
105 num_samples: 25,
106 baseline: None,
107 },
108 ),
109 ];
110
111 // Test input
112 let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
113
114 println!(
115 "\n Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
116 test_input[0], test_input[1], test_input[2], test_input[3]
117 );
118
119 for (method_name, method) in attribution_methods {
120 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
121
122 // Set background data for gradient SHAP
123 let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
124 0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
125 });
126 xai.set_background_data(background_data);
127
128 let explanation = xai.explain(&test_input)?;
129
130 if let Some(ref attributions) = explanation.feature_attributions {
131 println!("\n {method_name} Attribution:");
132 for (i, &attr) in attributions.iter().enumerate() {
133 println!(
134 " Feature {}: {:+.4} {}",
135 i,
136 attr,
137 if attr.abs() > 0.1 {
138 if attr > 0.0 {
139 "(strong positive)"
140 } else {
141 "(strong negative)"
142 }
143 } else {
144 "(weak influence)"
145 }
146 );
147 }
148
149 // Find most important feature
150 let max_idx = attributions
151 .iter()
152 .enumerate()
153 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
154 .map_or(0, |(i, _)| i);
155
156 println!(
157 " → Most important feature: Feature {} ({:.4})",
158 max_idx, attributions[max_idx]
159 );
160 }
161 }
162
163 Ok(())
164 }
165
166/// Demonstrate circuit analysis and visualization
167 fn circuit_analysis_demo() -> Result<()> {
168 let layers = vec![
169 QNNLayerType::EncodingLayer { num_features: 4 },
170 QNNLayerType::VariationalLayer { num_params: 6 },
171 QNNLayerType::EntanglementLayer {
172 connectivity: "full".to_string(),
173 },
174 QNNLayerType::VariationalLayer { num_params: 6 },
175 QNNLayerType::MeasurementLayer {
176 measurement_basis: "Pauli-Z".to_string(),
177 },
178 ];
179
180 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
181
182 let method = ExplanationMethod::CircuitVisualization {
183 include_measurements: true,
184 parameter_sensitivity: true,
185 };
186
187 let mut xai = QuantumExplainableAI::new(model, vec![method]);
188
189 println!(" Analyzing quantum circuit structure and parameter importance...");
190
191 let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
192 let explanation = xai.explain(&test_input)?;
193
194 if let Some(ref circuit) = explanation.circuit_explanation {
195 println!("\n Circuit Analysis Results:");
196
197 // Parameter importance
198 println!(" Parameter Importance Scores:");
199 for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
200 if importance > 0.5 {
201 println!(" Parameter {i}: {importance:.3} (high importance)");
202 } else if importance > 0.2 {
203 println!(" Parameter {i}: {importance:.3} (medium importance)");
204 }
205 }
206
207 // Layer analysis
208 println!("\n Layer-wise Analysis:");
209 for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
210 println!(
211 " Layer {}: {}",
212 i,
213 format_layer_type(&layer_analysis.layer_type)
214 );
215 println!(
216 " Information gain: {:.3}",
217 layer_analysis.information_gain
218 );
219 println!(
220 " Entanglement generated: {:.3}",
221 layer_analysis.entanglement_generated
222 );
223
224 if layer_analysis.entanglement_generated > 0.5 {
225 println!(" → Significant entanglement layer");
226 }
227 }
228
229 // Gate contributions
230 println!("\n Gate Contribution Analysis:");
231 for (i, gate) in circuit.gate_contributions.iter().enumerate().take(5) {
232 println!(
233 " Gate {}: {} on qubits {:?}",
234 gate.gate_index, gate.gate_type, gate.qubits
235 );
236 println!(" Contribution: {:.3}", gate.contribution);
237
238 if let Some(ref params) = gate.parameters {
239 println!(" Parameters: {:.3}", params[0]);
240 }
241 }
242
243 // Critical path
244 println!("\n Critical Path (most important parameters):");
245 print!(" ");
246         for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
247 if i > 0 {
248 print!(" → ");
249 }
250 print!("P{param_idx}");
251 }
252 println!();
253
254 println!(" → This path represents the most influential quantum operations");
255 }
256
257 Ok(())
258 }
259
260/// Demonstrate quantum state analysis
261 fn quantum_state_demo() -> Result<()> {
262 let layers = vec![
263 QNNLayerType::EncodingLayer { num_features: 3 },
264 QNNLayerType::VariationalLayer { num_params: 9 },
265 QNNLayerType::EntanglementLayer {
266 connectivity: "circular".to_string(),
267 },
268 QNNLayerType::MeasurementLayer {
269 measurement_basis: "computational".to_string(),
270 },
271 ];
272
273 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
274
275 let method = ExplanationMethod::StateAnalysis {
276 entanglement_measures: true,
277 coherence_analysis: true,
278 superposition_analysis: true,
279 };
280
281 let mut xai = QuantumExplainableAI::new(model, vec![method]);
282
283 println!(" Analyzing quantum state properties...");
284
285 // Test different inputs to see state evolution
286 let test_inputs = [
287 Array1::from_vec(vec![0.0, 0.0, 0.0]),
288 Array1::from_vec(vec![1.0, 0.0, 0.0]),
289 Array1::from_vec(vec![0.5, 0.5, 0.5]),
290 Array1::from_vec(vec![1.0, 1.0, 1.0]),
291 ];
292
293 for (i, input) in test_inputs.iter().enumerate() {
294 println!(
295 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
296 i + 1,
297 input[0],
298 input[1],
299 input[2]
300 );
301
302 let explanation = xai.explain(input)?;
303
304 if let Some(ref state) = explanation.state_properties {
305 println!(" Quantum State Properties:");
306 println!(
307 " - Entanglement entropy: {:.3}",
308 state.entanglement_entropy
309 );
310
311 // Coherence measures
312 for (measure_name, &value) in &state.coherence_measures {
313 println!(" - {measure_name}: {value:.3}");
314 }
315
316 // Superposition analysis
317 let max_component = state
318 .superposition_components
319 .iter()
320 .copied()
321 .fold(f64::NEG_INFINITY, f64::max);
322 println!(" - Max superposition component: {max_component:.3}");
323
324 // Measurement probabilities
325 let total_prob = state.measurement_probabilities.sum();
326 println!(" - Total measurement probability: {total_prob:.3}");
327
328 // Most likely measurement outcome
329 let most_likely = state
330 .measurement_probabilities
331 .iter()
332 .enumerate()
333 .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
334 .map_or((0, 0.0), |(idx, &prob)| (idx, prob));
335
336 println!(
337 " - Most likely outcome: state {} with prob {:.3}",
338 most_likely.0, most_likely.1
339 );
340
341 // State fidelities
342 if let Some(highest_fidelity) = state
343 .state_fidelities
344 .values()
345 .copied()
346 .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
347 {
348 println!(" - Highest basis state fidelity: {highest_fidelity:.3}");
349 }
350
351 // Interpretation
352 if state.entanglement_entropy > 0.5 {
353 println!(" → Highly entangled state");
354 } else if state.entanglement_entropy > 0.1 {
355 println!(" → Moderately entangled state");
356 } else {
357 println!(" → Separable or weakly entangled state");
358 }
359 }
360 }
361
362 Ok(())
363 }
364
365/// Demonstrate saliency mapping
366 fn saliency_mapping_demo() -> Result<()> {
367 let layers = vec![
368 QNNLayerType::EncodingLayer { num_features: 4 },
369 QNNLayerType::VariationalLayer { num_params: 8 },
370 QNNLayerType::MeasurementLayer {
371 measurement_basis: "computational".to_string(),
372 },
373 ];
374
375 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
376
377 // Test different perturbation methods
378 let perturbation_methods = vec![
379 (
380 "Gaussian Noise",
381 PerturbationMethod::Gaussian { sigma: 0.1 },
382 ),
383 (
384 "Quantum Phase",
385 PerturbationMethod::QuantumPhase { magnitude: 0.2 },
386 ),
387 ("Feature Masking", PerturbationMethod::FeatureMasking),
388 (
389 "Parameter Perturbation",
390 PerturbationMethod::ParameterPerturbation { strength: 0.1 },
391 ),
392 ];
393
394 let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);
395
396 println!(" Computing saliency maps with different perturbation methods...");
397 println!(
398 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
399 test_input[0], test_input[1], test_input[2], test_input[3]
400 );
401
402 for (method_name, perturbation_method) in perturbation_methods {
403 let method = ExplanationMethod::SaliencyMapping {
404 perturbation_method,
405 aggregation: AggregationMethod::Mean,
406 };
407
408 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
409 let explanation = xai.explain(&test_input)?;
410
411 if let Some(ref saliency) = explanation.saliency_map {
412 println!("\n {method_name} Saliency Map:");
413
414 // Analyze saliency for each output
415 for output_idx in 0..saliency.ncols() {
416 println!(" Output {output_idx}:");
417 for input_idx in 0..saliency.nrows() {
418 let saliency_score = saliency[[input_idx, output_idx]];
419 if saliency_score > 0.1 {
420 println!(
421 " Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (important)"
422 );
423 } else if saliency_score > 0.05 {
424 println!(
425 " Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (moderate)"
426 );
427 }
428 }
429 }
430
431 // Find most salient feature-output pair
432 let mut max_saliency = 0.0;
433 let mut max_pair = (0, 0);
434
435 for i in 0..saliency.nrows() {
436 for j in 0..saliency.ncols() {
437 if saliency[[i, j]] > max_saliency {
438 max_saliency = saliency[[i, j]];
439 max_pair = (i, j);
440 }
441 }
442 }
443
444 println!(
445 " → Most salient: Feature {} → Output {} ({:.3})",
446 max_pair.0, max_pair.1, max_saliency
447 );
448 }
449 }
450
451 Ok(())
452 }
453
454/// Demonstrate Quantum LIME
455 fn quantum_lime_demo() -> Result<()> {
456 let layers = vec![
457 QNNLayerType::EncodingLayer { num_features: 4 },
458 QNNLayerType::VariationalLayer { num_params: 10 },
459 QNNLayerType::EntanglementLayer {
460 connectivity: "circular".to_string(),
461 },
462 QNNLayerType::MeasurementLayer {
463 measurement_basis: "computational".to_string(),
464 },
465 ];
466
467 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
468
469 // Test different local models
470 let local_models = vec![
471 ("Linear Regression", LocalModelType::LinearRegression),
472 ("Decision Tree", LocalModelType::DecisionTree),
473 ("Quantum Linear", LocalModelType::QuantumLinear),
474 ];
475
476 let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);
477
478 println!(" Quantum LIME: Local Interpretable Model-agnostic Explanations");
479 println!(
480 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
481 test_input[0], test_input[1], test_input[2], test_input[3]
482 );
483
484 for (model_name, local_model) in local_models {
485 let method = ExplanationMethod::QuantumLIME {
486 num_perturbations: 100,
487 kernel_width: 0.5,
488 local_model,
489 };
490
491 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
492 let explanation = xai.explain(&test_input)?;
493
494 if let Some(ref attributions) = explanation.feature_attributions {
495 println!("\n LIME with {model_name}:");
496
497 for (i, &attr) in attributions.iter().enumerate() {
498 let impact = if attr.abs() > 0.3 {
499 "high"
500 } else if attr.abs() > 0.1 {
501 "medium"
502 } else {
503 "low"
504 };
505
506 println!(" Feature {i}: {attr:+.3} ({impact} impact)");
507 }
508
509 // Local model interpretation
510 match model_name {
511 "Linear Regression" => {
512 println!(" → Linear relationship approximation in local region");
513 }
514 "Decision Tree" => {
515 println!(" → Rule-based approximation with thresholds");
516 }
517 "Quantum Linear" => {
518 println!(" → Quantum-aware linear approximation");
519 }
520 _ => {}
521 }
522
523 // Compute local fidelity (simplified)
524 let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
525 println!(" → Local explanation complexity: {local_complexity:.3}");
526 }
527 }
528
529 Ok(())
530 }
531
532/// Demonstrate Quantum SHAP
533 fn quantum_shap_demo() -> Result<()> {
534 let layers = vec![
535 QNNLayerType::EncodingLayer { num_features: 3 },
536 QNNLayerType::VariationalLayer { num_params: 6 },
537 QNNLayerType::MeasurementLayer {
538 measurement_basis: "Pauli-Z".to_string(),
539 },
540 ];
541
542 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
543
544 let method = ExplanationMethod::QuantumSHAP {
545 num_coalitions: 100,
546 background_samples: 20,
547 };
548
549 let mut xai = QuantumExplainableAI::new(model, vec![method]);
550
551 // Set background data for SHAP
552 let background_data = Array2::from_shape_fn((50, 3), |(i, j)| {
553 0.3f64.mul_add(((i + j) as f64 * 0.1).sin(), 0.5)
554 });
555 xai.set_background_data(background_data);
556
557 println!(" Quantum SHAP: SHapley Additive exPlanations");
558
559 // Test multiple inputs
560 let test_inputs = [
561 Array1::from_vec(vec![0.1, 0.5, 0.9]),
562 Array1::from_vec(vec![0.8, 0.3, 0.6]),
563 Array1::from_vec(vec![0.4, 0.7, 0.2]),
564 ];
565
566 for (i, input) in test_inputs.iter().enumerate() {
567 println!(
568 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
569 i + 1,
570 input[0],
571 input[1],
572 input[2]
573 );
574
575 let explanation = xai.explain(input)?;
576
577 if let Some(ref shap_values) = explanation.feature_attributions {
578 println!(" SHAP Values:");
579
580 let mut total_shap = 0.0;
581 for (j, &value) in shap_values.iter().enumerate() {
582 total_shap += value;
583 println!(" - Feature {j}: {value:+.4}");
584 }
585
586 println!(" - Sum of SHAP values: {total_shap:.4}");
587
588 // Feature ranking
589 let mut indexed_shap: Vec<(usize, f64)> = shap_values
590 .iter()
591 .enumerate()
592 .map(|(idx, &val)| (idx, val.abs()))
593 .collect();
594 indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
595
596 println!(" Feature importance ranking:");
597 for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
598 let original_value = shap_values[*feature_idx];
599 println!(
600 " {}. Feature {}: {:.4} (|{:.4}|)",
601 rank + 1,
602 feature_idx,
603 original_value,
604 abs_value
605 );
606 }
607
608 // SHAP properties
609 println!(
610 " → SHAP values satisfy efficiency property (sum to prediction difference)"
611 );
612 println!(" → Each value represents feature's average marginal contribution");
613 }
614 }
615
616 Ok(())
617 }
618
619/// Demonstrate Layer-wise Relevance Propagation
620 fn quantum_lrp_demo() -> Result<()> {
621 let layers = vec![
622 QNNLayerType::EncodingLayer { num_features: 4 },
623 QNNLayerType::VariationalLayer { num_params: 8 },
624 QNNLayerType::VariationalLayer { num_params: 6 },
625 QNNLayerType::MeasurementLayer {
626 measurement_basis: "computational".to_string(),
627 },
628 ];
629
630 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
631
632 // Test different LRP rules
633 let lrp_rules = vec![
634 ("Epsilon Rule", LRPRule::Epsilon),
635 ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
636 (
637 "Alpha-Beta Rule",
638 LRPRule::AlphaBeta {
639 alpha: 2.0,
640 beta: 1.0,
641 },
642 ),
643 ("Quantum Rule", LRPRule::QuantumRule),
644 ];
645
646 let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);
647
648 println!(" Layer-wise Relevance Propagation for Quantum Circuits");
649 println!(
650 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
651 test_input[0], test_input[1], test_input[2], test_input[3]
652 );
653
654 for (rule_name, lrp_rule) in lrp_rules {
655 let method = ExplanationMethod::QuantumLRP {
656 propagation_rule: lrp_rule,
657 epsilon: 1e-6,
658 };
659
660 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
661 let explanation = xai.explain(&test_input)?;
662
663 if let Some(ref relevance) = explanation.feature_attributions {
664 println!("\n LRP with {rule_name}:");
665
666 let total_relevance = relevance.sum();
667
668 for (i, &rel) in relevance.iter().enumerate() {
669 let percentage = if total_relevance.abs() > 1e-10 {
670 rel / total_relevance * 100.0
671 } else {
672 0.0
673 };
674
675 println!(" Feature {i}: {rel:.4} ({percentage:.1}% of total relevance)");
676 }
677
678 println!(" Total relevance: {total_relevance:.4}");
679
680 // Rule-specific interpretation
681 match rule_name {
682 "Epsilon Rule" => {
683 println!(" → Distributes relevance proportionally to activations");
684 }
685 "Gamma Rule" => {
686 println!(" → Emphasizes positive contributions");
687 }
688 "Alpha-Beta Rule" => {
689 println!(" → Separates positive and negative contributions");
690 }
691 "Quantum Rule" => {
692 println!(" → Accounts for quantum superposition and entanglement");
693 }
694 _ => {}
695 }
696 }
697 }
698
699 Ok(())
700 }
701
702/// Comprehensive explanation demonstration
703 fn comprehensive_explanation_demo() -> Result<()> {
704 let layers = vec![
705 QNNLayerType::EncodingLayer { num_features: 4 },
706 QNNLayerType::VariationalLayer { num_params: 12 },
707 QNNLayerType::EntanglementLayer {
708 connectivity: "full".to_string(),
709 },
710 QNNLayerType::VariationalLayer { num_params: 8 },
711 QNNLayerType::MeasurementLayer {
712 measurement_basis: "computational".to_string(),
713 },
714 ];
715
716 let model = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
717
718 // Use comprehensive explanation methods
719 let methods = vec![
720 ExplanationMethod::QuantumFeatureAttribution {
721 method: AttributionMethod::IntegratedGradients,
722 num_samples: 30,
723 baseline: Some(Array1::zeros(4)),
724 },
725 ExplanationMethod::CircuitVisualization {
726 include_measurements: true,
727 parameter_sensitivity: true,
728 },
729 ExplanationMethod::StateAnalysis {
730 entanglement_measures: true,
731 coherence_analysis: true,
732 superposition_analysis: true,
733 },
734 ExplanationMethod::ConceptActivation {
735 concept_datasets: vec!["pattern_A".to_string(), "pattern_B".to_string()],
736 activation_threshold: 0.3,
737 },
738 ];
739
740 let mut xai = QuantumExplainableAI::new(model, methods);
741
742 // Add concept vectors
743 xai.add_concept(
744 "pattern_A".to_string(),
745 Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
746 );
747 xai.add_concept(
748 "pattern_B".to_string(),
749 Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
750 );
751
752 // Set background data
753 let background_data = Array2::from_shape_fn((30, 4), |(i, j)| {
754 0.4f64.mul_add(((i * j) as f64 * 0.15).sin(), 0.3)
755 });
756 xai.set_background_data(background_data);
757
758 println!(" Comprehensive Quantum Model Explanation");
759
760 // Test input representing a specific pattern
761 let test_input = Array1::from_vec(vec![0.9, 0.1, 0.8, 0.2]); // Similar to pattern_A
762
763 println!(
764 "\n Analyzing input: [{:.1}, {:.1}, {:.1}, {:.1}]",
765 test_input[0], test_input[1], test_input[2], test_input[3]
766 );
767
768 let explanation = xai.explain(&test_input)?;
769
770 // Display comprehensive results
771 println!("\n === COMPREHENSIVE EXPLANATION RESULTS ===");
772
773 // Feature attributions
774 if let Some(ref attributions) = explanation.feature_attributions {
775 println!("\n Feature Attributions:");
776 for (i, &attr) in attributions.iter().enumerate() {
777 println!(" - Feature {i}: {attr:+.3}");
778 }
779 }
780
781 // Circuit analysis summary
782 if let Some(ref circuit) = explanation.circuit_explanation {
783 println!("\n Circuit Analysis Summary:");
784 let avg_importance = circuit.parameter_importance.mean().unwrap_or(0.0);
785 println!(" - Average parameter importance: {avg_importance:.3}");
786 println!(
787 " - Number of analyzed layers: {}",
788 circuit.layer_analysis.len()
789 );
790 println!(" - Critical path length: {}", circuit.critical_path.len());
791 }
792
793 // Quantum state properties
794 if let Some(ref state) = explanation.state_properties {
795 println!("\n Quantum State Properties:");
796 println!(
797 " - Entanglement entropy: {:.3}",
798 state.entanglement_entropy
799 );
800 println!(
801 " - Coherence measures: {} types",
802 state.coherence_measures.len()
803 );
804
805 let max_measurement_prob = state
806 .measurement_probabilities
807 .iter()
808 .copied()
809 .fold(f64::NEG_INFINITY, f64::max);
810 println!(" - Max measurement probability: {max_measurement_prob:.3}");
811 }
812
813 // Concept activations
814 if let Some(ref concepts) = explanation.concept_activations {
815 println!("\n Concept Activations:");
816 for (concept, &activation) in concepts {
817 let similarity = if activation > 0.7 {
818 "high"
819 } else if activation > 0.3 {
820 "medium"
821 } else {
822 "low"
823 };
824 println!(" - {concept}: {activation:.3} ({similarity} similarity)");
825 }
826 }
827
828 // Confidence scores
829 println!("\n Explanation Confidence Scores:");
830 for (component, &confidence) in &explanation.confidence_scores {
831 println!(" - {component}: {confidence:.3}");
832 }
833
834 // Textual explanation
835 println!("\n Generated Explanation:");
836 println!("{}", explanation.textual_explanation);
837
838 // Summary insights
839 println!("\n === KEY INSIGHTS ===");
840
841 if let Some(ref attributions) = explanation.feature_attributions {
842 let max_attr_idx = attributions
843 .iter()
844 .enumerate()
845 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
846 .map_or(0, |(i, _)| i);
847
848 println!(
849 " • Most influential feature: Feature {} ({:.3})",
850 max_attr_idx, attributions[max_attr_idx]
851 );
852 }
853
854 if let Some(ref state) = explanation.state_properties {
855 if state.entanglement_entropy > 0.5 {
856 println!(" • Model creates significant quantum entanglement");
857 }
858
859 let coherence_level = state
860 .coherence_measures
861 .values()
862 .copied()
863 .fold(0.0, f64::max);
864 if coherence_level > 0.5 {
865 println!(" • High quantum coherence detected");
866 }
867 }
868
869 if let Some(ref concepts) = explanation.concept_activations {
870 if let Some((best_concept, &max_activation)) =
871 concepts.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
872 {
873 if max_activation > 0.5 {
874 println!(" • Input strongly matches concept: {best_concept}");
875 }
876 }
877 }
878
879 println!(" • Explanation provides multi-faceted interpretation of quantum model behavior");
880
881 Ok(())
882 }

pub fn set_background_data(&mut self, data: Array2<f64>)
Set background data used by explanation methods that require reference samples, such as gradient SHAP and Quantum SHAP
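A short sketch of supplying background samples, mirroring the repository example below; xai is assumed to be a QuantumExplainableAI built as shown under new, and Array2 comes from ndarray:

// 20 background samples with 4 features each (rows = samples, columns = features),
// used as the reference distribution for SHAP-style attributions.
let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
    0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
});
xai.set_background_data(background_data);
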
Examples found in repository
examples/quantum_explainable_ai.rs (line 126)
(The call site is shown in the full example reproduced above under QuantumExplainableAI::new.)

pub fn add_concept(&mut self, name: String, vector: Array1<f64>)
Add a named concept vector for use in concept-activation analysis
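A short sketch of registering concept prototypes, mirroring the repository example below; the vectors are assumed to match the model's number of input features:

// Concept prototypes referenced by name in the ConceptActivation method.
xai.add_concept(
    "pattern_A".to_string(),
    Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
);
xai.add_concept(
    "pattern_B".to_string(),
    Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
);
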
Examples found in repository
examples/quantum_explainable_ai.rs (lines 743-746)
(The call site is shown in the full example reproduced above under QuantumExplainableAI::new.)

pub fn explain(&mut self, input: &Array1<f64>) -> Result<ExplanationResult>
Generate a comprehensive explanation for an input using the configured explanation methods
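A short usage sketch, assuming xai was configured as above and the code runs inside a function returning Result<()>; fields of the returned ExplanationResult are optional and populated only for the explanation methods that were requested:

let input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
let explanation = xai.explain(&input)?;

// Attribution scores are present when an attribution-style method was configured.
if let Some(ref attributions) = explanation.feature_attributions {
    for (i, &attr) in attributions.iter().enumerate() {
        println!("Feature {i}: {attr:+.4}");
    }
}
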
Examples found in repository
examples/quantum_explainable_ai.rs (line 128)
54fn feature_attribution_demo() -> Result<()> {
55 // Create quantum model
56 let layers = vec![
57 QNNLayerType::EncodingLayer { num_features: 4 },
58 QNNLayerType::VariationalLayer { num_params: 12 },
59 QNNLayerType::EntanglementLayer {
60 connectivity: "circular".to_string(),
61 },
62 QNNLayerType::VariationalLayer { num_params: 8 },
63 QNNLayerType::MeasurementLayer {
64 measurement_basis: "computational".to_string(),
65 },
66 ];
67
68 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
69
70 println!(
71 " Created quantum model with {} parameters",
72 model.parameters.len()
73 );
74
75 // Test different attribution methods
76 let attribution_methods = vec![
77 (
78 "Integrated Gradients",
79 ExplanationMethod::QuantumFeatureAttribution {
80 method: AttributionMethod::IntegratedGradients,
81 num_samples: 50,
82 baseline: Some(Array1::zeros(4)),
83 },
84 ),
85 (
86 "Gradient × Input",
87 ExplanationMethod::QuantumFeatureAttribution {
88 method: AttributionMethod::GradientInput,
89 num_samples: 1,
90 baseline: None,
91 },
92 ),
93 (
94 "Gradient SHAP",
95 ExplanationMethod::QuantumFeatureAttribution {
96 method: AttributionMethod::GradientSHAP,
97 num_samples: 30,
98 baseline: None,
99 },
100 ),
101 (
102 "Quantum Attribution",
103 ExplanationMethod::QuantumFeatureAttribution {
104 method: AttributionMethod::QuantumAttribution,
105 num_samples: 25,
106 baseline: None,
107 },
108 ),
109 ];
110
111 // Test input
112 let test_input = Array1::from_vec(vec![0.8, 0.3, 0.9, 0.1]);
113
114 println!(
115 "\n Feature attribution analysis for input: [{:.1}, {:.1}, {:.1}, {:.1}]",
116 test_input[0], test_input[1], test_input[2], test_input[3]
117 );
118
119 for (method_name, method) in attribution_methods {
120 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
121
122 // Set background data for gradient SHAP
123 let background_data = Array2::from_shape_fn((20, 4), |(_, j)| {
124 0.3f64.mul_add((j as f64 * 0.2).sin(), 0.5)
125 });
126 xai.set_background_data(background_data);
127
128 let explanation = xai.explain(&test_input)?;
129
130 if let Some(ref attributions) = explanation.feature_attributions {
131 println!("\n {method_name} Attribution:");
132 for (i, &attr) in attributions.iter().enumerate() {
133 println!(
134 " Feature {}: {:+.4} {}",
135 i,
136 attr,
137 if attr.abs() > 0.1 {
138 if attr > 0.0 {
139 "(strong positive)"
140 } else {
141 "(strong negative)"
142 }
143 } else {
144 "(weak influence)"
145 }
146 );
147 }
148
149 // Find most important feature
150 let max_idx = attributions
151 .iter()
152 .enumerate()
153 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
154 .map_or(0, |(i, _)| i);
155
156 println!(
157 " → Most important feature: Feature {} ({:.4})",
158 max_idx, attributions[max_idx]
159 );
160 }
161 }
162
163 Ok(())
164}
165
166/// Demonstrate circuit analysis and visualization
167fn circuit_analysis_demo() -> Result<()> {
168 let layers = vec![
169 QNNLayerType::EncodingLayer { num_features: 4 },
170 QNNLayerType::VariationalLayer { num_params: 6 },
171 QNNLayerType::EntanglementLayer {
172 connectivity: "full".to_string(),
173 },
174 QNNLayerType::VariationalLayer { num_params: 6 },
175 QNNLayerType::MeasurementLayer {
176 measurement_basis: "Pauli-Z".to_string(),
177 },
178 ];
179
180 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
181
182 let method = ExplanationMethod::CircuitVisualization {
183 include_measurements: true,
184 parameter_sensitivity: true,
185 };
186
187 let mut xai = QuantumExplainableAI::new(model, vec![method]);
188
189 println!(" Analyzing quantum circuit structure and parameter importance...");
190
191 let test_input = Array1::from_vec(vec![0.6, 0.4, 0.7, 0.3]);
192 let explanation = xai.explain(&test_input)?;
193
194 if let Some(ref circuit) = explanation.circuit_explanation {
195 println!("\n Circuit Analysis Results:");
196
197 // Parameter importance
198 println!(" Parameter Importance Scores:");
199 for (i, &importance) in circuit.parameter_importance.iter().enumerate() {
200 if importance > 0.5 {
201 println!(" Parameter {i}: {importance:.3} (high importance)");
202 } else if importance > 0.2 {
203 println!(" Parameter {i}: {importance:.3} (medium importance)");
204 }
205 }
206
207 // Layer analysis
208 println!("\n Layer-wise Analysis:");
209 for (i, layer_analysis) in circuit.layer_analysis.iter().enumerate() {
210 println!(
211 " Layer {}: {}",
212 i,
213 format_layer_type(&layer_analysis.layer_type)
214 );
215 println!(
216 " Information gain: {:.3}",
217 layer_analysis.information_gain
218 );
219 println!(
220 " Entanglement generated: {:.3}",
221 layer_analysis.entanglement_generated
222 );
223
224 if layer_analysis.entanglement_generated > 0.5 {
225 println!(" → Significant entanglement layer");
226 }
227 }
228
229 // Gate contributions
230 println!("\n Gate Contribution Analysis:");
231 for (i, gate) in circuit.gate_contributions.iter().enumerate().take(5) {
232 println!(
233 " Gate {}: {} on qubits {:?}",
234 gate.gate_index, gate.gate_type, gate.qubits
235 );
236 println!(" Contribution: {:.3}", gate.contribution);
237
238 if let Some(ref params) = gate.parameters {
239 println!(" Parameters: {:.3}", params[0]);
240 }
241 }
242
243 // Critical path
244 println!("\n Critical Path (most important parameters):");
245 print!(" ");
246        for (i, &param_idx) in circuit.critical_path.iter().enumerate() {
247 if i > 0 {
248 print!(" → ");
249 }
250 print!("P{param_idx}");
251 }
252 println!();
253
254 println!(" → This path represents the most influential quantum operations");
255 }
256
257 Ok(())
258}
259
260/// Demonstrate quantum state analysis
261fn quantum_state_demo() -> Result<()> {
262 let layers = vec![
263 QNNLayerType::EncodingLayer { num_features: 3 },
264 QNNLayerType::VariationalLayer { num_params: 9 },
265 QNNLayerType::EntanglementLayer {
266 connectivity: "circular".to_string(),
267 },
268 QNNLayerType::MeasurementLayer {
269 measurement_basis: "computational".to_string(),
270 },
271 ];
272
273 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
274
275 let method = ExplanationMethod::StateAnalysis {
276 entanglement_measures: true,
277 coherence_analysis: true,
278 superposition_analysis: true,
279 };
280
281 let mut xai = QuantumExplainableAI::new(model, vec![method]);
282
283 println!(" Analyzing quantum state properties...");
284
285 // Test different inputs to see state evolution
286 let test_inputs = [
287 Array1::from_vec(vec![0.0, 0.0, 0.0]),
288 Array1::from_vec(vec![1.0, 0.0, 0.0]),
289 Array1::from_vec(vec![0.5, 0.5, 0.5]),
290 Array1::from_vec(vec![1.0, 1.0, 1.0]),
291 ];
292
293 for (i, input) in test_inputs.iter().enumerate() {
294 println!(
295 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
296 i + 1,
297 input[0],
298 input[1],
299 input[2]
300 );
301
302 let explanation = xai.explain(input)?;
303
304 if let Some(ref state) = explanation.state_properties {
305 println!(" Quantum State Properties:");
306 println!(
307 " - Entanglement entropy: {:.3}",
308 state.entanglement_entropy
309 );
310
311 // Coherence measures
312 for (measure_name, &value) in &state.coherence_measures {
313 println!(" - {measure_name}: {value:.3}");
314 }
315
316 // Superposition analysis
317 let max_component = state
318 .superposition_components
319 .iter()
320 .copied()
321 .fold(f64::NEG_INFINITY, f64::max);
322 println!(" - Max superposition component: {max_component:.3}");
323
324 // Measurement probabilities
325 let total_prob = state.measurement_probabilities.sum();
326 println!(" - Total measurement probability: {total_prob:.3}");
327
328 // Most likely measurement outcome
329 let most_likely = state
330 .measurement_probabilities
331 .iter()
332 .enumerate()
333 .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
334 .map_or((0, 0.0), |(idx, &prob)| (idx, prob));
335
336 println!(
337 " - Most likely outcome: state {} with prob {:.3}",
338 most_likely.0, most_likely.1
339 );
340
341 // State fidelities
342 if let Some(highest_fidelity) = state
343 .state_fidelities
344 .values()
345 .copied()
346 .fold(None, |acc, x| Some(acc.map_or(x, |y| f64::max(x, y))))
347 {
348 println!(" - Highest basis state fidelity: {highest_fidelity:.3}");
349 }
350
351 // Interpretation
352 if state.entanglement_entropy > 0.5 {
353 println!(" → Highly entangled state");
354 } else if state.entanglement_entropy > 0.1 {
355 println!(" → Moderately entangled state");
356 } else {
357 println!(" → Separable or weakly entangled state");
358 }
359 }
360 }
361
362 Ok(())
363}
364
365/// Demonstrate saliency mapping
366fn saliency_mapping_demo() -> Result<()> {
367 let layers = vec![
368 QNNLayerType::EncodingLayer { num_features: 4 },
369 QNNLayerType::VariationalLayer { num_params: 8 },
370 QNNLayerType::MeasurementLayer {
371 measurement_basis: "computational".to_string(),
372 },
373 ];
374
375 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
376
377 // Test different perturbation methods
378 let perturbation_methods = vec![
379 (
380 "Gaussian Noise",
381 PerturbationMethod::Gaussian { sigma: 0.1 },
382 ),
383 (
384 "Quantum Phase",
385 PerturbationMethod::QuantumPhase { magnitude: 0.2 },
386 ),
387 ("Feature Masking", PerturbationMethod::FeatureMasking),
388 (
389 "Parameter Perturbation",
390 PerturbationMethod::ParameterPerturbation { strength: 0.1 },
391 ),
392 ];
393
394 let test_input = Array1::from_vec(vec![0.7, 0.2, 0.8, 0.4]);
395
396 println!(" Computing saliency maps with different perturbation methods...");
397 println!(
398 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
399 test_input[0], test_input[1], test_input[2], test_input[3]
400 );
401
402 for (method_name, perturbation_method) in perturbation_methods {
403 let method = ExplanationMethod::SaliencyMapping {
404 perturbation_method,
405 aggregation: AggregationMethod::Mean,
406 };
407
408 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
409 let explanation = xai.explain(&test_input)?;
410
411 if let Some(ref saliency) = explanation.saliency_map {
412 println!("\n {method_name} Saliency Map:");
413
414 // Analyze saliency for each output
415 for output_idx in 0..saliency.ncols() {
416 println!(" Output {output_idx}:");
417 for input_idx in 0..saliency.nrows() {
418 let saliency_score = saliency[[input_idx, output_idx]];
419 if saliency_score > 0.1 {
420 println!(
421 " Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (important)"
422 );
423 } else if saliency_score > 0.05 {
424 println!(
425 " Feature {input_idx} → Output {output_idx}: {saliency_score:.3} (moderate)"
426 );
427 }
428 }
429 }
430
431 // Find most salient feature-output pair
432 let mut max_saliency = 0.0;
433 let mut max_pair = (0, 0);
434
435 for i in 0..saliency.nrows() {
436 for j in 0..saliency.ncols() {
437 if saliency[[i, j]] > max_saliency {
438 max_saliency = saliency[[i, j]];
439 max_pair = (i, j);
440 }
441 }
442 }
443
444 println!(
445 " → Most salient: Feature {} → Output {} ({:.3})",
446 max_pair.0, max_pair.1, max_saliency
447 );
448 }
449 }
450
451 Ok(())
452}
453
454/// Demonstrate Quantum LIME
455fn quantum_lime_demo() -> Result<()> {
456 let layers = vec![
457 QNNLayerType::EncodingLayer { num_features: 4 },
458 QNNLayerType::VariationalLayer { num_params: 10 },
459 QNNLayerType::EntanglementLayer {
460 connectivity: "circular".to_string(),
461 },
462 QNNLayerType::MeasurementLayer {
463 measurement_basis: "computational".to_string(),
464 },
465 ];
466
467 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
468
469 // Test different local models
470 let local_models = vec![
471 ("Linear Regression", LocalModelType::LinearRegression),
472 ("Decision Tree", LocalModelType::DecisionTree),
473 ("Quantum Linear", LocalModelType::QuantumLinear),
474 ];
475
476 let test_input = Array1::from_vec(vec![0.6, 0.8, 0.2, 0.9]);
477
478 println!(" Quantum LIME: Local Interpretable Model-agnostic Explanations");
479 println!(
480 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
481 test_input[0], test_input[1], test_input[2], test_input[3]
482 );
483
484 for (model_name, local_model) in local_models {
485 let method = ExplanationMethod::QuantumLIME {
486 num_perturbations: 100,
487 kernel_width: 0.5,
488 local_model,
489 };
490
491 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
492 let explanation = xai.explain(&test_input)?;
493
494 if let Some(ref attributions) = explanation.feature_attributions {
495 println!("\n LIME with {model_name}:");
496
497 for (i, &attr) in attributions.iter().enumerate() {
498 let impact = if attr.abs() > 0.3 {
499 "high"
500 } else if attr.abs() > 0.1 {
501 "medium"
502 } else {
503 "low"
504 };
505
506 println!(" Feature {i}: {attr:+.3} ({impact} impact)");
507 }
508
509 // Local model interpretation
510 match model_name {
511 "Linear Regression" => {
512 println!(" → Linear relationship approximation in local region");
513 }
514 "Decision Tree" => {
515 println!(" → Rule-based approximation with thresholds");
516 }
517 "Quantum Linear" => {
518 println!(" → Quantum-aware linear approximation");
519 }
520 _ => {}
521 }
522
523            // Compute local explanation complexity (simplified)
524 let local_complexity = attributions.iter().map(|x| x.abs()).sum::<f64>();
525 println!(" → Local explanation complexity: {local_complexity:.3}");
526 }
527 }
528
529 Ok(())
530}
531
532/// Demonstrate Quantum SHAP
533fn quantum_shap_demo() -> Result<()> {
534 let layers = vec![
535 QNNLayerType::EncodingLayer { num_features: 3 },
536 QNNLayerType::VariationalLayer { num_params: 6 },
537 QNNLayerType::MeasurementLayer {
538 measurement_basis: "Pauli-Z".to_string(),
539 },
540 ];
541
542 let model = QuantumNeuralNetwork::new(layers, 3, 3, 2)?;
543
544 let method = ExplanationMethod::QuantumSHAP {
545 num_coalitions: 100,
546 background_samples: 20,
547 };
548
549 let mut xai = QuantumExplainableAI::new(model, vec![method]);
550
551 // Set background data for SHAP
552 let background_data = Array2::from_shape_fn((50, 3), |(i, j)| {
553 0.3f64.mul_add(((i + j) as f64 * 0.1).sin(), 0.5)
554 });
555 xai.set_background_data(background_data);
556
557 println!(" Quantum SHAP: SHapley Additive exPlanations");
558
559 // Test multiple inputs
560 let test_inputs = [
561 Array1::from_vec(vec![0.1, 0.5, 0.9]),
562 Array1::from_vec(vec![0.8, 0.3, 0.6]),
563 Array1::from_vec(vec![0.4, 0.7, 0.2]),
564 ];
565
566 for (i, input) in test_inputs.iter().enumerate() {
567 println!(
568 "\n Input {}: [{:.1}, {:.1}, {:.1}]",
569 i + 1,
570 input[0],
571 input[1],
572 input[2]
573 );
574
575 let explanation = xai.explain(input)?;
576
577 if let Some(ref shap_values) = explanation.feature_attributions {
578 println!(" SHAP Values:");
579
580 let mut total_shap = 0.0;
581 for (j, &value) in shap_values.iter().enumerate() {
582 total_shap += value;
583 println!(" - Feature {j}: {value:+.4}");
584 }
585
586 println!(" - Sum of SHAP values: {total_shap:.4}");
587
588 // Feature ranking
589 let mut indexed_shap: Vec<(usize, f64)> = shap_values
590 .iter()
591 .enumerate()
592 .map(|(idx, &val)| (idx, val.abs()))
593 .collect();
594 indexed_shap.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
595
596 println!(" Feature importance ranking:");
597 for (rank, (feature_idx, abs_value)) in indexed_shap.iter().enumerate() {
598 let original_value = shap_values[*feature_idx];
599 println!(
600 " {}. Feature {}: {:.4} (|{:.4}|)",
601 rank + 1,
602 feature_idx,
603 original_value,
604 abs_value
605 );
606 }
607
608 // SHAP properties
609 println!(
610 " → SHAP values satisfy efficiency property (sum to prediction difference)"
611 );
612 println!(" → Each value represents feature's average marginal contribution");
613 }
614 }
615
616 Ok(())
617}
618
619/// Demonstrate Layer-wise Relevance Propagation
620fn quantum_lrp_demo() -> Result<()> {
621 let layers = vec![
622 QNNLayerType::EncodingLayer { num_features: 4 },
623 QNNLayerType::VariationalLayer { num_params: 8 },
624 QNNLayerType::VariationalLayer { num_params: 6 },
625 QNNLayerType::MeasurementLayer {
626 measurement_basis: "computational".to_string(),
627 },
628 ];
629
630 let model = QuantumNeuralNetwork::new(layers, 4, 4, 2)?;
631
632 // Test different LRP rules
633 let lrp_rules = vec![
634 ("Epsilon Rule", LRPRule::Epsilon),
635 ("Gamma Rule", LRPRule::Gamma { gamma: 0.25 }),
636 (
637 "Alpha-Beta Rule",
638 LRPRule::AlphaBeta {
639 alpha: 2.0,
640 beta: 1.0,
641 },
642 ),
643 ("Quantum Rule", LRPRule::QuantumRule),
644 ];
645
646 let test_input = Array1::from_vec(vec![0.7, 0.1, 0.8, 0.4]);
647
648 println!(" Layer-wise Relevance Propagation for Quantum Circuits");
649 println!(
650 " Input: [{:.1}, {:.1}, {:.1}, {:.1}]",
651 test_input[0], test_input[1], test_input[2], test_input[3]
652 );
653
654 for (rule_name, lrp_rule) in lrp_rules {
655 let method = ExplanationMethod::QuantumLRP {
656 propagation_rule: lrp_rule,
657 epsilon: 1e-6,
658 };
659
660 let mut xai = QuantumExplainableAI::new(model.clone(), vec![method]);
661 let explanation = xai.explain(&test_input)?;
662
663 if let Some(ref relevance) = explanation.feature_attributions {
664 println!("\n LRP with {rule_name}:");
665
666 let total_relevance = relevance.sum();
667
668 for (i, &rel) in relevance.iter().enumerate() {
669 let percentage = if total_relevance.abs() > 1e-10 {
670 rel / total_relevance * 100.0
671 } else {
672 0.0
673 };
674
675 println!(" Feature {i}: {rel:.4} ({percentage:.1}% of total relevance)");
676 }
677
678 println!(" Total relevance: {total_relevance:.4}");
679
680 // Rule-specific interpretation
681 match rule_name {
682 "Epsilon Rule" => {
683 println!(" → Distributes relevance proportionally to activations");
684 }
685 "Gamma Rule" => {
686 println!(" → Emphasizes positive contributions");
687 }
688 "Alpha-Beta Rule" => {
689 println!(" → Separates positive and negative contributions");
690 }
691 "Quantum Rule" => {
692 println!(" → Accounts for quantum superposition and entanglement");
693 }
694 _ => {}
695 }
696 }
697 }
698
699 Ok(())
700}
701
702/// Comprehensive explanation demonstration
703fn comprehensive_explanation_demo() -> Result<()> {
704 let layers = vec![
705 QNNLayerType::EncodingLayer { num_features: 4 },
706 QNNLayerType::VariationalLayer { num_params: 12 },
707 QNNLayerType::EntanglementLayer {
708 connectivity: "full".to_string(),
709 },
710 QNNLayerType::VariationalLayer { num_params: 8 },
711 QNNLayerType::MeasurementLayer {
712 measurement_basis: "computational".to_string(),
713 },
714 ];
715
716 let model = QuantumNeuralNetwork::new(layers, 4, 4, 3)?;
717
718 // Use comprehensive explanation methods
719 let methods = vec![
720 ExplanationMethod::QuantumFeatureAttribution {
721 method: AttributionMethod::IntegratedGradients,
722 num_samples: 30,
723 baseline: Some(Array1::zeros(4)),
724 },
725 ExplanationMethod::CircuitVisualization {
726 include_measurements: true,
727 parameter_sensitivity: true,
728 },
729 ExplanationMethod::StateAnalysis {
730 entanglement_measures: true,
731 coherence_analysis: true,
732 superposition_analysis: true,
733 },
734 ExplanationMethod::ConceptActivation {
735 concept_datasets: vec!["pattern_A".to_string(), "pattern_B".to_string()],
736 activation_threshold: 0.3,
737 },
738 ];
739
740 let mut xai = QuantumExplainableAI::new(model, methods);
741
742 // Add concept vectors
743 xai.add_concept(
744 "pattern_A".to_string(),
745 Array1::from_vec(vec![1.0, 0.0, 1.0, 0.0]),
746 );
747 xai.add_concept(
748 "pattern_B".to_string(),
749 Array1::from_vec(vec![0.0, 1.0, 0.0, 1.0]),
750 );
751
752 // Set background data
753 let background_data = Array2::from_shape_fn((30, 4), |(i, j)| {
754 0.4f64.mul_add(((i * j) as f64 * 0.15).sin(), 0.3)
755 });
756 xai.set_background_data(background_data);
757
758 println!(" Comprehensive Quantum Model Explanation");
759
760 // Test input representing a specific pattern
761 let test_input = Array1::from_vec(vec![0.9, 0.1, 0.8, 0.2]); // Similar to pattern_A
762
763 println!(
764 "\n Analyzing input: [{:.1}, {:.1}, {:.1}, {:.1}]",
765 test_input[0], test_input[1], test_input[2], test_input[3]
766 );
767
768 let explanation = xai.explain(&test_input)?;
769
770 // Display comprehensive results
771 println!("\n === COMPREHENSIVE EXPLANATION RESULTS ===");
772
773 // Feature attributions
774 if let Some(ref attributions) = explanation.feature_attributions {
775 println!("\n Feature Attributions:");
776 for (i, &attr) in attributions.iter().enumerate() {
777 println!(" - Feature {i}: {attr:+.3}");
778 }
779 }
780
781 // Circuit analysis summary
782 if let Some(ref circuit) = explanation.circuit_explanation {
783 println!("\n Circuit Analysis Summary:");
784 let avg_importance = circuit.parameter_importance.mean().unwrap_or(0.0);
785 println!(" - Average parameter importance: {avg_importance:.3}");
786 println!(
787 " - Number of analyzed layers: {}",
788 circuit.layer_analysis.len()
789 );
790 println!(" - Critical path length: {}", circuit.critical_path.len());
791 }
792
793 // Quantum state properties
794 if let Some(ref state) = explanation.state_properties {
795 println!("\n Quantum State Properties:");
796 println!(
797 " - Entanglement entropy: {:.3}",
798 state.entanglement_entropy
799 );
800 println!(
801 " - Coherence measures: {} types",
802 state.coherence_measures.len()
803 );
804
805 let max_measurement_prob = state
806 .measurement_probabilities
807 .iter()
808 .copied()
809 .fold(f64::NEG_INFINITY, f64::max);
810 println!(" - Max measurement probability: {max_measurement_prob:.3}");
811 }
812
813 // Concept activations
814 if let Some(ref concepts) = explanation.concept_activations {
815 println!("\n Concept Activations:");
816 for (concept, &activation) in concepts {
817 let similarity = if activation > 0.7 {
818 "high"
819 } else if activation > 0.3 {
820 "medium"
821 } else {
822 "low"
823 };
824 println!(" - {concept}: {activation:.3} ({similarity} similarity)");
825 }
826 }
827
828 // Confidence scores
829 println!("\n Explanation Confidence Scores:");
830 for (component, &confidence) in &explanation.confidence_scores {
831 println!(" - {component}: {confidence:.3}");
832 }
833
834 // Textual explanation
835 println!("\n Generated Explanation:");
836 println!("{}", explanation.textual_explanation);
837
838 // Summary insights
839 println!("\n === KEY INSIGHTS ===");
840
841 if let Some(ref attributions) = explanation.feature_attributions {
842 let max_attr_idx = attributions
843 .iter()
844 .enumerate()
845 .max_by(|a, b| a.1.abs().partial_cmp(&b.1.abs()).unwrap())
846 .map_or(0, |(i, _)| i);
847
848 println!(
849 " • Most influential feature: Feature {} ({:.3})",
850 max_attr_idx, attributions[max_attr_idx]
851 );
852 }
853
854 if let Some(ref state) = explanation.state_properties {
855 if state.entanglement_entropy > 0.5 {
856 println!(" • Model creates significant quantum entanglement");
857 }
858
859 let coherence_level = state
860 .coherence_measures
861 .values()
862 .copied()
863 .fold(0.0, f64::max);
864 if coherence_level > 0.5 {
865 println!(" • High quantum coherence detected");
866 }
867 }
868
869 if let Some(ref concepts) = explanation.concept_activations {
870 if let Some((best_concept, &max_activation)) =
871 concepts.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
872 {
873 if max_activation > 0.5 {
874 println!(" • Input strongly matches concept: {best_concept}");
875 }
876 }
877 }
878
879 println!(" • Explanation provides multi-faceted interpretation of quantum model behavior");
880
881 Ok(())
882}
Auto Trait Implementations§
impl Freeze for QuantumExplainableAI
impl RefUnwindSafe for QuantumExplainableAI
impl Send for QuantumExplainableAI
impl Sync for QuantumExplainableAI
impl Unpin for QuantumExplainableAI
impl UnwindSafe for QuantumExplainableAI
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
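For context, IntoEither here is the helper trait from the either crate; a small usage sketch (values illustrative):

use either::{Either, IntoEither};

// Route a value into the Left or Right variant based on a flag or a predicate.
let by_flag: Either<i32, i32> = 5.into_either(true);            // Either::Left(5)
let by_pred: Either<i32, i32> = 5.into_either_with(|x| *x < 0); // Either::Right(5)
assert!(by_flag.is_left());
assert!(by_pred.is_right());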
Source§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
Source§fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.