pub struct ModelInterpreter<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> { /* private fields */ }
Expand description
Model interpreter for analyzing neural network decisions
This is the main orchestrator for all interpretation methods. It manages:
- Attribution method registration and dispatch
- Layer activation and gradient caching
- Integration between different interpretation techniques
- Unified interface for model analysis
Implementations§
Source§impl<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> ModelInterpreter<F>
impl<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> ModelInterpreter<F>
Sourcepub fn new() -> Self
pub fn new() -> Self
Create a new model interpreter
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Sourcepub fn add_attribution_method(&mut self, method: AttributionMethod)
pub fn add_attribution_method(&mut self, method: AttributionMethod)
Add an attribution method to the interpreter
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Sourcepub fn cache_activations(&mut self, layer_name: String, activations: ArrayD<F>)
pub fn cache_activations(&mut self, layer_name: String, activations: ArrayD<F>)
Cache layer activations for later analysis
Sourcepub fn cache_gradients(&mut self, layer_name: String, gradients: ArrayD<F>)
pub fn cache_gradients(&mut self, layer_name: String, gradients: ArrayD<F>)
Cache layer gradients for attribution computation
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Sourcepub fn get_cached_activations(&self, layer_name: &str) -> Option<&ArrayD<F>>
pub fn get_cached_activations(&self, layer_name: &str) -> Option<&ArrayD<F>>
Get cached activations for a layer
Sourcepub fn get_cached_gradients(&self, layer_name: &str) -> Option<&ArrayD<F>>
pub fn get_cached_gradients(&self, layer_name: &str) -> Option<&ArrayD<F>>
Get cached gradients for a layer
Sourcepub fn clear_caches(&mut self)
pub fn clear_caches(&mut self)
Clear all caches
Sourcepub fn attribution_methods(&self) -> &[AttributionMethod]
pub fn attribution_methods(&self) -> &[AttributionMethod]
Get available attribution methods
Sourcepub fn has_layer_data(&self, layer_name: &str) -> bool
pub fn has_layer_data(&self, layer_name: &str) -> bool
Check if a specific layer has cached data
Sourcepub fn cached_layers(&self) -> Vec<String>
pub fn cached_layers(&self) -> Vec<String>
Get all cached layer names
Sourcepub fn set_counterfactual_generator(
&mut self,
generator: CounterfactualGenerator<F>,
)
pub fn set_counterfactual_generator( &mut self, generator: CounterfactualGenerator<F>, )
Set the counterfactual generator
Sourcepub fn counterfactual_generator(&self) -> Option<&CounterfactualGenerator<F>>
pub fn counterfactual_generator(&self) -> Option<&CounterfactualGenerator<F>>
Get the counterfactual generator
Sourcepub fn set_lime_explainer(&mut self, explainer: LIMEExplainer<F>)
pub fn set_lime_explainer(&mut self, explainer: LIMEExplainer<F>)
Set the LIME explainer
Sourcepub fn lime_explainer(&self) -> Option<&LIMEExplainer<F>>
pub fn lime_explainer(&self) -> Option<&LIMEExplainer<F>>
Get the LIME explainer
Sourcepub fn set_attention_visualizer(&mut self, visualizer: AttentionVisualizer<F>)
pub fn set_attention_visualizer(&mut self, visualizer: AttentionVisualizer<F>)
Set the attention visualizer
Sourcepub fn attention_visualizer(&self) -> Option<&AttentionVisualizer<F>>
pub fn attention_visualizer(&self) -> Option<&AttentionVisualizer<F>>
Get the attention visualizer
Sourcepub fn add_concept_vector(
&mut self,
name: String,
vector: ConceptActivationVector<F>,
)
pub fn add_concept_vector( &mut self, name: String, vector: ConceptActivationVector<F>, )
Add concept activation vector
Sourcepub fn get_concept_vector(
&self,
name: &str,
) -> Option<&ConceptActivationVector<F>>
pub fn get_concept_vector( &self, name: &str, ) -> Option<&ConceptActivationVector<F>>
Get concept activation vector
Sourcepub fn layer_statistics(&self) -> &HashMap<String, LayerAnalysisStats<F>>
pub fn layer_statistics(&self) -> &HashMap<String, LayerAnalysisStats<F>>
Get layer statistics
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Sourcepub fn cache_layer_statistics(
&mut self,
layer_name: String,
stats: LayerAnalysisStats<F>,
)
pub fn cache_layer_statistics( &mut self, layer_name: String, stats: LayerAnalysisStats<F>, )
Cache layer statistics
Sourcepub fn compute_attribution(
&self,
method: &AttributionMethod,
input: &ArrayD<F>,
target_class: Option<usize>,
) -> Result<ArrayD<F>>
pub fn compute_attribution( &self, method: &AttributionMethod, input: &ArrayD<F>, target_class: Option<usize>, ) -> Result<ArrayD<F>>
Compute attribution using specified method
This is the main dispatch method that delegates to specific attribution implementations in the attribution module.
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Sourcepub fn analyze_layer_activations(
&mut self,
layer_name: &str,
) -> Result<LayerAnalysisStats<F>>
pub fn analyze_layer_activations( &mut self, layer_name: &str, ) -> Result<LayerAnalysisStats<F>>
Analyze layer activations
Delegates to the analysis module for detailed layer analysis.
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Sourcepub fn generate_report(
&self,
input: &ArrayD<F>,
) -> Result<ComprehensiveInterpretationReport<F>>
pub fn generate_report( &self, input: &ArrayD<F>, ) -> Result<ComprehensiveInterpretationReport<F>>
Generate comprehensive interpretation report
Delegates to the reporting module for unified reporting.
Examples found in repository?
355fn demonstrate_model_interpretation() -> Result<()> {
356 println!("🔍 Model Interpretation Demonstration");
357 println!("====================================\n");
358
359 // Create model interpreter
360 let mut interpreter = ModelInterpreter::<f64>::new();
361
362 // Add attribution methods
363 interpreter.add_attribution_method(AttributionMethod::Saliency);
364 interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365 baseline: BaselineMethod::Zero,
366 num_steps: 50,
367 });
368
369 // Simulate input data and gradients
370 let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372 let gradients =
373 Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375 // Cache gradients for attribution computation
376 interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378 println!("1. Computing feature attributions...");
379 println!(" Input shape: {:?}", input.shape());
380
381 // Compute saliency attribution
382 let saliency =
383 interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384 println!(" Saliency attribution shape: {:?}", saliency.shape());
385
386 // Compute integrated gradients
387 let integrated_grad = interpreter.compute_attribution(
388 &AttributionMethod::IntegratedGradients {
389 baseline: BaselineMethod::Zero,
390 num_steps: 50,
391 },
392 &input,
393 Some(1),
394 )?;
395 println!(
396 " Integrated gradients shape: {:?}",
397 integrated_grad.shape()
398 );
399
400 // Analyze layer activations
401 println!("\n2. Analyzing layer activations...");
402    let layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410    interpreter.cache_activations("conv_layer".to_string(), layer_activations);
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413 if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414 println!(" Layer statistics:");
415 println!(" Mean activation: {:.4}", stats.mean_activation);
416 println!(" Std activation: {:.4}", stats.std_activation);
417 println!(" Sparsity: {:.1}%", stats.sparsity);
418 println!(" Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419 }
420
421 // Generate comprehensive interpretation report
422 println!("\n3. Generating interpretation report...");
423 let report = interpreter.generate_report(&input)?;
424 println!("{}", report);
425
426 println!("✅ Model interpretation demonstration completed!\n");
427 Ok(())
428}
Trait Implementations§
Source§impl<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> Default for ModelInterpreter<F>
impl<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> Default for ModelInterpreter<F>
Auto Trait Implementations§
impl<F> Freeze for ModelInterpreter<F>
impl<F> RefUnwindSafe for ModelInterpreter<F> where F: RefUnwindSafe,
impl<F> Send for ModelInterpreter<F> where F: Send,
impl<F> Sync for ModelInterpreter<F> where F: Sync,
impl<F> Unpin for ModelInterpreter<F> where F: Unpin,
impl<F> UnwindSafe for ModelInterpreter<F> where F: UnwindSafe + RefUnwindSafe,
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where T: ?Sized,
impl<T> BorrowMut<T> for T where T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more