Struct ModelInterpreter

Source
pub struct ModelInterpreter<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> { /* private fields */ }
Expand description

Model interpreter for analyzing neural network decisions

This is the main orchestrator for all interpretation methods. It manages:

  • Attribution method registration and dispatch
  • Layer activation and gradient caching
  • Integration between different interpretation techniques
  • Unified interface for model analysis

Implementations§

Source§

impl<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> ModelInterpreter<F>

Source

pub fn new() -> Self

Create a new model interpreter

Examples found in repository?
examples/neural_advanced_features.rs (line 360)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}
Source

pub fn add_attribution_method(&mut self, method: AttributionMethod)

Add an attribution method to the interpreter

Examples found in repository?
examples/neural_advanced_features.rs (line 363)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}
Source

pub fn cache_activations(&mut self, layer_name: String, activations: ArrayD<F>)

Cache layer activations for later analysis

Source

pub fn cache_gradients(&mut self, layer_name: String, gradients: ArrayD<F>)

Cache layer gradients for attribution computation

Examples found in repository?
examples/neural_advanced_features.rs (line 376)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}
Source

pub fn get_cached_activations(&self, layer_name: &str) -> Option<&ArrayD<F>>

Get cached activations for a layer

Source

pub fn get_cached_gradients(&self, layer_name: &str) -> Option<&ArrayD<F>>

Get cached gradients for a layer

Source

pub fn clear_caches(&mut self)

Clear all caches

Source

pub fn attribution_methods(&self) -> &[AttributionMethod]

Get available attribution methods

Source

pub fn has_layer_data(&self, layer_name: &str) -> bool

Check if a specific layer has cached data

Source

pub fn cached_layers(&self) -> Vec<String>

Get all cached layer names

Source

pub fn set_counterfactual_generator(&mut self, generator: CounterfactualGenerator<F>)

Set the counterfactual generator

Source

pub fn counterfactual_generator(&self) -> Option<&CounterfactualGenerator<F>>

Get the counterfactual generator

Source

pub fn set_lime_explainer(&mut self, explainer: LIMEExplainer<F>)

Set the LIME explainer

Source

pub fn lime_explainer(&self) -> Option<&LIMEExplainer<F>>

Get the LIME explainer

Source

pub fn set_attention_visualizer(&mut self, visualizer: AttentionVisualizer<F>)

Set the attention visualizer

Source

pub fn attention_visualizer(&self) -> Option<&AttentionVisualizer<F>>

Get the attention visualizer

Source

pub fn add_concept_vector(&mut self, name: String, vector: ConceptActivationVector<F>)

Add concept activation vector

Source

pub fn get_concept_vector(&self, name: &str) -> Option<&ConceptActivationVector<F>>

Get concept activation vector

Source

pub fn layer_statistics(&self) -> &HashMap<String, LayerAnalysisStats<F>>

Get layer statistics

Examples found in repository?
examples/neural_advanced_features.rs (line 413)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}
Source

pub fn cache_layer_statistics(&mut self, layer_name: String, stats: LayerAnalysisStats<F>)

Cache layer statistics

Source

pub fn compute_attribution(&self, method: &AttributionMethod, input: &ArrayD<F>, target_class: Option<usize>) -> Result<ArrayD<F>>

Compute attribution using specified method

This is the main dispatch method that delegates to specific attribution implementations in the attribution module.

Examples found in repository?
examples/neural_advanced_features.rs (line 383)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}
Source

pub fn analyze_layer_activations(&mut self, layer_name: &str) -> Result<LayerAnalysisStats<F>>

Analyze layer activations

Delegates to the analysis module for detailed layer analysis.

Examples found in repository?
examples/neural_advanced_features.rs (line 411)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}
Source

pub fn generate_report(&self, input: &ArrayD<F>) -> Result<ComprehensiveInterpretationReport<F>>

Generate comprehensive interpretation report

Delegates to the reporting module for unified reporting.

Examples found in repository?
examples/neural_advanced_features.rs (line 423)
355fn demonstrate_model_interpretation() -> Result<()> {
356    println!("🔍 Model Interpretation Demonstration");
357    println!("====================================\n");
358
359    // Create model interpreter
360    let mut interpreter = ModelInterpreter::<f64>::new();
361
362    // Add attribution methods
363    interpreter.add_attribution_method(AttributionMethod::Saliency);
364    interpreter.add_attribution_method(AttributionMethod::IntegratedGradients {
365        baseline: BaselineMethod::Zero,
366        num_steps: 50,
367    });
368
369    // Simulate input data and gradients
370    let input = Array2::from_shape_fn((2, 10), |(i, j)| (i as f64 + j as f64) / 10.0).into_dyn();
371
372    let gradients =
373        Array2::from_shape_fn((2, 10), |(i, j)| ((i + j) as f64 / 20.0).sin()).into_dyn();
374
375    // Cache gradients for attribution computation
376    interpreter.cache_gradients("input_gradient".to_string(), gradients.clone());
377
378    println!("1. Computing feature attributions...");
379    println!("   Input shape: {:?}", input.shape());
380
381    // Compute saliency attribution
382    let saliency =
383        interpreter.compute_attribution(&AttributionMethod::Saliency, &input, Some(1))?;
384    println!("   Saliency attribution shape: {:?}", saliency.shape());
385
386    // Compute integrated gradients
387    let integrated_grad = interpreter.compute_attribution(
388        &AttributionMethod::IntegratedGradients {
389            baseline: BaselineMethod::Zero,
390            num_steps: 50,
391        },
392        &input,
393        Some(1),
394    )?;
395    println!(
396        "   Integrated gradients shape: {:?}",
397        integrated_grad.shape()
398    );
399
400    // Analyze layer activations
401    println!("\n2. Analyzing layer activations...");
402    let _layer_activations = Array2::from_shape_fn((20, 64), |(i, j)| {
403        if (i + j) % 7 == 0 {
404            0.0
405        } else {
406            ((i * j) as f64 / 100.0).tanh()
407        }
408    })
409    .into_dyn();
410
411    interpreter.analyze_layer_activations("conv_layer")?;
412
413    if let Some(stats) = interpreter.layer_statistics().get("conv_layer") {
414        println!("   Layer statistics:");
415        println!("     Mean activation: {:.4}", stats.mean_activation);
416        println!("     Std activation: {:.4}", stats.std_activation);
417        println!("     Sparsity: {:.1}%", stats.sparsity);
418        println!("     Dead neurons: {:.1}%", stats.dead_neuron_percentage);
419    }
420
421    // Generate comprehensive interpretation report
422    println!("\n3. Generating interpretation report...");
423    let report = interpreter.generate_report(&input)?;
424    println!("{}", report);
425
426    println!("✅ Model interpretation demonstration completed!\n");
427    Ok(())
428}

Trait Implementations§

Source§

impl<F: Float + Debug + 'static + ScalarOperand + FromPrimitive + Sum + Clone + Copy> Default for ModelInterpreter<F>

Source§

fn default() -> Self

Returns the “default value” for a type. Read more

Auto Trait Implementations§

§

impl<F> Freeze for ModelInterpreter<F>

§

impl<F> RefUnwindSafe for ModelInterpreter<F>
where F: RefUnwindSafe,

§

impl<F> Send for ModelInterpreter<F>
where F: Send,

§

impl<F> Sync for ModelInterpreter<F>
where F: Sync,

§

impl<F> Unpin for ModelInterpreter<F>
where F: Unpin,

§

impl<F> UnwindSafe for ModelInterpreter<F>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V