Struct ModelEvaluator

pub struct ModelEvaluator<F: Float + Debug + 'static + Sum + Clone + Copy + FromPrimitive> { /* private fields */ }

Enhanced model evaluator. Computes a configurable set of evaluation metrics over predictions, with optional cross-validation, bootstrap confidence intervals, and statistical significance testing, and caches per-model results for later comparison.

Implementations

impl<F: Float + Debug + 'static + Sum + Clone + Copy + FromPrimitive> ModelEvaluator<F>

pub fn new() -> Self

Create a new model evaluator

pub fn add_metric(&mut self, metric: EvaluationMetric)

Add evaluation metric

pub fn set_cross_validation(&mut self, strategy: CrossValidationStrategy)

Set cross-validation strategy

pub fn enable_bootstrap(&mut self, n_samples: usize)

Enable bootstrap confidence intervals

pub fn set_significance_level(&mut self, level: f64)

Set significance level for statistical tests
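
A minimal configuration sketch tying the methods above together. The EvaluationMetric variant shown is hypothetical (check the enum for its actual variants); the other calls use only the signatures documented here.

let mut evaluator: ModelEvaluator<f64> = ModelEvaluator::new();
// Hypothetical variant name; substitute one that EvaluationMetric actually defines.
evaluator.add_metric(EvaluationMetric::Accuracy);
// 5-fold shuffled cross-validation, mirroring the repository example below.
evaluator.set_cross_validation(CrossValidationStrategy::KFold { k: 5, shuffle: true });
// Bootstrap confidence intervals from 1000 resamples.
evaluator.enable_bootstrap(1000);
// Significance threshold used by the statistical tests.
evaluator.set_significance_level(0.05);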

pub fn evaluate( &mut self, y_true: &ArrayD<F>, y_pred: &ArrayD<F>, model_name: Option<String>, ) -> Result<EvaluationResults<F>>

Evaluate model predictions

Examples found in repository:
examples/neural_advanced_features.rs (lines 131-135)
104 fn demonstrate_enhanced_evaluation() -> Result<()> {
105    println!("📊 Enhanced Model Evaluation Demonstration");
106    println!("=========================================\n");
107
108    // Create comprehensive evaluation pipeline
109    let mut evaluator = EvaluationBuilder::<f64>::new()
110        .with_classification_metrics()
111        .with_regression_metrics()
112        .with_cross_validation(CrossValidationStrategy::KFold {
113            k: 5,
114            shuffle: true,
115        })
116        .with_bootstrap(1000)
117        .build();
118
119    // Generate sample predictions and ground truth
120    let y_true_class =
121        Array::from_vec(vec![0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]).into_dyn();
122    let y_pred_class =
123        Array::from_vec(vec![0.1, 0.9, 0.2, 0.8, 0.7, 0.3, 0.9, 0.1, 0.8, 0.2]).into_dyn();
124
125    let y_true_reg =
126        Array::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]).into_dyn();
127    let y_pred_reg =
128        Array::from_vec(vec![1.1, 1.9, 3.2, 3.8, 5.1, 5.9, 7.1, 7.9, 9.2, 9.8]).into_dyn();
129
130    println!("1. Evaluating classification model...");
131    let class_results = evaluator.evaluate(
132        &y_true_class,
133        &y_pred_class,
134        Some("classifier_model".to_string()),
135    )?;
136    println!(
137        "   Classification metrics computed: {}",
138        class_results.scores.len()
139    );
140
141    for (metric, score) in &class_results.scores {
142        println!("   {}: {:.4}", metric, score.value);
143    }
144
145    println!("\n2. Evaluating regression model...");
146    let reg_results = evaluator.evaluate(
147        &y_true_reg,
148        &y_pred_reg,
149        Some("regression_model".to_string()),
150    )?;
151    println!(
152        "   Regression metrics computed: {}",
153        reg_results.scores.len()
154    );
155
156    for (metric, score) in &reg_results.scores {
157        println!("   {}: {:.4}", metric, score.value);
158    }
159
160    // Generate comprehensive report
161    println!("\n3. Comprehensive Evaluation Report:");
162    println!("{}", evaluator.generate_report(&class_results));
163
164    // Compare models
165    println!("4. Statistical Model Comparison:");
166    let comparison = evaluator.compare_models("classifier_model", "regression_model")?;
167    if let Some(t_test) = &comparison.t_test {
168        println!(
169            "   T-test: t={:.3}, p={:.3}, significant={}",
170            t_test.t_statistic, t_test.p_value, t_test.significant
171        );
172    }
173
174    println!("✅ Enhanced evaluation demonstration completed!\n");
175    Ok(())
176 }
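
The returned EvaluationResults can also be consumed programmatically. A minimal sketch assuming only what the example above demonstrates (scores iterates as (metric, score) pairs and each score exposes a numeric value field); the 0.5 threshold is illustrative:

// Flag any metric whose score falls below an acceptance threshold.
for (metric, score) in &class_results.scores {
    if score.value < 0.5 {
        println!("warning: {} = {:.4} is below threshold", metric, score.value);
    }
}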

pub fn compare_models( &mut self, model1_name: &str, model2_name: &str, ) -> Result<StatisticalTestResults<F>>

Compare two models using statistical tests
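
A minimal sketch of interpreting the returned StatisticalTestResults, assuming the optional t_test field with t_statistic, p_value, and significant fields shown in the repository example below; the model names are illustrative and must match names previously passed to evaluate:

let comparison = evaluator.compare_models("model_a", "model_b")?;
if let Some(t) = &comparison.t_test {
    // significant is presumably judged against the level set via set_significance_level.
    println!("t = {:.3}, p = {:.3}, significant = {}", t.t_statistic, t.p_value, t.significant);
}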

Examples found in repository:
examples/neural_advanced_features.rs (line 166)
This is the same demonstrate_enhanced_evaluation example reproduced in full under evaluate above; the compare_models call is the one at line 166.

pub fn generate_report(&self, results: &EvaluationResults<F>) -> String

Generate comprehensive evaluation report
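
Because the report is returned as a plain String, it can be printed or persisted directly. A minimal sketch (the output path is illustrative, and results is assumed to come from a prior evaluate call):

let report = evaluator.generate_report(&results);
println!("{}", report);
// Persist the report alongside other run artifacts.
std::fs::write("evaluation_report.txt", &report).expect("failed to write report");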

Examples found in repository:
examples/neural_advanced_features.rs (line 162)
This is the same demonstrate_enhanced_evaluation example reproduced in full under evaluate above; the generate_report call is the one at line 162.

pub fn get_cached_results( &self, model_name: &str, ) -> Option<&EvaluationResults<F>>

Get cached evaluation results

pub fn clear_cache(&mut self)

Clear results cache
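
Results appear to be cached under the model name passed to evaluate, which makes the cache round trip straightforward. A minimal sketch (the model name and input arrays are illustrative):

// Evaluate under an explicit name so the results are cached.
evaluator.evaluate(&y_true, &y_pred, Some("my_model".to_string()))?;

// Retrieve the cached results later without re-evaluating.
if let Some(cached) = evaluator.get_cached_results("my_model") {
    println!("cached metrics: {}", cached.scores.len());
}

// Drop all cached results, e.g. between experiments.
evaluator.clear_cache();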

Trait Implementations

impl<F: Float + Debug + 'static + Sum + Clone + Copy + FromPrimitive> Default for ModelEvaluator<F>

fn default() -> Self

Returns the “default value” for a type.

Auto Trait Implementations

impl<F> Freeze for ModelEvaluator<F>

impl<F> RefUnwindSafe for ModelEvaluator<F>
where F: RefUnwindSafe,

impl<F> Send for ModelEvaluator<F>
where F: Send,

impl<F> Sync for ModelEvaluator<F>
where F: Sync,

impl<F> Unpin for ModelEvaluator<F>
where F: Unpin,

impl<F> UnwindSafe for ModelEvaluator<F>
where F: UnwindSafe,

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant.

impl<T> Pointable for T

const ALIGN: usize

The alignment of the pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointable value with the given initializer.

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer.

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer.

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V