pub struct ModelEvaluator<F: Float + Debug + 'static + Sum + Clone + Copy + FromPrimitive> { /* private fields */ }
Expand description
Enhanced model evaluator
Implementations§
Source§impl<F: Float + Debug + 'static + Sum + Clone + Copy + FromPrimitive> ModelEvaluator<F>
impl<F: Float + Debug + 'static + Sum + Clone + Copy + FromPrimitive> ModelEvaluator<F>
Sourcepub fn add_metric(&mut self, metric: EvaluationMetric)
pub fn add_metric(&mut self, metric: EvaluationMetric)
Add evaluation metric
Sourcepub fn set_cross_validation(&mut self, strategy: CrossValidationStrategy)
pub fn set_cross_validation(&mut self, strategy: CrossValidationStrategy)
Set cross-validation strategy
Sourcepub fn enable_bootstrap(&mut self, n_samples: usize)
pub fn enable_bootstrap(&mut self, n_samples: usize)
Enable bootstrap confidence intervals
Sourcepub fn set_significance_level(&mut self, level: f64)
pub fn set_significance_level(&mut self, level: f64)
Set significance level for statistical tests
Sourcepub fn evaluate(
&mut self,
y_true: &ArrayD<F>,
y_pred: &ArrayD<F>,
model_name: Option<String>,
) -> Result<EvaluationResults<F>>
pub fn evaluate( &mut self, y_true: &ArrayD<F>, y_pred: &ArrayD<F>, model_name: Option<String>, ) -> Result<EvaluationResults<F>>
Evaluate model predictions
Examples found in repository?
examples/neural_advanced_features.rs (lines 131-135)
104fn demonstrate_enhanced_evaluation() -> Result<()> {
105 println!("📊 Enhanced Model Evaluation Demonstration");
106 println!("=========================================\n");
107
108 // Create comprehensive evaluation pipeline
109 let mut evaluator = EvaluationBuilder::<f64>::new()
110 .with_classification_metrics()
111 .with_regression_metrics()
112 .with_cross_validation(CrossValidationStrategy::KFold {
113 k: 5,
114 shuffle: true,
115 })
116 .with_bootstrap(1000)
117 .build();
118
119 // Generate sample predictions and ground truth
120 let y_true_class =
121 Array::from_vec(vec![0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]).into_dyn();
122 let y_pred_class =
123 Array::from_vec(vec![0.1, 0.9, 0.2, 0.8, 0.7, 0.3, 0.9, 0.1, 0.8, 0.2]).into_dyn();
124
125 let y_true_reg =
126 Array::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]).into_dyn();
127 let y_pred_reg =
128 Array::from_vec(vec![1.1, 1.9, 3.2, 3.8, 5.1, 5.9, 7.1, 7.9, 9.2, 9.8]).into_dyn();
129
130 println!("1. Evaluating classification model...");
131 let class_results = evaluator.evaluate(
132 &y_true_class,
133 &y_pred_class,
134 Some("classifier_model".to_string()),
135 )?;
136 println!(
137 " Classification metrics computed: {}",
138 class_results.scores.len()
139 );
140
141 for (metric, score) in &class_results.scores {
142 println!(" {}: {:.4}", metric, score.value);
143 }
144
145 println!("\n2. Evaluating regression model...");
146 let reg_results = evaluator.evaluate(
147 &y_true_reg,
148 &y_pred_reg,
149 Some("regression_model".to_string()),
150 )?;
151 println!(
152 " Regression metrics computed: {}",
153 reg_results.scores.len()
154 );
155
156 for (metric, score) in &reg_results.scores {
157 println!(" {}: {:.4}", metric, score.value);
158 }
159
160 // Generate comprehensive report
161 println!("\n3. Comprehensive Evaluation Report:");
162 println!("{}", evaluator.generate_report(&class_results));
163
164 // Compare models
165 println!("4. Statistical Model Comparison:");
166 let comparison = evaluator.compare_models("classifier_model", "regression_model")?;
167 if let Some(t_test) = &comparison.t_test {
168 println!(
169 " T-test: t={:.3}, p={:.3}, significant={}",
170 t_test.t_statistic, t_test.p_value, t_test.significant
171 );
172 }
173
174 println!("✅ Enhanced evaluation demonstration completed!\n");
175 Ok(())
176}
Sourcepub fn compare_models(
&mut self,
model1_name: &str,
model2_name: &str,
) -> Result<StatisticalTestResults<F>>
pub fn compare_models( &mut self, model1_name: &str, model2_name: &str, ) -> Result<StatisticalTestResults<F>>
Compare two models using statistical tests
Examples found in repository?
examples/neural_advanced_features.rs (line 166)
104fn demonstrate_enhanced_evaluation() -> Result<()> {
105 println!("📊 Enhanced Model Evaluation Demonstration");
106 println!("=========================================\n");
107
108 // Create comprehensive evaluation pipeline
109 let mut evaluator = EvaluationBuilder::<f64>::new()
110 .with_classification_metrics()
111 .with_regression_metrics()
112 .with_cross_validation(CrossValidationStrategy::KFold {
113 k: 5,
114 shuffle: true,
115 })
116 .with_bootstrap(1000)
117 .build();
118
119 // Generate sample predictions and ground truth
120 let y_true_class =
121 Array::from_vec(vec![0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]).into_dyn();
122 let y_pred_class =
123 Array::from_vec(vec![0.1, 0.9, 0.2, 0.8, 0.7, 0.3, 0.9, 0.1, 0.8, 0.2]).into_dyn();
124
125 let y_true_reg =
126 Array::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]).into_dyn();
127 let y_pred_reg =
128 Array::from_vec(vec![1.1, 1.9, 3.2, 3.8, 5.1, 5.9, 7.1, 7.9, 9.2, 9.8]).into_dyn();
129
130 println!("1. Evaluating classification model...");
131 let class_results = evaluator.evaluate(
132 &y_true_class,
133 &y_pred_class,
134 Some("classifier_model".to_string()),
135 )?;
136 println!(
137 " Classification metrics computed: {}",
138 class_results.scores.len()
139 );
140
141 for (metric, score) in &class_results.scores {
142 println!(" {}: {:.4}", metric, score.value);
143 }
144
145 println!("\n2. Evaluating regression model...");
146 let reg_results = evaluator.evaluate(
147 &y_true_reg,
148 &y_pred_reg,
149 Some("regression_model".to_string()),
150 )?;
151 println!(
152 " Regression metrics computed: {}",
153 reg_results.scores.len()
154 );
155
156 for (metric, score) in &reg_results.scores {
157 println!(" {}: {:.4}", metric, score.value);
158 }
159
160 // Generate comprehensive report
161 println!("\n3. Comprehensive Evaluation Report:");
162 println!("{}", evaluator.generate_report(&class_results));
163
164 // Compare models
165 println!("4. Statistical Model Comparison:");
166 let comparison = evaluator.compare_models("classifier_model", "regression_model")?;
167 if let Some(t_test) = &comparison.t_test {
168 println!(
169 " T-test: t={:.3}, p={:.3}, significant={}",
170 t_test.t_statistic, t_test.p_value, t_test.significant
171 );
172 }
173
174 println!("✅ Enhanced evaluation demonstration completed!\n");
175 Ok(())
176}
Sourcepub fn generate_report(&self, results: &EvaluationResults<F>) -> String
pub fn generate_report(&self, results: &EvaluationResults<F>) -> String
Generate comprehensive evaluation report
Examples found in repository?
examples/neural_advanced_features.rs (line 162)
104fn demonstrate_enhanced_evaluation() -> Result<()> {
105 println!("📊 Enhanced Model Evaluation Demonstration");
106 println!("=========================================\n");
107
108 // Create comprehensive evaluation pipeline
109 let mut evaluator = EvaluationBuilder::<f64>::new()
110 .with_classification_metrics()
111 .with_regression_metrics()
112 .with_cross_validation(CrossValidationStrategy::KFold {
113 k: 5,
114 shuffle: true,
115 })
116 .with_bootstrap(1000)
117 .build();
118
119 // Generate sample predictions and ground truth
120 let y_true_class =
121 Array::from_vec(vec![0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]).into_dyn();
122 let y_pred_class =
123 Array::from_vec(vec![0.1, 0.9, 0.2, 0.8, 0.7, 0.3, 0.9, 0.1, 0.8, 0.2]).into_dyn();
124
125 let y_true_reg =
126 Array::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]).into_dyn();
127 let y_pred_reg =
128 Array::from_vec(vec![1.1, 1.9, 3.2, 3.8, 5.1, 5.9, 7.1, 7.9, 9.2, 9.8]).into_dyn();
129
130 println!("1. Evaluating classification model...");
131 let class_results = evaluator.evaluate(
132 &y_true_class,
133 &y_pred_class,
134 Some("classifier_model".to_string()),
135 )?;
136 println!(
137 " Classification metrics computed: {}",
138 class_results.scores.len()
139 );
140
141 for (metric, score) in &class_results.scores {
142 println!(" {}: {:.4}", metric, score.value);
143 }
144
145 println!("\n2. Evaluating regression model...");
146 let reg_results = evaluator.evaluate(
147 &y_true_reg,
148 &y_pred_reg,
149 Some("regression_model".to_string()),
150 )?;
151 println!(
152 " Regression metrics computed: {}",
153 reg_results.scores.len()
154 );
155
156 for (metric, score) in &reg_results.scores {
157 println!(" {}: {:.4}", metric, score.value);
158 }
159
160 // Generate comprehensive report
161 println!("\n3. Comprehensive Evaluation Report:");
162 println!("{}", evaluator.generate_report(&class_results));
163
164 // Compare models
165 println!("4. Statistical Model Comparison:");
166 let comparison = evaluator.compare_models("classifier_model", "regression_model")?;
167 if let Some(t_test) = &comparison.t_test {
168 println!(
169 " T-test: t={:.3}, p={:.3}, significant={}",
170 t_test.t_statistic, t_test.p_value, t_test.significant
171 );
172 }
173
174 println!("✅ Enhanced evaluation demonstration completed!\n");
175 Ok(())
176}
Sourcepub fn get_cached_results(
&self,
model_name: &str,
) -> Option<&EvaluationResults<F>>
pub fn get_cached_results( &self, model_name: &str, ) -> Option<&EvaluationResults<F>>
Get cached evaluation results
Sourcepub fn clear_cache(&mut self)
pub fn clear_cache(&mut self)
Clear results cache
Trait Implementations§
Auto Trait Implementations§
impl<F> Freeze for ModelEvaluator<F>
impl<F> RefUnwindSafe for ModelEvaluator<F> where
F: RefUnwindSafe,
impl<F> Send for ModelEvaluator<F> where
F: Send,
impl<F> Sync for ModelEvaluator<F> where
F: Sync,
impl<F> Unpin for ModelEvaluator<F> where
F: Unpin,
impl<F> UnwindSafe for ModelEvaluator<F> where
F: UnwindSafe,
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self
into a Left
variant of Either<Self, Self>
if into_left
is true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self
into a Left
variant of Either<Self, Self>
if into_left(&self)
returns true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read more