//! Quality assessment for feature selection results.
//!
//! Scores a selected feature subset for compactness, efficiency, information
//! density, and balance, and can fold in optional predictive-performance,
//! model-complexity, and interpretability metrics.

use scirs2_core::ndarray::{ArrayView1, ArrayView2};
use sklears_core::error::{Result as SklResult, SklearsError};
use thiserror::Error;

type Result<T> = SklResult<T>;

impl From<QualityError> for SklearsError {
    fn from(err: QualityError) -> Self {
        SklearsError::FitError(format!("Quality assessment error: {}", err))
    }
}

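/// Errors returned by the feature-selection quality-assessment routines.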
#[derive(Debug, Error)]
pub enum QualityError {
    #[error("Feature matrix is empty")]
    EmptyFeatureMatrix,
    #[error("Target array is empty")]
    EmptyTarget,
    #[error("Feature and target lengths do not match")]
    LengthMismatch,
    #[error("Invalid feature indices")]
    InvalidFeatureIndices,
    #[error("Insufficient data for quality assessment")]
    InsufficientData,
}

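/// Size and information characteristics of a selected feature subset.
///
/// A minimal usage sketch (hypothetical values):
///
/// ```ignore
/// let quality = SelectionQuality::new(5, 20, 0.8, 0.7);
/// let result = quality.assess();
/// assert!(result.overall_quality_score <= 1.0);
/// ```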
#[derive(Debug, Clone)]
pub struct SelectionQuality {
    n_features_selected: usize,
    n_features_total: usize,
    selection_ratio: f64,
    feature_efficiency: f64,
    information_density: f64,
}

impl SelectionQuality {
    pub fn new(
        n_features_selected: usize,
        n_features_total: usize,
        feature_efficiency: f64,
        information_density: f64,
    ) -> Self {
        let selection_ratio = if n_features_total > 0 {
            n_features_selected as f64 / n_features_total as f64
        } else {
            0.0
        };

        Self {
            n_features_selected,
            n_features_total,
            selection_ratio,
            feature_efficiency,
            information_density,
        }
    }

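    /// Score the selection along four axes (compactness, efficiency,
    /// information, balance) and average them into an overall score.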
    pub fn assess(&self) -> QualityAssessmentResult {
        let compactness_score = self.assess_compactness();
        let efficiency_score = self.assess_efficiency();
        let information_score = self.assess_information();
        let balance_score = self.assess_balance();

        let overall_score =
            (compactness_score + efficiency_score + information_score + balance_score) / 4.0;

        QualityAssessmentResult {
            overall_quality_score: overall_score,
            compactness_score,
            efficiency_score,
            information_score,
            balance_score,
            selection_ratio: self.selection_ratio,
            n_features_selected: self.n_features_selected,
            n_features_total: self.n_features_total,
        }
    }

    /// Smaller selections score higher: 1.0 - selection_ratio, floored at 0.
    fn assess_compactness(&self) -> f64 {
        (1.0 - self.selection_ratio).max(0.0)
    }

    fn assess_efficiency(&self) -> f64 {
        self.feature_efficiency.clamp(0.0, 1.0)
    }

    fn assess_information(&self) -> f64 {
        self.information_density.clamp(0.0, 1.0)
    }

    /// Penalize deviation from a nominal optimum of selecting ~20% of features.
    fn assess_balance(&self) -> f64 {
        let optimal_ratio = 0.2;
        let deviation = (self.selection_ratio - optimal_ratio).abs();
        (1.0 - deviation * 2.0).max(0.0)
    }
}

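/// Standard classification metrics for the model trained on the selected features.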
#[derive(Debug, Clone)]
pub struct PredictivePerformance {
    pub accuracy: f64,
    pub precision: f64,
    pub recall: f64,
    pub f1_score: f64,
    pub auc_roc: f64,
}

impl PredictivePerformance {
    pub fn new(accuracy: f64, precision: f64, recall: f64, f1_score: f64, auc_roc: f64) -> Self {
        Self {
            accuracy,
            precision,
            recall,
            f1_score,
            auc_roc,
        }
    }

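    /// Unweighted mean of the five metrics.
    ///
    /// ```ignore
    /// let p = PredictivePerformance::new(0.85, 0.80, 0.90, 0.85, 0.88);
    /// assert!((p.overall_score() - 0.856).abs() < 0.01);
    /// ```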
    pub fn overall_score(&self) -> f64 {
        (self.accuracy + self.precision + self.recall + self.f1_score + self.auc_roc) / 5.0
    }

    pub fn assess_quality(&self) -> &'static str {
        let overall = self.overall_score();
        match overall {
            x if x >= 0.9 => "Excellent",
            x if x >= 0.8 => "Very Good",
            x if x >= 0.7 => "Good",
            x if x >= 0.6 => "Acceptable",
            x if x >= 0.5 => "Poor",
            _ => "Very Poor",
        }
    }
}

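/// Resource footprint of the downstream model: size, timing, and memory.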
#[derive(Debug, Clone)]
pub struct ModelComplexity {
    pub n_features: usize,
    pub n_parameters: usize,
    pub training_time: f64,
    pub prediction_time: f64,
    pub memory_usage: usize,
}

impl ModelComplexity {
    pub fn new(
        n_features: usize,
        n_parameters: usize,
        training_time: f64,
        prediction_time: f64,
        memory_usage: usize,
    ) -> Self {
        Self {
            n_features,
            n_parameters,
            training_time,
            prediction_time,
            memory_usage,
        }
    }

    pub fn complexity_score(&self) -> f64 {
        // Normalize each dimension against a rough reference scale, capped at 1.0:
        // 1_000 features, 10_000 parameters, one hour of training, 1 GB of memory.
        let normalized_features = (self.n_features as f64 / 1000.0).min(1.0);
        let normalized_parameters = (self.n_parameters as f64 / 10000.0).min(1.0);
        let normalized_time = (self.training_time / 3600.0).min(1.0);
        let normalized_memory = (self.memory_usage as f64 / 1_000_000_000.0).min(1.0);

        (normalized_features + normalized_parameters + normalized_time + normalized_memory) / 4.0
    }

    pub fn assess_complexity(&self) -> &'static str {
        let score = self.complexity_score();
        match score {
            x if x >= 0.8 => "Very High Complexity",
            x if x >= 0.6 => "High Complexity",
            x if x >= 0.4 => "Moderate Complexity",
            x if x >= 0.2 => "Low Complexity",
            _ => "Very Low Complexity",
        }
    }
}

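/// How easily the resulting model can be understood and explained.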
#[derive(Debug, Clone)]
pub struct InterpretabilityMetrics {
    pub feature_importance_clarity: f64,
    pub feature_interaction_complexity: f64,
    pub model_transparency: f64,
    pub explanation_quality: f64,
}

impl InterpretabilityMetrics {
    pub fn new(
        feature_importance_clarity: f64,
        feature_interaction_complexity: f64,
        model_transparency: f64,
        explanation_quality: f64,
    ) -> Self {
        Self {
            feature_importance_clarity,
            feature_interaction_complexity,
            model_transparency,
            explanation_quality,
        }
    }

    pub fn interpretability_score(&self) -> f64 {
        let clarity_score = self.feature_importance_clarity;
        // Interaction complexity hurts interpretability, so invert it.
        let complexity_score = 1.0 - self.feature_interaction_complexity;
        let transparency_score = self.model_transparency;
        let explanation_score = self.explanation_quality;

        (clarity_score + complexity_score + transparency_score + explanation_score) / 4.0
    }

    pub fn assess_interpretability(&self) -> &'static str {
        let score = self.interpretability_score();
        match score {
            x if x >= 0.8 => "Highly Interpretable",
            x if x >= 0.6 => "Moderately Interpretable",
            x if x >= 0.4 => "Somewhat Interpretable",
            x if x >= 0.2 => "Poorly Interpretable",
            _ => "Very Poorly Interpretable",
        }
    }
}

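/// Per-axis scores produced by [`SelectionQuality::assess`].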
#[derive(Debug, Clone)]
pub struct QualityAssessmentResult {
    pub overall_quality_score: f64,
    pub compactness_score: f64,
    pub efficiency_score: f64,
    pub information_score: f64,
    pub balance_score: f64,
    pub selection_ratio: f64,
    pub n_features_selected: usize,
    pub n_features_total: usize,
}

impl QualityAssessmentResult {
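    /// Render the scores and recommendations as a human-readable report.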
    pub fn report(&self) -> String {
        let mut report = String::new();

        report.push_str("=== Feature Selection Quality Assessment ===\n\n");

        report.push_str(&format!(
            "Features selected: {} out of {} ({:.1}%)\n",
            self.n_features_selected,
            self.n_features_total,
            self.selection_ratio * 100.0
        ));

        report.push_str("\nQuality Scores (0.0 - 1.0):\n");
        report.push_str(&format!(
            " Overall Quality: {:.4} - {}\n",
            self.overall_quality_score,
            self.interpret_overall()
        ));
        report.push_str(&format!(
            " Compactness: {:.4} - {}\n",
            self.compactness_score,
            self.interpret_compactness()
        ));
        report.push_str(&format!(
            " Efficiency: {:.4} - {}\n",
            self.efficiency_score,
            self.interpret_efficiency()
        ));
        report.push_str(&format!(
            " Information Density: {:.4} - {}\n",
            self.information_score,
            self.interpret_information()
        ));
        report.push_str(&format!(
            " Balance: {:.4} - {}\n",
            self.balance_score,
            self.interpret_balance()
        ));

        report.push_str("\nRecommendations:\n");
        report.push_str(&self.generate_recommendations());

        report
    }

    fn interpret_overall(&self) -> &'static str {
        match self.overall_quality_score {
            x if x >= 0.8 => "Excellent",
            x if x >= 0.6 => "Good",
            x if x >= 0.4 => "Acceptable",
            x if x >= 0.2 => "Poor",
            _ => "Very Poor",
        }
    }

    fn interpret_compactness(&self) -> &'static str {
        match self.compactness_score {
            x if x >= 0.8 => "Very compact feature set",
            x if x >= 0.6 => "Reasonably compact",
            x if x >= 0.4 => "Moderately compact",
            x if x >= 0.2 => "Not very compact",
            _ => "Too many features selected",
        }
    }

    fn interpret_efficiency(&self) -> &'static str {
        match self.efficiency_score {
            x if x >= 0.8 => "Highly efficient features",
            x if x >= 0.6 => "Good feature efficiency",
            x if x >= 0.4 => "Moderate efficiency",
            x if x >= 0.2 => "Low efficiency",
            _ => "Very low efficiency",
        }
    }

    fn interpret_information(&self) -> &'static str {
        match self.information_score {
            x if x >= 0.8 => "Very high information density",
            x if x >= 0.6 => "Good information content",
            x if x >= 0.4 => "Moderate information",
            x if x >= 0.2 => "Low information content",
            _ => "Very low information",
        }
    }

    fn interpret_balance(&self) -> &'static str {
        match self.balance_score {
            x if x >= 0.8 => "Well-balanced selection",
            x if x >= 0.6 => "Good balance",
            x if x >= 0.4 => "Acceptable balance",
            x if x >= 0.2 => "Poor balance",
            _ => "Very poor balance",
        }
    }

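    /// Suggest concrete follow-ups for any sub-score below 0.5.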
    fn generate_recommendations(&self) -> String {
        let mut recommendations = String::new();

        if self.compactness_score < 0.5 {
            recommendations.push_str("- Consider reducing the number of selected features\n");
        }

        if self.efficiency_score < 0.5 {
            recommendations.push_str("- Review feature selection criteria to improve efficiency\n");
        }

        if self.information_score < 0.5 {
            recommendations.push_str("- Look for features with higher information content\n");
        }

        if self.balance_score < 0.5 {
            if self.selection_ratio < 0.1 {
                recommendations
                    .push_str("- Consider selecting more features for better coverage\n");
            } else if self.selection_ratio > 0.4 {
                recommendations
                    .push_str("- Consider selecting fewer features to avoid redundancy\n");
            }
        }

        if self.overall_quality_score >= 0.8 {
            recommendations
                .push_str("- Feature selection quality is excellent - no major changes needed\n");
        } else if recommendations.is_empty() {
            recommendations.push_str("- Overall quality is acceptable but could be improved\n");
        }

        recommendations
    }
}

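/// Entry point for assessing a feature selection against the data it was
/// derived from.
///
/// A minimal usage sketch (hypothetical data; `array!` comes from
/// `scirs2_core::ndarray`):
///
/// ```ignore
/// use scirs2_core::ndarray::array;
///
/// let X = array![[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]];
/// let y = array![0.0, 1.0, 1.0];
/// let assessment =
///     QualityAssessment::assess(X.view(), y.view(), &[0], None, None, None)?;
/// println!("{}", assessment.report());
/// ```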
#[derive(Debug, Clone)]
pub struct QualityAssessment;

impl QualityAssessment {
    #[allow(non_snake_case)]
    pub fn assess(
        X: ArrayView2<f64>,
        y: ArrayView1<f64>,
        feature_indices: &[usize],
        performance: Option<PredictivePerformance>,
        complexity: Option<ModelComplexity>,
        interpretability: Option<InterpretabilityMetrics>,
    ) -> Result<ComprehensiveQualityAssessment> {
        if X.is_empty() {
            return Err(QualityError::EmptyFeatureMatrix.into());
        }
        if y.is_empty() {
            return Err(QualityError::EmptyTarget.into());
        }
        if X.nrows() != y.len() {
            return Err(QualityError::LengthMismatch.into());
        }

        let feature_efficiency = Self::compute_feature_efficiency(X, y, feature_indices)?;
        let information_density = Self::compute_information_density(X, y, feature_indices)?;

        let selection_quality = SelectionQuality::new(
            feature_indices.len(),
            X.ncols(),
            feature_efficiency,
            information_density,
        );

        let quality_result = selection_quality.assess();

        Ok(ComprehensiveQualityAssessment {
            selection_quality: quality_result,
            predictive_performance: performance,
            model_complexity: complexity,
            interpretability_metrics: interpretability,
        })
    }

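    /// Mean per-feature signal-to-noise ratio over the selected features:
    /// signal is |Pearson correlation| with the target, noise is the feature's
    /// coefficient of variation, and each ratio is capped at 1.0.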
    #[allow(non_snake_case)]
    fn compute_feature_efficiency(
        X: ArrayView2<f64>,
        y: ArrayView1<f64>,
        feature_indices: &[usize],
    ) -> Result<f64> {
        if feature_indices.is_empty() {
            return Ok(0.0);
        }

        let mut total_efficiency = 0.0;

        for &feature_idx in feature_indices {
            if feature_idx >= X.ncols() {
                return Err(QualityError::InvalidFeatureIndices.into());
            }

            let feature_column = X.column(feature_idx);

            let signal = Self::compute_signal_strength(feature_column, y)?;
            let noise = Self::compute_noise_level(feature_column)?;

            let efficiency = if noise > 1e-10 {
                signal / noise
            } else {
                signal
            };

            total_efficiency += efficiency.min(1.0);
        }

        Ok(total_efficiency / feature_indices.len() as f64)
    }

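    /// Absolute Pearson correlation between a feature column and the target.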
    fn compute_signal_strength(feature: ArrayView1<f64>, target: ArrayView1<f64>) -> Result<f64> {
        let n = feature.len() as f64;
        if n < 2.0 {
            return Ok(0.0);
        }

        let mean_x = feature.mean().unwrap_or(0.0);
        let mean_y = target.mean().unwrap_or(0.0);

        let mut sum_xy = 0.0;
        let mut sum_x2 = 0.0;
        let mut sum_y2 = 0.0;

        for i in 0..feature.len() {
            let dx = feature[i] - mean_x;
            let dy = target[i] - mean_y;
            sum_xy += dx * dy;
            sum_x2 += dx * dx;
            sum_y2 += dy * dy;
        }

        let denom = (sum_x2 * sum_y2).sqrt();
        if denom < 1e-10 {
            return Ok(0.0);
        }

        Ok((sum_xy / denom).abs())
    }

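    /// Coefficient of variation (std dev / |mean|) as a simple noise proxy.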
    fn compute_noise_level(feature: ArrayView1<f64>) -> Result<f64> {
        let mean = feature.mean().unwrap_or(0.0);

        if mean.abs() < 1e-10 {
            // The coefficient of variation is undefined for a near-zero mean;
            // treat the feature as maximally noisy.
            return Ok(1.0);
        }

        let variance = feature.var(1.0);
        let std_dev = variance.sqrt();

        Ok(std_dev / mean.abs())
    }

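    /// Mean histogram entropy of the selected features, normalized to [0, 1]
    /// by the maximum entropy attainable with the binning used.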
    #[allow(non_snake_case)]
    fn compute_information_density(
        X: ArrayView2<f64>,
        _y: ArrayView1<f64>,
        feature_indices: &[usize],
    ) -> Result<f64> {
        if feature_indices.is_empty() {
            return Ok(0.0);
        }

        let mut total_density = 0.0;

        for &feature_idx in feature_indices {
            if feature_idx >= X.ncols() {
                return Err(QualityError::InvalidFeatureIndices.into());
            }

            let feature_column = X.column(feature_idx);
            let density = Self::compute_feature_entropy(feature_column)?;
            total_density += density;
        }

        // Entropy is computed over at most 10 bins, so the attainable maximum
        // is ln(min(10, n_samples)) rather than ln(n_samples).
        let max_entropy = (10.min(X.nrows()) as f64).ln();
        Ok((total_density / feature_indices.len() as f64) / max_entropy.max(1.0))
    }

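    /// Shannon entropy (natural log) of a 10-bin histogram of the feature.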
    fn compute_feature_entropy(feature: ArrayView1<f64>) -> Result<f64> {
        let n_bins = 10.min(feature.len());
        if n_bins <= 1 {
            return Ok(0.0);
        }

        let min_val = feature.iter().fold(f64::INFINITY, |acc, &x| acc.min(x));
        let max_val = feature.iter().fold(f64::NEG_INFINITY, |acc, &x| acc.max(x));

        if (max_val - min_val).abs() < 1e-10 {
            // A constant feature carries no information.
            return Ok(0.0);
        }

        let bin_width = (max_val - min_val) / n_bins as f64;
        let mut bin_counts = vec![0; n_bins];

        for &value in feature.iter() {
            let bin = ((value - min_val) / bin_width).floor() as usize;
            let bin = bin.min(n_bins - 1);
            bin_counts[bin] += 1;
        }

        let total = feature.len() as f64;
        let mut entropy = 0.0;

        for count in bin_counts {
            if count > 0 {
                let probability = count as f64 / total;
                entropy -= probability * probability.ln();
            }
        }

        Ok(entropy)
    }
}

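/// Selection quality plus optional performance, complexity, and
/// interpretability assessments.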
#[derive(Debug, Clone)]
pub struct ComprehensiveQualityAssessment {
    pub selection_quality: QualityAssessmentResult,
    pub predictive_performance: Option<PredictivePerformance>,
    pub model_complexity: Option<ModelComplexity>,
    pub interpretability_metrics: Option<InterpretabilityMetrics>,
}

impl ComprehensiveQualityAssessment {
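    /// Render all available sections into a single multi-line report.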
    pub fn report(&self) -> String {
        let mut report = String::new();

        report.push_str("=== Comprehensive Feature Selection Quality Assessment ===\n\n");

        report.push_str(&self.selection_quality.report());

        if let Some(ref performance) = self.predictive_performance {
            report.push_str("\n=== Predictive Performance ===\n");
            report.push_str(&format!(
                "Overall Performance: {:.4} ({})\n",
                performance.overall_score(),
                performance.assess_quality()
            ));
            report.push_str(&format!(" Accuracy: {:.4}\n", performance.accuracy));
            report.push_str(&format!(" Precision: {:.4}\n", performance.precision));
            report.push_str(&format!(" Recall: {:.4}\n", performance.recall));
            report.push_str(&format!(" F1 Score: {:.4}\n", performance.f1_score));
            report.push_str(&format!(" AUC-ROC: {:.4}\n", performance.auc_roc));
        }

        if let Some(ref complexity) = self.model_complexity {
            report.push_str("\n=== Model Complexity ===\n");
            report.push_str(&format!(
                "Complexity Level: {} (Score: {:.4})\n",
                complexity.assess_complexity(),
                complexity.complexity_score()
            ));
            report.push_str(&format!(" Features: {}\n", complexity.n_features));
            report.push_str(&format!(" Parameters: {}\n", complexity.n_parameters));
            report.push_str(&format!(
                " Training Time: {:.2}s\n",
                complexity.training_time
            ));
            report.push_str(&format!(
                " Prediction Time: {:.6}s\n",
                complexity.prediction_time
            ));
            report.push_str(&format!(
                " Memory Usage: {} bytes\n",
                complexity.memory_usage
            ));
        }

        if let Some(ref interpretability) = self.interpretability_metrics {
            report.push_str("\n=== Interpretability ===\n");
            report.push_str(&format!(
                "Interpretability Level: {} (Score: {:.4})\n",
                interpretability.assess_interpretability(),
                interpretability.interpretability_score()
            ));
            report.push_str(&format!(
                " Feature Importance Clarity: {:.4}\n",
                interpretability.feature_importance_clarity
            ));
            report.push_str(&format!(
                " Feature Interaction Complexity: {:.4}\n",
                interpretability.feature_interaction_complexity
            ));
            report.push_str(&format!(
                " Model Transparency: {:.4}\n",
                interpretability.model_transparency
            ));
            report.push_str(&format!(
                " Explanation Quality: {:.4}\n",
                interpretability.explanation_quality
            ));
        }

        report.push_str(&format!("\n{}\n", self.overall_recommendation()));

        report
    }

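    /// Blend quality, performance, inverted complexity, and interpretability
    /// (missing components default to a neutral 0.5) into a verdict line.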
    fn overall_recommendation(&self) -> String {
        let mut recommendation = String::new();
        recommendation.push_str("=== Overall Recommendation ===\n");

        let quality_score = self.selection_quality.overall_quality_score;
        let performance_score = self
            .predictive_performance
            .as_ref()
            .map(|p| p.overall_score())
            .unwrap_or(0.5);
        let complexity_score = 1.0
            - self
                .model_complexity
                .as_ref()
                .map(|c| c.complexity_score())
                .unwrap_or(0.5);
        let interpretability_score = self
            .interpretability_metrics
            .as_ref()
            .map(|i| i.interpretability_score())
            .unwrap_or(0.5);

        let overall_score =
            (quality_score + performance_score + complexity_score + interpretability_score) / 4.0;

        match overall_score {
            x if x >= 0.8 => recommendation
                .push_str("EXCELLENT: Feature selection is of high quality across all dimensions"),
            x if x >= 0.6 => recommendation
                .push_str("GOOD: Feature selection is solid with minor room for improvement"),
            x if x >= 0.4 => recommendation.push_str(
                "ACCEPTABLE: Feature selection meets basic requirements but could be improved",
            ),
            x if x >= 0.2 => {
                recommendation.push_str("POOR: Feature selection needs significant improvement")
            }
            _ => recommendation.push_str("CRITICAL: Feature selection requires major revision"),
        }

        recommendation
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_selection_quality() {
        let quality = SelectionQuality::new(5, 20, 0.8, 0.7);
        let result = quality.assess();

        assert!(result.overall_quality_score >= 0.0 && result.overall_quality_score <= 1.0);
        assert_eq!(result.n_features_selected, 5);
        assert_eq!(result.n_features_total, 20);
        assert_eq!(result.selection_ratio, 0.25);
    }

    #[test]
    fn test_predictive_performance() {
        let performance = PredictivePerformance::new(0.85, 0.80, 0.90, 0.85, 0.88);
        assert!((performance.overall_score() - 0.856).abs() < 0.01);
        assert_eq!(performance.assess_quality(), "Very Good");
    }

    #[test]
    fn test_model_complexity() {
        let complexity = ModelComplexity::new(10, 100, 30.0, 0.01, 1000000);
        let score = complexity.complexity_score();
        assert!(score >= 0.0 && score <= 1.0);
        assert!(!complexity.assess_complexity().is_empty());
    }

    #[test]
    fn test_interpretability_metrics() {
        let interpretability = InterpretabilityMetrics::new(0.8, 0.3, 0.9, 0.7);
        let score = interpretability.interpretability_score();
        assert!(score >= 0.0 && score <= 1.0);
        assert!(!interpretability.assess_interpretability().is_empty());
    }

    #[test]
    fn test_quality_assessment() {
        let X = array![
            [1.0, 2.0, 3.0, 4.0],
            [2.0, 3.0, 4.0, 5.0],
            [3.0, 4.0, 5.0, 6.0],
            [4.0, 5.0, 6.0, 7.0],
            [5.0, 6.0, 7.0, 8.0],
        ];
        let y = array![0.0, 0.0, 1.0, 1.0, 1.0];

        let feature_indices = vec![0, 2];

        let performance = PredictivePerformance::new(0.8, 0.75, 0.85, 0.8, 0.82);
        let complexity = ModelComplexity::new(2, 20, 10.0, 0.001, 100000);
        let interpretability = InterpretabilityMetrics::new(0.9, 0.2, 0.8, 0.85);

        let assessment = QualityAssessment::assess(
            X.view(),
            y.view(),
            &feature_indices,
            Some(performance),
            Some(complexity),
            Some(interpretability),
        )
        .unwrap();

        assert!(assessment.selection_quality.overall_quality_score >= 0.0);
        assert!(assessment.predictive_performance.is_some());
        assert!(assessment.model_complexity.is_some());
        assert!(assessment.interpretability_metrics.is_some());

        let report = assessment.report();
        assert!(report.contains("Comprehensive"));
        assert!(report.contains("Recommendation"));
    }

    #[test]
    fn test_quality_assessment_basic() {
        let X = array![[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]];
        let y = array![0.0, 1.0, 1.0];

        let feature_indices = vec![0];

        let assessment =
            QualityAssessment::assess(X.view(), y.view(), &feature_indices, None, None, None)
                .unwrap();

        assert!(assessment.selection_quality.overall_quality_score >= 0.0);
        assert!(assessment.predictive_performance.is_none());
        assert!(assessment.model_complexity.is_none());
        assert!(assessment.interpretability_metrics.is_none());

        let report = assessment.report();
        assert!(report.contains("Quality Assessment"));
    }
}
818}