// datasynth_eval/ml/mod.rs
1//! ML-readiness evaluation module.
2//!
3//! Validates that generated data is suitable for machine learning tasks
4//! including feature distributions, label quality, and graph structure.
5//!
6//! Also provides baseline task definitions for benchmarking synthetic data.
7
8mod anomaly_scoring;
9mod baselines;
10mod cross_modal;
11mod domain_gap;
12mod embedding_readiness;
13mod feature_quality;
14mod features;
15mod gnn_readiness;
16mod graph;
17mod labels;
18mod scheme_detectability;
19mod splits;
20mod temporal_fidelity;
21
22pub use anomaly_scoring::{
23    AnomalyScoringAnalysis, AnomalyScoringAnalyzer, AnomalyScoringThresholds, ScoredRecord,
24};
25pub use baselines::{
26    get_accounting_baseline_tasks, BaselineAlgorithm, BaselineConfig, BaselineEvaluation,
27    BaselineResult, BaselineSummary, BaselineTask, ClassificationMetrics, ExpectedMetrics,
28    MLTaskType, PerformanceGrade, RankingMetrics, RegressionMetrics,
29};
30pub use cross_modal::{
31    CrossModalAnalysis, CrossModalAnalyzer, CrossModalThresholds, EntityModalData,
32};
33pub use domain_gap::{
34    DistributionSample, DomainGapAnalysis, DomainGapAnalyzer, DomainGapDetail, DomainGapThresholds,
35};
36pub use embedding_readiness::{
37    EmbeddingInput, EmbeddingReadinessAnalysis, EmbeddingReadinessAnalyzer,
38    EmbeddingReadinessThresholds,
39};
40pub use feature_quality::{
41    FeatureQualityAnalysis, FeatureQualityAnalyzer, FeatureQualityThresholds, FeatureVector,
42};
43pub use features::{FeatureAnalysis, FeatureAnalyzer, FeatureStats};
44pub use gnn_readiness::GraphData as GnnGraphData;
45pub use gnn_readiness::{GnnReadinessAnalysis, GnnReadinessAnalyzer, GnnReadinessThresholds};
46pub use graph::{GraphAnalysis, GraphAnalyzer, GraphMetrics};
47pub use labels::{LabelAnalysis, LabelAnalyzer, LabelDistribution};
48pub use scheme_detectability::{
49    SchemeDetectabilityAnalysis, SchemeDetectabilityAnalyzer, SchemeDetectabilityThresholds,
50    SchemeRecord,
51};
52pub use splits::{SplitAnalysis, SplitAnalyzer, SplitMetrics};
53pub use temporal_fidelity::{
54    TemporalFidelityAnalysis, TemporalFidelityAnalyzer, TemporalFidelityThresholds, TemporalRecord,
55};
56
57use serde::{Deserialize, Serialize};
58
59/// Combined ML-readiness evaluation results.
/// Combined ML-readiness evaluation results.
///
/// Aggregates the per-dimension analyses produced by the `ml` submodules.
/// Every analysis is optional: `None` means that dimension was not run.
/// The summary fields (`overall_score`, `passes`, `issues`, `failures`)
/// are recomputed by `check_thresholds` from whichever analyses are set.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MLReadinessEvaluation {
    /// Feature distribution analysis.
    pub features: Option<FeatureAnalysis>,
    /// Label quality analysis.
    pub labels: Option<LabelAnalysis>,
    /// Train/test split analysis.
    pub splits: Option<SplitAnalysis>,
    /// Graph structure analysis.
    pub graph: Option<GraphAnalysis>,
    /// Anomaly scoring analysis.
    ///
    /// Like the other enrichment analyses below, this field is omitted from
    /// serialized output when `None` and defaults to `None` when absent in
    /// the input, so payloads without it still round-trip.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub anomaly_scoring: Option<AnomalyScoringAnalysis>,
    /// Feature quality analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub feature_quality: Option<FeatureQualityAnalysis>,
    /// GNN readiness analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gnn_readiness: Option<GnnReadinessAnalysis>,
    /// Domain gap analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub domain_gap: Option<DomainGapAnalysis>,
    /// Temporal fidelity analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub temporal_fidelity: Option<TemporalFidelityAnalysis>,
    /// Scheme detectability analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scheme_detectability: Option<SchemeDetectabilityAnalysis>,
    /// Cross-modal consistency analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cross_modal: Option<CrossModalAnalysis>,
    /// Embedding readiness analysis.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub embedding_readiness: Option<EmbeddingReadinessAnalysis>,
    /// Overall ML-readiness score (0.0-1.0).
    ///
    /// Unweighted mean of the component scores gathered by
    /// `check_thresholds`; 1.0 when no analyses are present.
    pub overall_score: f64,
    /// Whether data meets ML-readiness criteria (no issues recorded).
    pub passes: bool,
    /// ML-readiness issues found.
    pub issues: Vec<String>,
    /// ML-readiness failures (alias for issues).
    ///
    /// Kept as a clone of `issues` by `check_thresholds`.
    pub failures: Vec<String>,
}
103
104impl MLReadinessEvaluation {
105    /// Create a new empty evaluation.
106    pub fn new() -> Self {
107        Self {
108            features: None,
109            labels: None,
110            splits: None,
111            graph: None,
112            anomaly_scoring: None,
113            feature_quality: None,
114            gnn_readiness: None,
115            domain_gap: None,
116            temporal_fidelity: None,
117            scheme_detectability: None,
118            cross_modal: None,
119            embedding_readiness: None,
120            overall_score: 1.0,
121            passes: true,
122            issues: Vec::new(),
123            failures: Vec::new(),
124        }
125    }
126
127    /// Check all results against thresholds.
128    pub fn check_thresholds(&mut self, thresholds: &crate::config::EvaluationThresholds) {
129        self.issues.clear();
130        self.failures.clear();
131        let mut scores = Vec::new();
132
133        if let Some(ref labels) = self.labels {
134            // Check anomaly rate is within expected range
135            if labels.anomaly_rate < thresholds.anomaly_rate_min {
136                self.issues.push(format!(
137                    "Anomaly rate {} < {} (min threshold)",
138                    labels.anomaly_rate, thresholds.anomaly_rate_min
139                ));
140            }
141            if labels.anomaly_rate > thresholds.anomaly_rate_max {
142                self.issues.push(format!(
143                    "Anomaly rate {} > {} (max threshold)",
144                    labels.anomaly_rate, thresholds.anomaly_rate_max
145                ));
146            }
147
148            // Check label coverage
149            if labels.label_coverage < thresholds.label_coverage_min {
150                self.issues.push(format!(
151                    "Label coverage {} < {} (threshold)",
152                    labels.label_coverage, thresholds.label_coverage_min
153                ));
154            }
155
156            scores.push(labels.quality_score);
157        }
158
159        if let Some(ref splits) = self.splits {
160            if !splits.is_valid {
161                self.issues
162                    .push("Train/test split validation failed".to_string());
163            }
164            scores.push(if splits.is_valid { 1.0 } else { 0.0 });
165        }
166
167        if let Some(ref graph) = self.graph {
168            if graph.connectivity_score < thresholds.graph_connectivity_min {
169                self.issues.push(format!(
170                    "Graph connectivity {} < {} (threshold)",
171                    graph.connectivity_score, thresholds.graph_connectivity_min
172                ));
173            }
174            scores.push(graph.connectivity_score);
175        }
176
177        if let Some(ref features) = self.features {
178            scores.push(features.quality_score);
179        }
180
181        // New ML enrichment evaluators
182        if let Some(ref as_eval) = self.anomaly_scoring {
183            if !as_eval.passes {
184                self.issues.extend(as_eval.issues.clone());
185            }
186            scores.push(as_eval.anomaly_separability);
187        }
188        if let Some(ref fq_eval) = self.feature_quality {
189            if !fq_eval.passes {
190                self.issues.extend(fq_eval.issues.clone());
191            }
192            scores.push(fq_eval.feature_quality_score);
193        }
194        if let Some(ref gnn_eval) = self.gnn_readiness {
195            if !gnn_eval.passes {
196                self.issues.extend(gnn_eval.issues.clone());
197            }
198            scores.push(gnn_eval.gnn_readiness_score);
199        }
200        if let Some(ref dg_eval) = self.domain_gap {
201            if !dg_eval.passes {
202                self.issues.extend(dg_eval.issues.clone());
203            }
204            // Domain gap is inverted: lower = better, so score = 1 - gap
205            scores.push(1.0 - dg_eval.domain_gap_score);
206        }
207        if let Some(ref tf_eval) = self.temporal_fidelity {
208            if !tf_eval.passes {
209                self.issues.extend(tf_eval.issues.clone());
210            }
211            scores.push(tf_eval.temporal_fidelity_score);
212        }
213        if let Some(ref sd_eval) = self.scheme_detectability {
214            if !sd_eval.passes {
215                self.issues.extend(sd_eval.issues.clone());
216            }
217            scores.push(sd_eval.detectability_score);
218        }
219        if let Some(ref cm_eval) = self.cross_modal {
220            if !cm_eval.passes {
221                self.issues.extend(cm_eval.issues.clone());
222            }
223            scores.push(cm_eval.consistency_score);
224        }
225        if let Some(ref er_eval) = self.embedding_readiness {
226            if !er_eval.passes {
227                self.issues.extend(er_eval.issues.clone());
228            }
229            scores.push(er_eval.embedding_readiness_score);
230        }
231
232        self.overall_score = if scores.is_empty() {
233            1.0
234        } else {
235            scores.iter().sum::<f64>() / scores.len() as f64
236        };
237
238        // Sync failures with issues
239        self.failures = self.issues.clone();
240        self.passes = self.issues.is_empty();
241    }
242}
243
244impl Default for MLReadinessEvaluation {
245    fn default() -> Self {
246        Self::new()
247    }
248}