#![allow(clippy::field_reassign_with_default)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::upper_case_acronyms)]

pub mod benchmarks;
pub mod config;
pub mod enhancement;
pub mod error;

pub mod coherence;
pub mod ml;
pub mod quality;
pub mod report;
pub mod statistical;
pub mod tuning;

pub use config::{EvaluationConfig, EvaluationThresholds};
pub use error::{EvalError, EvalResult};

pub use statistical::{
    AmountDistributionAnalysis, AmountDistributionAnalyzer, BenfordAnalysis, BenfordAnalyzer,
    BenfordConformity, LineItemAnalysis, LineItemAnalyzer, LineItemEntry, StatisticalEvaluation,
    TemporalAnalysis, TemporalAnalyzer, TemporalEntry,
};

pub use coherence::{
    AuditTrailEvaluation, AuditTrailGap, BalanceSheetEvaluation, BalanceSheetEvaluator,
    CoherenceEvaluation, DocumentChainEvaluation, DocumentChainEvaluator, FairValueEvaluation,
    FrameworkViolation, ICMatchingEvaluation, ICMatchingEvaluator, ImpairmentEvaluation,
    IsaComplianceEvaluation, LeaseAccountingEvaluation, LeaseAccountingEvaluator, LeaseEvaluation,
    PcaobComplianceEvaluation, PerformanceObligation, ReferentialIntegrityEvaluation,
    ReferentialIntegrityEvaluator, RevenueContract, RevenueRecognitionEvaluation,
    RevenueRecognitionEvaluator, SoxComplianceEvaluation, StandardsComplianceEvaluation,
    StandardsThresholds, SubledgerEvaluator, SubledgerReconciliationEvaluation,
    VariableConsideration, ViolationSeverity,
};

pub use quality::{
    CompletenessAnalysis, CompletenessAnalyzer, ConsistencyAnalysis, ConsistencyAnalyzer,
    ConsistencyRule, DuplicateInfo, FieldCompleteness, FormatAnalysis, FormatAnalyzer,
    FormatVariation, QualityEvaluation, UniquenessAnalysis, UniquenessAnalyzer,
};

pub use ml::{
    FeatureAnalysis, FeatureAnalyzer, FeatureStats, GraphAnalysis, GraphAnalyzer, GraphMetrics,
    LabelAnalysis, LabelAnalyzer, LabelDistribution, MLReadinessEvaluation, SplitAnalysis,
    SplitAnalyzer, SplitMetrics,
};

pub use report::{
    BaselineComparison, ComparisonResult, EvaluationReport, HtmlReportGenerator,
    JsonReportGenerator, MetricChange, ReportMetadata, ThresholdChecker, ThresholdResult,
};

pub use tuning::{
    ConfigSuggestion, ConfigSuggestionGenerator, TuningAnalyzer, TuningCategory, TuningOpportunity,
};

pub use enhancement::{
    AutoTuneResult, AutoTuner, ConfigPatch, EnhancementReport, Recommendation,
    RecommendationCategory, RecommendationEngine, RecommendationPriority, RootCause,
    SuggestedAction,
};

pub use benchmarks::{
    all_benchmarks, anomaly_bench_1k, data_quality_100k, entity_match_5k, fraud_detect_10k,
    get_benchmark, graph_fraud_10k, BaselineModelType, BaselineResult, BenchmarkBuilder,
    BenchmarkSuite, BenchmarkTaskType, CostMatrix, DatasetSpec, EvaluationSpec, FeatureSet,
    LeaderboardEntry, MetricType, SplitRatios,
};

use serde::{Deserialize, Serialize};

/// Aggregated results of a full evaluation across all dimensions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComprehensiveEvaluation {
    /// Statistical checks (Benford conformity, amount distributions, line items, temporal patterns).
    pub statistical: StatisticalEvaluation,
    /// Coherence and standards-compliance checks.
    pub coherence: CoherenceEvaluation,
    /// Data-quality checks (completeness, consistency, format, uniqueness).
    pub quality: QualityEvaluation,
    /// ML-readiness checks (features, labels, graph structure, splits).
    pub ml_readiness: MLReadinessEvaluation,
    /// True when every threshold check passed.
    pub passes: bool,
    /// Descriptions of each failed threshold check.
    pub failures: Vec<String>,
    /// Detected tuning opportunities.
    pub tuning_opportunities: Vec<TuningOpportunity>,
    /// Suggested configuration changes.
    pub config_suggestions: Vec<ConfigSuggestion>,
}

impl ComprehensiveEvaluation {
    /// Creates an evaluation with default sub-results; `passes` starts as `true` with no failures.
    pub fn new() -> Self {
        Self {
            statistical: StatisticalEvaluation::default(),
            coherence: CoherenceEvaluation::default(),
            quality: QualityEvaluation::default(),
            ml_readiness: MLReadinessEvaluation::default(),
            passes: true,
            failures: Vec::new(),
            tuning_opportunities: Vec::new(),
            config_suggestions: Vec::new(),
        }
    }

    /// Checks every dimension against the thresholds, collecting failures and updating `passes`.
    pub fn check_all_thresholds(&mut self, thresholds: &EvaluationThresholds) {
        self.failures.clear();

        self.statistical.check_thresholds(thresholds);
        self.failures.extend(self.statistical.failures.clone());

        self.coherence.check_thresholds(thresholds);
        self.failures.extend(self.coherence.failures.clone());

        self.quality.check_thresholds(thresholds);
        self.failures.extend(self.quality.failures.clone());

        self.ml_readiness.check_thresholds(thresholds);
        self.failures.extend(self.ml_readiness.failures.clone());

        self.passes = self.failures.is_empty();
    }
}

impl Default for ComprehensiveEvaluation {
    fn default() -> Self {
        Self::new()
    }
}

/// Runs evaluations using a supplied [`EvaluationConfig`].
pub struct Evaluator {
    /// Configuration whose thresholds drive the evaluation checks.
    config: EvaluationConfig,
}

impl Evaluator {
    /// Creates an evaluator with the given configuration.
    pub fn new(config: EvaluationConfig) -> Self {
        Self { config }
    }

    /// Creates an evaluator with the default configuration.
    pub fn with_defaults() -> Self {
        Self::new(EvaluationConfig::default())
    }

    /// Returns the active configuration.
    pub fn config(&self) -> &EvaluationConfig {
        &self.config
    }

    /// Builds a [`ComprehensiveEvaluation`] from defaults and checks it against the configured thresholds.
    pub fn run_evaluation(&self) -> ComprehensiveEvaluation {
        let mut evaluation = ComprehensiveEvaluation::new();
        evaluation.check_all_thresholds(&self.config.thresholds);
        evaluation
    }
}
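
// A minimal usage sketch (illustrative only; it relies solely on the items defined above):
//
//     let evaluator = Evaluator::with_defaults();
//     let evaluation = evaluator.run_evaluation();
//     if !evaluation.passes {
//         for failure in &evaluation.failures {
//             eprintln!("threshold failure: {failure}");
//         }
//     }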

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_comprehensive_evaluation_new() {
        let eval = ComprehensiveEvaluation::new();
        assert!(eval.passes);
        assert!(eval.failures.is_empty());
    }

    #[test]
    fn test_evaluator_creation() {
        let evaluator = Evaluator::with_defaults();
        assert_eq!(evaluator.config().thresholds.benford_p_value_min, 0.05);
    }
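
    // Illustrative check (not part of the original suite): after `run_evaluation`,
    // the aggregate `passes` flag should mirror whether any failures were recorded.
    #[test]
    fn test_run_evaluation_is_consistent() {
        let evaluator = Evaluator::with_defaults();
        let evaluation = evaluator.run_evaluation();
        assert_eq!(evaluation.passes, evaluation.failures.is_empty());
    }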
}