1mod comparison;
7mod html;
8mod json;
9mod thresholds;
10
11pub use comparison::{BaselineComparison, ComparisonResult, MetricChange};
12pub use html::HtmlReportGenerator;
13pub use json::JsonReportGenerator;
14pub use thresholds::{ThresholdChecker, ThresholdResult};
15
16use crate::coherence::CoherenceEvaluation;
17use crate::ml::MLReadinessEvaluation;
18use crate::quality::QualityEvaluation;
19use crate::statistical::StatisticalEvaluation;
20use chrono::{DateTime, Utc};
21use serde::{Deserialize, Serialize};
22
/// Top-level result of an evaluation run: every enabled evaluation
/// section plus the aggregated pass/fail status, issue list, and score.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvaluationReport {
    /// Provenance and timing details for this report.
    pub metadata: ReportMetadata,
    /// Statistical evaluation results, if that section was run.
    pub statistical: Option<StatisticalEvaluation>,
    /// Coherence evaluation results, if that section was run.
    pub coherence: Option<CoherenceEvaluation>,
    /// Quality evaluation results, if that section was run.
    pub quality: Option<QualityEvaluation>,
    /// ML-readiness evaluation results, if that section was run.
    pub ml_readiness: Option<MLReadinessEvaluation>,
    /// True only if every section that was run reports `passes == true`.
    pub passes: bool,
    /// Issues from all sections, flattened into a single list.
    pub all_issues: Vec<ReportIssue>,
    /// Mean of the scores of sections that expose one (1.0 when none do;
    /// coherence contributes issues but no score).
    pub overall_score: f64,
    /// Comparison against a baseline report, when one was supplied via
    /// `with_baseline_comparison`.
    pub baseline_comparison: Option<BaselineComparison>,
}
45
/// Provenance and run-time details for a generated report.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportMetadata {
    /// UTC timestamp at which the report was generated.
    pub generated_at: DateTime<Utc>,
    /// Version string — presumably of the evaluating tool; confirm with the producer.
    pub version: String,
    /// Identifier of the data set that was evaluated.
    pub data_source: String,
    /// Name of the threshold profile used for pass/fail decisions.
    pub thresholds_name: String,
    /// Number of records that were evaluated.
    pub records_evaluated: usize,
    /// Wall-clock duration of the evaluation run, in milliseconds.
    pub duration_ms: u64,
}
62
/// A single problem surfaced by one of the evaluation sections.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportIssue {
    /// Which evaluation section raised the issue.
    pub category: IssueCategory,
    /// Severity level. Note: `EvaluationReport::aggregate_results` currently
    /// assigns `Critical` to every issue it records.
    pub severity: IssueSeverity,
    /// Human-readable description of the problem.
    pub description: String,
    /// Name of the offending metric, when known (left unset by the aggregator).
    pub metric: Option<String>,
    /// Observed value rendered as a display string, when known.
    pub actual_value: Option<String>,
    /// Threshold the value was compared against, when known.
    pub threshold_value: Option<String>,
}
79
80#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
82pub enum IssueCategory {
83 Statistical,
84 Coherence,
85 Quality,
86 MLReadiness,
87}
88
89#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
91pub enum IssueSeverity {
92 Critical,
94 Warning,
96 Info,
98}
99
100impl EvaluationReport {
101 pub fn new(
103 metadata: ReportMetadata,
104 statistical: Option<StatisticalEvaluation>,
105 coherence: Option<CoherenceEvaluation>,
106 quality: Option<QualityEvaluation>,
107 ml_readiness: Option<MLReadinessEvaluation>,
108 ) -> Self {
109 let mut report = Self {
110 metadata,
111 statistical,
112 coherence,
113 quality,
114 ml_readiness,
115 passes: true,
116 all_issues: Vec::new(),
117 overall_score: 1.0,
118 baseline_comparison: None,
119 };
120 report.aggregate_results();
121 report
122 }
123
124 fn aggregate_results(&mut self) {
126 let mut scores = Vec::new();
127
128 if let Some(ref stat) = self.statistical {
130 if !stat.passes {
131 self.passes = false;
132 }
133 scores.push(stat.overall_score);
134 for issue in &stat.issues {
135 self.all_issues.push(ReportIssue {
136 category: IssueCategory::Statistical,
137 severity: IssueSeverity::Critical,
138 description: issue.clone(),
139 metric: None,
140 actual_value: None,
141 threshold_value: None,
142 });
143 }
144 }
145
146 if let Some(ref coh) = self.coherence {
148 if !coh.passes {
149 self.passes = false;
150 }
151 for failure in &coh.failures {
152 self.all_issues.push(ReportIssue {
153 category: IssueCategory::Coherence,
154 severity: IssueSeverity::Critical,
155 description: failure.clone(),
156 metric: None,
157 actual_value: None,
158 threshold_value: None,
159 });
160 }
161 }
162
163 if let Some(ref qual) = self.quality {
165 if !qual.passes {
166 self.passes = false;
167 }
168 scores.push(qual.overall_score);
169 for issue in &qual.issues {
170 self.all_issues.push(ReportIssue {
171 category: IssueCategory::Quality,
172 severity: IssueSeverity::Critical,
173 description: issue.clone(),
174 metric: None,
175 actual_value: None,
176 threshold_value: None,
177 });
178 }
179 }
180
181 if let Some(ref ml) = self.ml_readiness {
183 if !ml.passes {
184 self.passes = false;
185 }
186 scores.push(ml.overall_score);
187 for issue in &ml.issues {
188 self.all_issues.push(ReportIssue {
189 category: IssueCategory::MLReadiness,
190 severity: IssueSeverity::Critical,
191 description: issue.clone(),
192 metric: None,
193 actual_value: None,
194 threshold_value: None,
195 });
196 }
197 }
198
199 self.overall_score = if scores.is_empty() {
201 1.0
202 } else {
203 scores.iter().sum::<f64>() / scores.len() as f64
204 };
205 }
206
207 pub fn with_baseline_comparison(mut self, comparison: BaselineComparison) -> Self {
209 self.baseline_comparison = Some(comparison);
210 self
211 }
212
213 pub fn issues_by_category(&self, category: IssueCategory) -> Vec<&ReportIssue> {
215 self.all_issues
216 .iter()
217 .filter(|i| i.category == category)
218 .collect()
219 }
220
221 pub fn critical_issues(&self) -> Vec<&ReportIssue> {
223 self.all_issues
224 .iter()
225 .filter(|i| i.severity == IssueSeverity::Critical)
226 .collect()
227 }
228}
229
/// Output format selection for report generation.
///
/// `Hash` is derived so formats can be used as map/set keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ReportFormat {
    /// Machine-readable JSON output.
    Json,
    /// Human-readable HTML output.
    Html,
    /// Generate both JSON and HTML.
    Both,
}
240
/// Renders an `EvaluationReport` into a textual output format
/// (implemented by `JsonReportGenerator` and `HtmlReportGenerator`).
pub trait ReportGenerator {
    /// Produces the serialized report as a string.
    ///
    /// # Errors
    /// Returns the implementation's serialization error wrapped in
    /// `crate::error::EvalResult`.
    fn generate(&self, report: &EvaluationReport) -> crate::error::EvalResult<String>;

    /// Generates the report and writes it to `path`, overwriting any
    /// existing file.
    ///
    /// Default implementation delegates to [`Self::generate`] then
    /// `std::fs::write`; the `?` on the write implies the `EvalResult`
    /// error type converts from `std::io::Error`.
    fn generate_to_file(
        &self,
        report: &EvaluationReport,
        path: &std::path::Path,
    ) -> crate::error::EvalResult<()> {
        let content = self.generate(report)?;
        std::fs::write(path, content)?;
        Ok(())
    }
}