1mod comparison;
7mod html;
8mod json;
9mod thresholds;
10
11pub use comparison::{BaselineComparison, ComparisonResult, MetricChange};
12pub use html::HtmlReportGenerator;
13pub use json::JsonReportGenerator;
14pub use thresholds::{ThresholdChecker, ThresholdResult};
15
16use crate::coherence::CoherenceEvaluation;
17use crate::ml::MLReadinessEvaluation;
18use crate::quality::QualityEvaluation;
19use crate::statistical::StatisticalEvaluation;
20use chrono::{DateTime, Utc};
21use serde::{Deserialize, Serialize};
22
/// Top-level result of an evaluation run: the per-dimension results plus the
/// aggregated pass/fail verdict, collected issues, and overall score.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvaluationReport {
    /// Contextual information about this run (when, what, how long).
    pub metadata: ReportMetadata,
    /// Statistical evaluation result, if that dimension was run.
    pub statistical: Option<StatisticalEvaluation>,
    /// Coherence evaluation result, if that dimension was run.
    pub coherence: Option<CoherenceEvaluation>,
    /// Quality evaluation result, if that dimension was run.
    pub quality: Option<QualityEvaluation>,
    /// ML-readiness evaluation result, if that dimension was run.
    pub ml_readiness: Option<MLReadinessEvaluation>,
    /// True only when every dimension that was run passed.
    pub passes: bool,
    /// Issues gathered from all evaluated dimensions.
    pub all_issues: Vec<ReportIssue>,
    /// Mean of the available per-dimension scores (1.0 when none were produced).
    pub overall_score: f64,
    /// Optional comparison against a previously stored baseline report.
    pub baseline_comparison: Option<BaselineComparison>,
}
45
/// Contextual information recorded alongside a single evaluation run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportMetadata {
    /// When the report was generated (serialized via the project's UTC
    /// timestamp format).
    #[serde(with = "datasynth_core::serde_timestamp::utc")]
    pub generated_at: DateTime<Utc>,
    // Presumably the evaluator/tool version — confirm against the producer.
    pub version: String,
    /// Identifier of where the evaluated data came from.
    pub data_source: String,
    /// Name of the threshold profile the run was checked against.
    pub thresholds_name: String,
    /// Number of records included in the evaluation.
    pub records_evaluated: usize,
    /// Wall-clock duration of the evaluation, in milliseconds.
    pub duration_ms: u64,
}
63
/// A single finding surfaced by one of the evaluation dimensions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportIssue {
    /// Which evaluation dimension raised the issue.
    pub category: IssueCategory,
    /// How serious the finding is.
    pub severity: IssueSeverity,
    /// Human-readable description of the finding.
    pub description: String,
    /// Name of the metric involved, when applicable.
    pub metric: Option<String>,
    /// Observed value rendered as a string, when applicable.
    pub actual_value: Option<String>,
    /// Threshold the value was compared against, when applicable.
    pub threshold_value: Option<String>,
}
80
/// Evaluation dimension an issue originated from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IssueCategory {
    /// Raised by the statistical evaluation.
    Statistical,
    /// Raised by the coherence evaluation.
    Coherence,
    /// Raised by the quality evaluation.
    Quality,
    /// Raised by the ML-readiness evaluation.
    MLReadiness,
}
89
/// Severity level attached to a [`ReportIssue`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IssueSeverity {
    /// Blocking problem. Currently the level assigned to every issue produced
    /// by report aggregation.
    Critical,
    /// Non-blocking problem worth attention.
    Warning,
    /// Informational note.
    Info,
}
100
101impl EvaluationReport {
102 pub fn new(
104 metadata: ReportMetadata,
105 statistical: Option<StatisticalEvaluation>,
106 coherence: Option<CoherenceEvaluation>,
107 quality: Option<QualityEvaluation>,
108 ml_readiness: Option<MLReadinessEvaluation>,
109 ) -> Self {
110 let mut report = Self {
111 metadata,
112 statistical,
113 coherence,
114 quality,
115 ml_readiness,
116 passes: true,
117 all_issues: Vec::new(),
118 overall_score: 1.0,
119 baseline_comparison: None,
120 };
121 report.aggregate_results();
122 report
123 }
124
125 fn aggregate_results(&mut self) {
127 let mut scores = Vec::new();
128
129 if let Some(ref stat) = self.statistical {
131 if !stat.passes {
132 self.passes = false;
133 }
134 scores.push(stat.overall_score);
135 for issue in &stat.issues {
136 self.all_issues.push(ReportIssue {
137 category: IssueCategory::Statistical,
138 severity: IssueSeverity::Critical,
139 description: issue.clone(),
140 metric: None,
141 actual_value: None,
142 threshold_value: None,
143 });
144 }
145 }
146
147 if let Some(ref coh) = self.coherence {
149 if !coh.passes {
150 self.passes = false;
151 }
152 for failure in &coh.failures {
153 self.all_issues.push(ReportIssue {
154 category: IssueCategory::Coherence,
155 severity: IssueSeverity::Critical,
156 description: failure.clone(),
157 metric: None,
158 actual_value: None,
159 threshold_value: None,
160 });
161 }
162 }
163
164 if let Some(ref qual) = self.quality {
166 if !qual.passes {
167 self.passes = false;
168 }
169 scores.push(qual.overall_score);
170 for issue in &qual.issues {
171 self.all_issues.push(ReportIssue {
172 category: IssueCategory::Quality,
173 severity: IssueSeverity::Critical,
174 description: issue.clone(),
175 metric: None,
176 actual_value: None,
177 threshold_value: None,
178 });
179 }
180 }
181
182 if let Some(ref ml) = self.ml_readiness {
184 if !ml.passes {
185 self.passes = false;
186 }
187 scores.push(ml.overall_score);
188 for issue in &ml.issues {
189 self.all_issues.push(ReportIssue {
190 category: IssueCategory::MLReadiness,
191 severity: IssueSeverity::Critical,
192 description: issue.clone(),
193 metric: None,
194 actual_value: None,
195 threshold_value: None,
196 });
197 }
198 }
199
200 self.overall_score = if scores.is_empty() {
202 1.0
203 } else {
204 scores.iter().sum::<f64>() / scores.len() as f64
205 };
206 }
207
208 pub fn with_baseline_comparison(mut self, comparison: BaselineComparison) -> Self {
210 self.baseline_comparison = Some(comparison);
211 self
212 }
213
214 pub fn issues_by_category(&self, category: IssueCategory) -> Vec<&ReportIssue> {
216 self.all_issues
217 .iter()
218 .filter(|i| i.category == category)
219 .collect()
220 }
221
222 pub fn critical_issues(&self) -> Vec<&ReportIssue> {
224 self.all_issues
225 .iter()
226 .filter(|i| i.severity == IssueSeverity::Critical)
227 .collect()
228 }
229}
230
/// Output format(s) to produce when rendering a report.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReportFormat {
    /// JSON output only.
    Json,
    /// HTML output only.
    Html,
    /// Both JSON and HTML output.
    Both,
}
241
242pub trait ReportGenerator {
244 fn generate(&self, report: &EvaluationReport) -> crate::error::EvalResult<String>;
246
247 fn generate_to_file(
249 &self,
250 report: &EvaluationReport,
251 path: &std::path::Path,
252 ) -> crate::error::EvalResult<()> {
253 let content = self.generate(report)?;
254 std::fs::write(path, content)?;
255 Ok(())
256 }
257}