// datasynth_eval/report/mod.rs
1//! Report generation module.
2//!
3//! Generates evaluation reports in various formats (JSON, HTML) with
4//! pass/fail criteria and baseline comparison.
5
6mod comparison;
7mod html;
8mod json;
9mod thresholds;
10
11pub use comparison::{BaselineComparison, ComparisonResult, MetricChange};
12pub use html::HtmlReportGenerator;
13pub use json::JsonReportGenerator;
14pub use thresholds::{ThresholdChecker, ThresholdResult};
15
16use crate::coherence::CoherenceEvaluation;
17use crate::ml::MLReadinessEvaluation;
18use crate::quality::QualityEvaluation;
19use crate::statistical::StatisticalEvaluation;
20use chrono::{DateTime, Utc};
21use serde::{Deserialize, Serialize};
22
23/// Complete evaluation report.
/// Complete evaluation report.
///
/// Built via [`EvaluationReport::new`], which aggregates pass/fail
/// status, issues, and an overall score from whichever component
/// evaluations were run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvaluationReport {
    /// Report metadata (timestamp, data source, thresholds, timing).
    pub metadata: ReportMetadata,
    /// Statistical evaluation results (`None` if this evaluation was not run).
    pub statistical: Option<StatisticalEvaluation>,
    /// Coherence evaluation results (`None` if this evaluation was not run).
    pub coherence: Option<CoherenceEvaluation>,
    /// Quality evaluation results (`None` if this evaluation was not run).
    pub quality: Option<QualityEvaluation>,
    /// ML-readiness evaluation results (`None` if this evaluation was not run).
    pub ml_readiness: Option<MLReadinessEvaluation>,
    /// Overall pass/fail status: `false` if any present evaluation failed.
    pub passes: bool,
    /// Summary of all issues found, flattened across every evaluation.
    pub all_issues: Vec<ReportIssue>,
    /// Overall score (0.0-1.0): mean of the component scores; 1.0 when no
    /// scored evaluation ran.
    pub overall_score: f64,
    /// Comparison with baseline (if provided via `with_baseline_comparison`).
    pub baseline_comparison: Option<BaselineComparison>,
}
45
46/// Report metadata.
/// Report metadata.
///
/// Describes when and how the evaluation was produced, independent of
/// the evaluation results themselves.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportMetadata {
    /// Report generation timestamp (UTC; serialized via the project's
    /// shared timestamp format helper).
    #[serde(with = "datasynth_core::serde_timestamp::utc")]
    pub generated_at: DateTime<Utc>,
    /// Evaluation version.
    pub version: String,
    /// Input data source.
    pub data_source: String,
    /// Name of the threshold set used for pass/fail decisions.
    pub thresholds_name: String,
    /// Number of records evaluated.
    pub records_evaluated: usize,
    /// Evaluation duration in milliseconds.
    pub duration_ms: u64,
}
63
64/// An issue found during evaluation.
/// An issue found during evaluation.
///
/// `metric`, `actual_value`, and `threshold_value` are optional so the
/// same type can represent both metric-threshold violations and
/// free-form findings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportIssue {
    /// Issue category (which evaluation produced it).
    pub category: IssueCategory,
    /// Issue severity.
    pub severity: IssueSeverity,
    /// Human-readable issue description.
    pub description: String,
    /// Metric name (if applicable).
    pub metric: Option<String>,
    /// Actual value observed, rendered as a string (if applicable).
    pub actual_value: Option<String>,
    /// Threshold value that was violated, rendered as a string (if applicable).
    pub threshold_value: Option<String>,
}
80
81/// Category of issue.
/// Category of issue, identifying which evaluation produced it.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IssueCategory {
    /// Issue from the statistical evaluation.
    Statistical,
    /// Issue from the coherence evaluation.
    Coherence,
    /// Issue from the quality evaluation.
    Quality,
    /// Issue from the ML-readiness evaluation.
    MLReadiness,
}
89
90/// Severity of issue.
/// Severity of issue.
///
/// NOTE: issues aggregated from the component evaluations are currently
/// all recorded as `Critical`; `Warning` and `Info` are available for
/// other producers of [`ReportIssue`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum IssueSeverity {
    /// Critical issue that fails the evaluation.
    Critical,
    /// Warning that may need attention.
    Warning,
    /// Informational note.
    Info,
}
100
101impl EvaluationReport {
102    /// Create a new report with the given results.
103    pub fn new(
104        metadata: ReportMetadata,
105        statistical: Option<StatisticalEvaluation>,
106        coherence: Option<CoherenceEvaluation>,
107        quality: Option<QualityEvaluation>,
108        ml_readiness: Option<MLReadinessEvaluation>,
109    ) -> Self {
110        let mut report = Self {
111            metadata,
112            statistical,
113            coherence,
114            quality,
115            ml_readiness,
116            passes: true,
117            all_issues: Vec::new(),
118            overall_score: 1.0,
119            baseline_comparison: None,
120        };
121        report.aggregate_results();
122        report
123    }
124
125    /// Aggregate results from all evaluations.
126    fn aggregate_results(&mut self) {
127        let mut scores = Vec::new();
128
129        // Collect statistical issues
130        if let Some(ref stat) = self.statistical {
131            if !stat.passes {
132                self.passes = false;
133            }
134            scores.push(stat.overall_score);
135            for issue in &stat.issues {
136                self.all_issues.push(ReportIssue {
137                    category: IssueCategory::Statistical,
138                    severity: IssueSeverity::Critical,
139                    description: issue.clone(),
140                    metric: None,
141                    actual_value: None,
142                    threshold_value: None,
143                });
144            }
145        }
146
147        // Collect coherence issues
148        if let Some(ref coh) = self.coherence {
149            if !coh.passes {
150                self.passes = false;
151            }
152            for failure in &coh.failures {
153                self.all_issues.push(ReportIssue {
154                    category: IssueCategory::Coherence,
155                    severity: IssueSeverity::Critical,
156                    description: failure.clone(),
157                    metric: None,
158                    actual_value: None,
159                    threshold_value: None,
160                });
161            }
162        }
163
164        // Collect quality issues
165        if let Some(ref qual) = self.quality {
166            if !qual.passes {
167                self.passes = false;
168            }
169            scores.push(qual.overall_score);
170            for issue in &qual.issues {
171                self.all_issues.push(ReportIssue {
172                    category: IssueCategory::Quality,
173                    severity: IssueSeverity::Critical,
174                    description: issue.clone(),
175                    metric: None,
176                    actual_value: None,
177                    threshold_value: None,
178                });
179            }
180        }
181
182        // Collect ML issues
183        if let Some(ref ml) = self.ml_readiness {
184            if !ml.passes {
185                self.passes = false;
186            }
187            scores.push(ml.overall_score);
188            for issue in &ml.issues {
189                self.all_issues.push(ReportIssue {
190                    category: IssueCategory::MLReadiness,
191                    severity: IssueSeverity::Critical,
192                    description: issue.clone(),
193                    metric: None,
194                    actual_value: None,
195                    threshold_value: None,
196                });
197            }
198        }
199
200        // Calculate overall score
201        self.overall_score = if scores.is_empty() {
202            1.0
203        } else {
204            scores.iter().sum::<f64>() / scores.len() as f64
205        };
206    }
207
208    /// Set baseline comparison.
209    pub fn with_baseline_comparison(mut self, comparison: BaselineComparison) -> Self {
210        self.baseline_comparison = Some(comparison);
211        self
212    }
213
214    /// Get issues by category.
215    pub fn issues_by_category(&self, category: IssueCategory) -> Vec<&ReportIssue> {
216        self.all_issues
217            .iter()
218            .filter(|i| i.category == category)
219            .collect()
220    }
221
222    /// Get critical issues only.
223    pub fn critical_issues(&self) -> Vec<&ReportIssue> {
224        self.all_issues
225            .iter()
226            .filter(|i| i.severity == IssueSeverity::Critical)
227            .collect()
228    }
229}
230
231/// Report format options.
/// Report format options.
///
/// Selects which output(s) a caller wants generated.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReportFormat {
    /// JSON format only.
    Json,
    /// HTML format only.
    Html,
    /// Both JSON and HTML.
    Both,
}
241
242/// Report generator trait.
243pub trait ReportGenerator {
244    /// Generate report to string.
245    fn generate(&self, report: &EvaluationReport) -> crate::error::EvalResult<String>;
246
247    /// Generate report to file.
248    fn generate_to_file(
249        &self,
250        report: &EvaluationReport,
251        path: &std::path::Path,
252    ) -> crate::error::EvalResult<()> {
253        let content = self.generate(report)?;
254        std::fs::write(path, content)?;
255        Ok(())
256    }
257}