#![allow(dead_code)]
use crate::cli::Mode;
use crate::config::Config;
use crate::modes;
use crate::transformation_analysis::{analyze_transformations, TransformationType};
/// Aggregated result of comparing a transformation's input and output.
#[derive(Debug, Clone)]
pub struct QualityReport {
    /// Overall quality score in `[0.0, 1.0]` (higher is better).
    pub score: f32,
    /// Concrete problems detected during validation.
    pub issues: Vec<QualityIssue>,
    /// Per-category metrics backing the overall score.
    pub metrics: QualityMetrics,
}
/// A concrete quality problem detected in transformed output.
#[derive(Debug, Clone)]
pub enum QualityIssue {
    /// Original text appears altered (e.g. a drawing glyph landed inside a
    /// word, or a destructive transformation was detected).
    TextCorruption {
        /// Zero-based line index where the corruption was seen.
        line: usize,
        /// What was expected at that location.
        expected: String,
        /// What was actually found.
        got: String,
    },
    /// Content present in the input is missing from the output.
    DataLoss {
        /// Category of the content that went missing.
        content_type: LostContent,
        /// Zero-based line index associated with the loss.
        line: usize,
    },
    /// A structural element (box, arrow, connection) is malformed.
    StructureCorruption {
        issue_type: StructureType,
        location: Point,
    },
    /// Output renders inconsistently (alignment, spacing, overlap).
    VisualInconsistency {
        issue_type: Inconsistency,
        location: Point,
    },
}
/// Category of content that can go missing during a transformation.
#[derive(Debug, Clone)]
pub enum LostContent {
    /// A whole line of text.
    TextLine,
    /// Part of a box border.
    BoxBorder,
    /// An arrow glyph.
    Arrow,
    /// A connecting line between elements.
    ConnectionLine,
    /// A label attached to an element.
    Label,
}
/// Kind of structural corruption detected in diagram output.
#[derive(Debug, Clone)]
pub enum StructureType {
    /// A box border that does not close properly.
    MalformedBox,
    /// An arrow missing its head or shaft.
    BrokenArrow,
    /// A connection that does not join two valid endpoints.
    InvalidConnection,
    /// Nested elements that conflict with each other.
    NestedConflict,
}
/// Kind of visual (rendering) inconsistency detected in output.
#[derive(Debug, Clone)]
pub enum Inconsistency {
    /// Boxes that should line up but do not.
    MisalignedBoxes,
    /// Uneven spacing between elements.
    InconsistentSpacing,
    /// Borders of adjacent elements overlapping.
    BorderOverlap,
    /// Text extending beyond its containing element.
    TextOverflow,
}
/// Per-category measurements used to derive the overall quality score.
#[derive(Debug, Clone)]
pub struct QualityMetrics {
    /// Ratio of text characters surviving the transformation (1.0 = all kept).
    pub text_preservation: f32,
    /// Fraction of transformations that were non-destructive.
    pub structure_preservation: f32,
    /// Rendering-consistency score, reduced per detected visual issue.
    pub visual_consistency: f32,
    /// Output line count minus input line count (negative = lines lost).
    pub line_count_delta: i32,
    /// Number of detected text-corruption incidents.
    pub text_corruption_count: usize,
    /// Number of detected data-loss incidents.
    pub data_loss_count: usize,
}
/// A zero-based (row, column) position in rendered text.
#[derive(Debug, Clone, Copy)]
pub struct Point {
    pub row: usize,
    pub col: usize,
}
/// Thresholds controlling what [`QualityReport::is_acceptable`] tolerates.
#[derive(Debug, Clone)]
pub struct QualityConfig {
    /// Minimum acceptable `text_preservation` ratio.
    pub min_text_preservation: f32,
    /// Minimum acceptable `structure_preservation` ratio.
    pub min_structure_preservation: f32,
    /// Maximum tolerated absolute line-count change.
    pub max_line_count_delta: i32,
    /// When `true`, text-corruption incidents do not fail validation.
    pub allow_text_corruption: bool,
    /// When `true`, data-loss incidents do not fail validation.
    pub allow_data_loss: bool,
}
impl Default for QualityConfig {
    /// Strict defaults: high preservation thresholds, no line-count drift,
    /// and zero tolerance for corruption or data loss.
    fn default() -> Self {
        Self {
            min_text_preservation: 0.95,
            min_structure_preservation: 0.90,
            max_line_count_delta: 0,
            allow_text_corruption: false,
            allow_data_loss: false,
        }
    }
}
impl QualityReport {
    /// Returns `true` when this report satisfies every threshold in `config`.
    ///
    /// In addition to the configured thresholds, the overall score must be at
    /// least 0.8. The line-count delta is compared by absolute value on both
    /// sides, so a negative configured maximum behaves like its magnitude.
    #[must_use]
    pub fn is_acceptable(&self, config: &QualityConfig) -> bool {
        let metrics = &self.metrics;
        let delta_ok = metrics.line_count_delta.abs() <= config.max_line_count_delta.abs();
        let corruption_ok = config.allow_text_corruption || metrics.text_corruption_count == 0;
        let loss_ok = config.allow_data_loss || metrics.data_loss_count == 0;
        self.score >= 0.8
            && metrics.text_preservation >= config.min_text_preservation
            && metrics.structure_preservation >= config.min_structure_preservation
            && delta_ok
            && corruption_ok
            && loss_ok
    }
}
/// Validates transformation quality by comparing the original `input` text
/// with the transformed `output`.
///
/// The returned [`QualityReport`] aggregates per-category metrics (text
/// preservation, structure preservation, visual consistency), an overall
/// score in `[0.0, 1.0]`, and a list of concrete issues found.
#[must_use]
pub fn validate_quality(input: &str, output: &str) -> QualityReport {
    // Start from a perfect report and let the checks below degrade it.
    let mut report = QualityReport {
        score: 1.0,
        issues: Vec::new(),
        metrics: QualityMetrics {
            text_preservation: 1.0,
            structure_preservation: 1.0,
            visual_consistency: 1.0,
            line_count_delta: 0,
            text_corruption_count: 0,
            data_loss_count: 0,
        },
    };
    let input_lines: Vec<&str> = input.lines().collect();
    let output_lines: Vec<&str> = output.lines().collect();
    #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
    {
        report.metrics.line_count_delta = output_lines.len() as i32 - input_lines.len() as i32;
    }
    let transformation_analysis = analyze_transformations(input, output);
    report.metrics.text_corruption_count = transformation_analysis.summary.destructive_count;
    // NOTE(review): this equals `destructive_count` if the summary field simply
    // counts `Destructive` transformations — confirm whether data loss should
    // be tracked by a distinct criterion.
    report.metrics.data_loss_count = transformation_analysis
        .transformations
        .iter()
        .filter(|t| matches!(t.transform_type, TransformationType::Destructive(_)))
        .count();
    // Fix: a text-preservation ratio (plus a per-constructive-transformation
    // bonus) used to be computed here, but `check_text_corruption` below
    // unconditionally overwrites `metrics.text_preservation`, making that
    // computation a dead store. It has been removed to match actual behavior;
    // if the constructive bonus is desired, apply it *after*
    // `check_text_corruption` runs.
    check_text_corruption(&input_lines, &output_lines, &mut report);
    check_data_loss(&input_lines, &output_lines, &mut report);
    check_visual_consistency(&output_lines, &mut report);
    report.metrics.structure_preservation =
        calculate_structure_preservation(&transformation_analysis);
    report.score = calculate_enhanced_overall_score(&report.metrics, &transformation_analysis);
    // Surface each destructive transformation as a corruption issue so callers
    // can see where quality degraded.
    for transformation in &transformation_analysis.transformations {
        if matches!(
            transformation.transform_type,
            TransformationType::Destructive(_)
        ) {
            report.issues.push(QualityIssue::TextCorruption {
                line: transformation.location.line,
                expected: "original content".to_string(),
                got: transformation.description.clone(),
            });
        }
    }
    report
}
/// Scans `output_lines` for drawing glyphs that landed inside prose and
/// records each hit as a [`QualityIssue::TextCorruption`]; then recomputes
/// `metrics.text_preservation` from raw text-character counts.
fn check_text_corruption(input_lines: &[&str], output_lines: &[&str], report: &mut QualityReport) {
    for (row, output_line) in output_lines.iter().enumerate() {
        let glyphs: Vec<char> = output_line.chars().collect();
        for (col, &glyph) in glyphs.iter().enumerate() {
            // Arrows and pipes are only corruption when embedded in text.
            let corrupting = match glyph {
                '↑' | '↓' | '←' | '→' => is_arrow_corrupting_text(&glyphs, col),
                '│' => is_pipe_in_text_content(&glyphs, col),
                _ => false,
            };
            if corrupting {
                report.issues.push(QualityIssue::TextCorruption {
                    line: row,
                    expected: "text content".to_string(),
                    got: glyph.to_string(),
                });
                report.metrics.text_corruption_count += 1;
            }
        }
    }
    // Preservation ratio: surviving text characters over original ones.
    let before = count_text_chars(input_lines);
    let after = count_text_chars(output_lines);
    #[allow(clippy::cast_precision_loss)]
    {
        report.metrics.text_preservation = if before > 0 {
            after as f32 / before as f32
        } else {
            1.0
        };
    }
}
/// Heuristic data-loss detection: flags a noticeable drop in line count, and
/// the output shrinking to less than half of a non-trivial input.
fn check_data_loss(input_lines: &[&str], output_lines: &[&str], report: &mut QualityReport) {
    // Bug fix: the original computed `output - input` (saturating), which is
    // zero exactly when lines were *lost* — so this branch only fired when the
    // output GREW. Loss of lines is `input - output`.
    let lines_lost = input_lines.len().saturating_sub(output_lines.len());
    if lines_lost > 2 {
        report.metrics.data_loss_count += 1;
        report.issues.push(QualityIssue::DataLoss {
            content_type: LostContent::TextLine,
            line: input_lines.len(),
        });
    }
    // Also flag a gross shrink: non-trivial input reduced to under half size.
    let input_content_size = input_lines.iter().map(|l| l.len()).sum::<usize>();
    let output_content_size = output_lines.iter().map(|l| l.len()).sum::<usize>();
    if input_content_size > 100 && output_content_size < input_content_size / 2 {
        report.metrics.data_loss_count += 1;
        report.issues.push(QualityIssue::DataLoss {
            content_type: LostContent::TextLine,
            line: 0,
        });
    }
}
/// Degrades `metrics.visual_consistency` for malformed boxes (an opening
/// corner with no closing corner on the same line) and for lines that are
/// overwhelmingly whitespace; the metric is floored at 0.0.
fn check_visual_consistency(output_lines: &[&str], report: &mut QualityReport) {
    for (row, line) in output_lines.iter().enumerate() {
        // A top-left corner without a matching top-right corner on the same
        // line suggests a box whose border never closed.
        if line.contains('┌') && !line.contains('┐') {
            report.metrics.visual_consistency -= 0.1;
            report.issues.push(QualityIssue::StructureCorruption {
                issue_type: StructureType::MalformedBox,
                location: Point { row, col: 0 },
            });
        }
        // Penalize lines that are more than 5:1 whitespace-to-content.
        let mut spaces = 0usize;
        let mut non_spaces = 0usize;
        for c in line.chars() {
            if c == ' ' {
                spaces += 1;
            } else {
                non_spaces += 1;
            }
        }
        #[allow(clippy::cast_precision_loss)]
        if non_spaces > 0 && spaces as f32 / non_spaces as f32 > 5.0 {
            report.metrics.visual_consistency -= 0.05;
        }
    }
    report.metrics.visual_consistency = report.metrics.visual_consistency.max(0.0);
}
/// Fraction of transformations that preserved structure: everything that was
/// not destructive, over the total. Returns 1.0 when nothing was transformed.
#[allow(clippy::cast_precision_loss)]
fn calculate_structure_preservation(
    analysis: &crate::transformation_analysis::TransformationAnalysis,
) -> f32 {
    let summary = &analysis.summary;
    let destructive = summary.destructive_count as f32;
    let constructive = summary.constructive_count as f32;
    let neutral = summary.neutral_count as f32;
    let total = destructive + constructive + neutral;
    if total == 0.0 {
        1.0
    } else {
        (constructive + neutral) / total
    }
}
/// Combines the per-category metrics with the transformation summary into a
/// single score: the product of the three preservation ratios, adjusted by a
/// net-impact term, penalties for destruction and line drift, and a capped
/// constructive bonus — clamped to `[0.0, 1.0]`.
#[allow(clippy::cast_precision_loss)]
fn calculate_enhanced_overall_score(
    metrics: &QualityMetrics,
    analysis: &crate::transformation_analysis::TransformationAnalysis,
) -> f32 {
    let summary = &analysis.summary;
    let base = metrics.text_preservation
        * metrics.structure_preservation
        * metrics.visual_consistency;
    let impact = summary.net_quality_impact * 0.1;
    let destruction_penalty = summary.destructive_count as f32 * 0.2;
    // Line drift costs 0.02 per line, capped at 0.1.
    let drift_penalty = (metrics.line_count_delta.abs() as f32 * 0.02).min(0.1);
    // Constructive work earns 0.05 each, capped at 0.2.
    let bonus = (summary.constructive_count as f32 * 0.05).min(0.2);
    (base + impact - destruction_penalty - drift_penalty + bonus).clamp(0.0, 1.0)
}
/// True when the arrow glyph at `col` sits directly between two alphabetic
/// characters, i.e. it appears to have replaced a letter inside a word.
/// Boundary columns (first, last) never count.
fn is_arrow_corrupting_text(line_chars: &[char], col: usize) -> bool {
    // `col + 1 >= len` is equivalent to the original `col >= len - 1` for
    // non-empty slices but cannot underflow when `line_chars` is empty.
    if col == 0 || col + 1 >= line_chars.len() {
        return false;
    }
    line_chars[col - 1].is_alphabetic() && line_chars[col + 1].is_alphabetic()
}
/// True when the `│` glyph at `col` looks embedded in prose: a letter on at
/// least one side, with the other side a letter or a space. Boundary columns
/// never count.
fn is_pipe_in_text_content(line_chars: &[char], col: usize) -> bool {
    // `col + 1 >= len` avoids the `len - 1` underflow on an empty slice while
    // rejecting the same boundary columns as the original.
    if col == 0 || col + 1 >= line_chars.len() {
        return false;
    }
    let prev = line_chars[col - 1];
    let next = line_chars[col + 1];
    // Equivalent to (A&&B) || (A&&space) || (space&&B) in the original.
    (prev.is_alphabetic() && (next.is_alphabetic() || next == ' '))
        || (prev == ' ' && next.is_alphabetic())
}
/// Counts characters that constitute "text" content — letters, digits, ASCII
/// punctuation and plain spaces — across all `lines`. Box-drawing and arrow
/// glyphs are deliberately excluded.
fn count_text_chars(lines: &[&str]) -> usize {
    let is_text =
        |c: char| c.is_alphabetic() || c.is_numeric() || c.is_ascii_punctuation() || c == ' ';
    lines
        .iter()
        .flat_map(|line| line.chars())
        .filter(|&c| is_text(c))
        .count()
}
/// Processes `input_path` in diagram mode (without fence repair) and
/// validates the result against `expected_path` using the thresholds in
/// `config`.
///
/// # Errors
///
/// Returns a human-readable message when a file cannot be read, the quality
/// thresholds are not met, or the normalized output does not match.
pub fn validate_fixture(
    input_path: &str,
    expected_path: &str,
    config: &QualityConfig,
) -> Result<(), String> {
    validate_fixture_with_options(input_path, expected_path, config, false)
}
/// Same as [`validate_fixture`], but with fence repair enabled during
/// processing.
///
/// # Errors
///
/// Returns a human-readable message when a file cannot be read, the quality
/// thresholds are not met, or the normalized output does not match.
pub fn validate_fixture_with_fences(
    input_path: &str,
    expected_path: &str,
    config: &QualityConfig,
) -> Result<(), String> {
    validate_fixture_with_options(input_path, expected_path, config, true)
}
/// Shared fixture-validation implementation: reads both files, processes the
/// input in diagram mode, gates on transformation quality, then compares the
/// normalized output against the expected file.
fn validate_fixture_with_options(
    input_path: &str,
    expected_path: &str,
    config: &QualityConfig,
    repair_fences: bool,
) -> Result<(), String> {
    let input = std::fs::read_to_string(input_path)
        .map_err(|e| format!("Failed to read input {input_path}: {e}"))?;
    let expected = std::fs::read_to_string(expected_path)
        .map_err(|e| format!("Failed to read expected {expected_path}: {e}"))?;
    // Always runs Diagram mode with the default config; only fence repair is
    // configurable from the callers.
    let processed =
        modes::process_by_mode(&Mode::Diagram, &input, repair_fences, &Config::default());
    let report = validate_quality(&input, &processed);
    // Quality gate first: fail early with a full metric breakdown.
    if !report.is_acceptable(config) {
        return Err(format!(
            "Quality validation failed for {}:\n\
             Score: {:.2}\n\
             Text preservation: {:.2} (min: {:.2})\n\
             Structure preservation: {:.2} (min: {:.2})\n\
             Line delta: {} (max: {})\n\
             Text corruption: {} (allowed: {})\n\
             Data loss: {} (allowed: {})\n\
             Issues: {}",
            input_path,
            report.score,
            report.metrics.text_preservation,
            config.min_text_preservation,
            report.metrics.structure_preservation,
            config.min_structure_preservation,
            report.metrics.line_count_delta,
            config.max_line_count_delta,
            report.metrics.text_corruption_count,
            config.allow_text_corruption,
            report.metrics.data_loss_count,
            config.allow_data_loss,
            report.issues.len()
        ));
    }
    // Exact-match comparison, ignoring trailing whitespace per line and
    // leading/trailing blank space overall.
    let processed_normalized = normalize_output(&processed);
    let expected_normalized = normalize_output(&expected);
    if processed_normalized != expected_normalized {
        return Err(format!(
            "Output mismatch for {}\n\
             Expected length: {}\n\
             Got length: {}",
            input_path,
            expected_normalized.len(),
            processed_normalized.len()
        ));
    }
    Ok(())
}
/// Normalizes rendered output for comparison: strips trailing whitespace from
/// every line, joins the lines with `\n`, and trims surrounding whitespace.
fn normalize_output(output: &str) -> String {
    let mut joined = String::with_capacity(output.len());
    for (idx, line) in output.lines().enumerate() {
        if idx > 0 {
            joined.push('\n');
        }
        joined.push_str(line.trim_end());
    }
    joined.trim().to_string()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// An output identical to a well-formed input must score near-perfect
    /// with no corruption or loss reported.
    #[test]
    fn test_quality_validation_clean_input() {
        let input = r"
┌──────────┐
│ Clean │
│ Text │
└──────────┘
";
        // Identity transformation: nothing should be flagged.
        let output = input;
        let report = validate_quality(input, output);
        assert!(report.score > 0.95, "Clean input should have high score");
        assert_eq!(report.metrics.text_corruption_count, 0);
        assert_eq!(report.metrics.data_loss_count, 0);
        assert!(report.metrics.text_preservation > 0.95);
    }

    /// An arrow glyph introduced into box text should lower the score and
    /// register at least one text-corruption issue.
    #[test]
    fn test_quality_validation_text_corruption() {
        let input = r"
┌──────────┐
│ Clean │
│ Text │
└──────────┘
";
        // Same diagram, but with a stray '↑' appended to a word.
        let output = r"
┌──────────┐
│ Clean↑ │
│ Text │
└──────────┘
";
        let report = validate_quality(input, output);
        assert!(
            report.score < 0.9,
            "Corrupted output should have lower score"
        );
        assert!(report.metrics.text_corruption_count > 0);
        assert!(report
            .issues
            .iter()
            .any(|issue| matches!(issue, QualityIssue::TextCorruption { .. })));
    }

    /// Dropping a line from the diagram must show up as a negative line
    /// delta and a reduced score.
    #[test]
    fn test_quality_validation_data_loss() {
        let input = r"
┌──────────┐
│ Original │
│ Content │
└──────────┘
";
        // One content line removed from the output.
        let output = r"
┌──────────┐
│ Original │
└──────────┘
";
        let report = validate_quality(input, output);
        assert!(
            report.metrics.line_count_delta < 0,
            "Should detect line loss"
        );
        assert!(report.score < 1.0, "Data loss should reduce score");
    }
}