use serde::{Deserialize, Serialize};
/// Aggregated quality assessment for a single Dockerfile.
///
/// Each dimension below is a 0.0–10.0 value; `score` is a weighted blend of
/// the five dimensions and `grade` is the letter grade derived from it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DockerfileQualityScore {
    /// Letter grade derived from `score`; starts as "F".
    pub grade: String,
    /// Weighted overall score combining the five dimensions below.
    pub score: f64,
    /// Shell-safety dimension (e.g. `set -euo pipefail` usage in RUN lines).
    pub safety: f64,
    /// Structural complexity dimension (count/length of RUN instructions).
    pub complexity: f64,
    /// Layer-optimization dimension (combined commands, cache cleanup,
    /// multi-stage builds); lint findings subtract from it.
    pub layer_optimization: f64,
    /// Determinism dimension; lint findings subtract from it.
    pub determinism: f64,
    /// Security dimension; lint findings subtract from it.
    pub security: f64,
    /// Human-readable improvement suggestions, including lint diagnostics.
    pub suggestions: Vec<String>,
}
impl DockerfileQualityScore {
    /// Builds an empty assessment: every dimension at 0.0, grade "F",
    /// and no suggestions.
    pub fn new() -> Self {
        Self {
            grade: "F".to_string(),
            suggestions: Vec::new(),
            score: 0.0,
            safety: 0.0,
            complexity: 0.0,
            layer_optimization: 0.0,
            determinism: 0.0,
            security: 0.0,
        }
    }
}
impl Default for DockerfileQualityScore {
fn default() -> Self {
Self::new()
}
}
/// Score deductions accumulated from lint diagnostics, grouped by the
/// scoring dimension each diagnostic code affects.
struct LintPenalty {
    // Deduction applied to the determinism dimension.
    determinism: f64,
    // Deduction applied to the security dimension.
    security: f64,
    // Deduction applied to the layer-optimization dimension.
    layer: f64,
}
/// Sums per-dimension deductions for every lint diagnostic, keyed by the
/// diagnostic's `DOCKER0xx` code. Unknown codes contribute nothing.
fn calculate_lint_penalty(lint_results: &crate::linter::LintResult) -> LintPenalty {
    let mut totals = LintPenalty {
        determinism: 0.0,
        security: 0.0,
        layer: 0.0,
    };
    for diag in &lint_results.diagnostics {
        match diag.code.as_str() {
            // Security-related findings.
            "DOCKER001" | "DOCKER004" => totals.security += 0.5,
            "DOCKER006" => totals.security += 0.05,
            // Determinism-related findings.
            "DOCKER002" => totals.determinism += 0.2,
            // Layer-optimization findings.
            "DOCKER003" => totals.layer += 0.2,
            "DOCKER005" => totals.layer += 0.1,
            _ => {}
        }
    }
    totals
}
pub fn score_dockerfile(source: &str) -> Result<DockerfileQualityScore, String> {
let mut score = DockerfileQualityScore::new();
use crate::linter::rules::lint_dockerfile;
let lint_results = lint_dockerfile(source);
let lint_penalty = calculate_lint_penalty(&lint_results);
score.safety = calculate_safety_score(source);
score.complexity = calculate_complexity_score(source);
score.layer_optimization = calculate_layer_optimization_score(source);
score.determinism = calculate_determinism_score(source);
score.security = calculate_security_score(source);
score.determinism = (score.determinism - lint_penalty.determinism).max(0.0);
score.security = (score.security - lint_penalty.security).max(0.0);
score.layer_optimization = (score.layer_optimization - lint_penalty.layer).max(0.0);
score.score = (score.safety * 0.30)
+ (score.complexity * 0.25)
+ (score.layer_optimization * 0.20)
+ (score.determinism * 0.15)
+ (score.security * 0.10);
score.grade = calculate_grade(score.score);
score.suggestions = generate_suggestions(source, &score);
for diag in &lint_results.diagnostics {
score
.suggestions
.push(format!("Line {}: {}", diag.span.start_line, diag.message));
}
Ok(score)
}
/// Rates shell safety of RUN instructions on a 0.0–10.0 scale.
///
/// A RUN line counts as "safe" when it enables strict mode, either via
/// `set -euo pipefail` or via both `set -e` and `set -o pipefail`.
/// The base score comes from the safe/total ratio; explicit error handling
/// (strict mode or `|| exit` / `|| return`) earns a +1 bonus, capped at 10.
fn calculate_safety_score(source: &str) -> f64 {
    if source.trim().is_empty() {
        return 0.0;
    }
    let run_lines: Vec<&str> = source
        .lines()
        .map(str::trim)
        .filter(|l| l.starts_with("RUN "))
        .collect();
    // No RUN instructions at all: neutral middle score.
    if run_lines.is_empty() {
        return 5.0;
    }
    let mut safe = 0usize;
    let mut handles_errors = false;
    for cmd in &run_lines {
        let strict = cmd.contains("set -euo pipefail")
            || (cmd.contains("set -e") && cmd.contains("set -o pipefail"));
        if strict {
            safe += 1;
            handles_errors = true;
        }
        if cmd.contains("|| exit") || cmd.contains("|| return") {
            handles_errors = true;
        }
    }
    let ratio = safe as f64 / run_lines.len() as f64;
    let base: f64 = if ratio >= 0.8 {
        10.0
    } else if ratio >= 0.6 {
        8.0
    } else if ratio >= 0.4 {
        6.0
    } else if ratio >= 0.2 {
        4.0
    } else {
        2.0
    };
    let total = if handles_errors { base + 1.0 } else { base };
    total.min(10.0)
}
/// Rates structural complexity on a 0.0–10.0 scale from two signals:
/// the number of RUN instructions and the longest RUN instruction measured
/// in physical lines (backslash continuations included). The final score is
/// the midpoint of the two bracketed sub-scores.
///
/// Fixes over the previous version: the line immediately after a RUN was
/// counted as part of the run even when the RUN did not end with `\`
/// (off-by-one), and a RUN directly following another RUN reset the counter
/// without recording the earlier run's length. Continuation membership is
/// now decided by whether the *previous* line ended with a backslash.
fn calculate_complexity_score(source: &str) -> f64 {
    if source.lines().next().is_none() {
        return 0.0;
    }
    let mut run_count: u32 = 0;
    let mut longest_run: u32 = 0;
    // Lines in the RUN instruction currently being scanned (0 = not in one).
    let mut current: u32 = 0;
    // True when the previous line ended with `\`, i.e. this line continues it.
    let mut continuing = false;
    for line in source.lines() {
        let trimmed = line.trim();
        if continuing {
            // This line belongs to the RUN opened earlier.
            current += 1;
            continuing = trimmed.ends_with('\\');
        } else if trimmed.starts_with("RUN ") {
            // Close out any previous run before starting a new one.
            longest_run = longest_run.max(current);
            run_count += 1;
            current = 1;
            continuing = trimmed.ends_with('\\');
        } else {
            longest_run = longest_run.max(current);
            current = 0;
        }
    }
    // Account for a run that extends to the end of the file.
    longest_run = longest_run.max(current);

    let run_score = match run_count {
        0 => 5.0,
        1..=3 => 10.0,
        4..=6 => 8.0,
        7..=10 => 6.0,
        11..=15 => 4.0,
        _ => 2.0,
    };
    let length_score = match longest_run {
        0 => 5.0,
        1..=5 => 10.0,
        6..=10 => 8.0,
        11..=20 => 6.0,
        21..=30 => 4.0,
        _ => 2.0,
    };
    f64::midpoint(run_score, length_score)
}
/// Rates image-layer hygiene on a 0.0–10.0 scale by accumulating points for:
/// having any RUN instruction (+2), chaining commands with `&&` (+4),
/// cleaning package caches or using `--no-cache` (+3), and using a
/// multi-stage build via `FROM … AS …` (+1).
fn calculate_layer_optimization_score(source: &str) -> f64 {
    if source.trim().is_empty() {
        return 0.0;
    }
    // Commands that remove package-manager caches from the image.
    const CLEANUP_MARKERS: [&str; 4] = [
        "rm -rf /var/cache/apk/*",
        "apt-get clean",
        "yum clean all",
        "rm -rf /var/lib/apt/lists/*",
    ];
    let mut seen_run = false;
    let mut combined = false;
    let mut cache_hygiene = false;
    let mut multistage = false;
    for raw in source.lines() {
        let line = raw.trim();
        if line.starts_with("RUN ") {
            seen_run = true;
            combined = combined || line.contains("&&");
            cache_hygiene = cache_hygiene
                || line.contains("--no-cache")
                || CLEANUP_MARKERS.iter().any(|m| line.contains(m));
        } else if line.starts_with("FROM ") && line.contains(" AS ") {
            multistage = true;
        }
    }
    let mut total: f64 = 0.0;
    if seen_run {
        total += 2.0;
    }
    if combined {
        total += 4.0;
    }
    if cache_hygiene {
        total += 3.0;
    }
    if multistage {
        total += 1.0;
    }
    total.min(10.0)
}
include!("dockerfile_scoring_calculate.rs");