// devops-validate 0.1.0
//
// YAML validation and auto-repair engine for DevOps configuration files:
// Kubernetes, Docker Compose, GitLab CI, GitHub Actions, Prometheus,
// Alertmanager, Helm, and Ansible.
//! Rule loader for built-in and custom rules
//!
//! Loads rules from embedded YAML or external sources.

use super::engine::{Rule, RuleEngine};

/// Embedded Kubernetes best-practice rules.
///
/// Each entry carries an `id` (namespaced `k8s/...`), a JSONPath-style
/// `condition` evaluated against the manifest, a `severity`
/// (`error` | `warning` | `info` | `hint`), and a user-facing `message`.
/// Parsed by [`parse_rules_yaml`] into `Vec<Rule>`.
///
/// NOTE(review): several conditions inspect only `containers[0]` — sidecar
/// containers appear to be unchecked; confirm whether the rule engine
/// expands indices or this is intentional.
const K8S_RULES_YAML: &str = r#"
rules:
  - id: k8s/replicas-1
    condition: '$.spec.replicas == 1'
    severity: warning
    message: 'replicas=1 — consider >=2 for high availability'

  - id: k8s/no-resource-limits
    condition: '$.spec.template.spec.containers[0].resources.limits == null'
    severity: warning
    message: 'Container has no resource limits — may cause OOM kills'

  - id: k8s/no-liveness-probe
    condition: '$.spec.template.spec.containers[0].livenessProbe == null'
    severity: warning
    message: 'Container has no livenessProbe — Kubernetes will not detect hangs'

  - id: k8s/no-readiness-probe
    condition: '$.spec.template.spec.containers[0].readinessProbe == null'
    severity: warning
    message: 'Container has no readinessProbe — traffic may be sent to unready pods'

  - id: k8s/latest-image-tag
    condition: '$.spec.template.spec.containers[0].image contains ":latest"'
    severity: warning
    message: 'Image uses :latest tag — pin a specific version for reproducibility'

  - id: k8s/no-resource-requests
    condition: '$.spec.template.spec.containers[0].resources.requests == null'
    severity: info
    message: 'Container has no resource requests — scheduler cannot make optimal placement decisions'

  - id: k8s/always-pull-policy
    condition: '$.spec.template.spec.containers[0].imagePullPolicy == "Always"'
    severity: info
    message: 'imagePullPolicy=Always adds latency to every pod start — use IfNotPresent with a pinned tag'

  - id: k8s/no-security-context
    condition: '$.spec.template.spec.containers[0].securityContext == null'
    severity: hint
    message: 'No securityContext set — consider runAsNonRoot, readOnlyRootFilesystem, allowPrivilegeEscalation: false'

  - id: k8s/host-network
    condition: '$.spec.template.spec.hostNetwork == true'
    severity: warning
    message: 'hostNetwork=true — pods share the host network namespace (security risk)'

  - id: k8s/privileged-container
    condition: '$.spec.template.spec.containers[0].securityContext.privileged == true'
    severity: warning
    message: 'privileged=true — container has full host access (security risk)'

  - id: k8s/run-as-root
    condition: '$.spec.template.spec.containers[0].securityContext.runAsNonRoot != true'
    severity: info
    message: 'Container may run as root — consider runAsNonRoot: true'

  - id: k8s/service-type-loadbalancer
    condition: '$.spec.type == "LoadBalancer"'
    severity: warning
    message: 'type=LoadBalancer creates a cloud load balancer — ensure this is intentional (cost implications)'

  - id: k8s/service-empty-selector
    condition: '$.spec.selector == null'
    severity: warning
    message: 'Service has no selector — will match no pods'

  - id: k8s/hpa-min-gt-max
    condition: '$.spec.minReplicas > $.spec.maxReplicas'
    severity: error
    message: 'minReplicas cannot be greater than maxReplicas'
"#;

/// Embedded GitLab CI rules.
///
/// Same document shape as [`K8S_RULES_YAML`]: a top-level `rules:` list of
/// `{id, condition, severity, message}` entries, parsed by
/// [`parse_rules_yaml`].
const GITLAB_CI_RULES_YAML: &str = r#"
rules:
  - id: gitlab-ci/no-stages
    condition: '$.stages == null'
    severity: info
    message: 'No stages defined — using default stages: build, test, deploy'
"#;

/// Load built-in rules for all supported types.
///
/// Aggregates every embedded rule set (currently K8s and GitLab CI) into a
/// single [`RuleEngine`].
///
/// # Panics
///
/// Panics if an embedded rules constant fails to parse. The constants are
/// compiled into the binary, so a parse failure is a programmer error that
/// should surface loudly (the previous `if let Ok(..)` silently dropped the
/// whole rule set, shipping an engine with missing rules and no diagnostic).
pub fn load_builtin_rules() -> RuleEngine {
    let mut rules = Vec::new();

    rules.extend(
        parse_rules_yaml(K8S_RULES_YAML).expect("embedded K8s rules YAML must parse"),
    );
    rules.extend(
        parse_rules_yaml(GITLAB_CI_RULES_YAML).expect("embedded GitLab CI rules YAML must parse"),
    );

    RuleEngine::with_rules(rules)
}

/// Load rules for a specific YAML document type.
///
/// `yaml_type` may carry a subtype suffix (e.g. `"k8s/deployment"`); only
/// the segment before the first `/` selects the rule set. Unknown families
/// — and embedded rule sets that fail to parse — yield an empty engine.
pub fn load_rules_for_type(yaml_type: &str) -> RuleEngine {
    // `split` always yields at least one segment, so the fallback is defensive.
    let family = yaml_type.split('/').next().unwrap_or(yaml_type);

    let source = match family {
        "k8s" => Some(K8S_RULES_YAML),
        "gitlab-ci" => Some(GITLAB_CI_RULES_YAML),
        _ => None,
    };

    let rules = source
        .and_then(|yaml| parse_rules_yaml(yaml).ok())
        .unwrap_or_default();

    RuleEngine::with_rules(rules)
}

/// Parse a `rules:` document from a YAML string into a flat rule list.
///
/// Returns a human-readable error string when the document does not match
/// the expected `{ rules: [...] }` shape.
fn parse_rules_yaml(yaml: &str) -> Result<Vec<Rule>, String> {
    // Top-level wrapper mirroring the embedded rule documents.
    #[derive(serde::Deserialize)]
    struct RulesDoc {
        rules: Vec<Rule>,
    }

    match serde_yaml::from_str::<RulesDoc>(yaml) {
        Ok(doc) => Ok(doc.rules),
        Err(e) => Err(format!("Failed to parse rules YAML: {}", e)),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// The aggregate of all embedded rule sets is non-empty.
    #[test]
    fn test_load_builtin_rules() {
        assert!(load_builtin_rules().rule_count() > 0);
    }

    /// A subtype suffix ("k8s/deployment") still selects the k8s rule set.
    #[test]
    fn test_load_k8s_rules() {
        assert!(load_rules_for_type("k8s/deployment").rule_count() > 0);
    }

    /// The embedded K8s YAML parses and preserves document order.
    #[test]
    fn test_parse_rules_yaml() {
        let parsed = parse_rules_yaml(K8S_RULES_YAML).expect("embedded rules must parse");
        assert!(!parsed.is_empty());

        let head = &parsed[0];
        assert_eq!(head.id, "k8s/replicas-1");
        assert_eq!(head.severity, "warning");
    }
}