spawn_access_control/model_explainer.rs

use crate::model_optimizer::ModelParameters;
use crate::ml_metrics::ModelMetrics;
use serde::Serialize;
use std::collections::HashMap;

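/// Aggregated explanation output returned by `ModelExplainer::explain_model`.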
#[derive(Debug, Serialize)]
pub struct ModelExplanation {
    pub feature_importance: HashMap<String, f64>,
    pub decision_paths: Vec<DecisionPath>,
    pub performance_metrics: ExplainedMetrics,
    pub security_impact: SecurityImpactAnalysis,
}

#[derive(Debug, Serialize)]
pub struct DecisionPath {
    pub path: Vec<DecisionNode>,
    pub confidence: f64,
    pub impact: f64,
}

#[derive(Debug, Serialize)]
pub struct DecisionNode {
    pub feature: String,
    pub threshold: f64,
    pub direction: String,
}

#[derive(Debug, Serialize)]
pub struct ExplainedMetrics {
    pub metrics: ModelMetrics,
    pub explanations: HashMap<String, String>,
    pub recommendations: Vec<String>,
}

#[derive(Debug, Clone, Serialize)]
pub struct SecurityImpactAnalysis {
    pub false_positive_impact: f64,
    pub risk_factors: Vec<RiskFactor>,
}

impl SecurityImpactAnalysis {
    pub fn analyze_security_impact(&self, metrics: &ModelMetrics) -> Self {
        Self {
            false_positive_impact: 1.0 - metrics.precision,
            risk_factors: self.analyze_risk_factors(metrics),
        }
    }

    fn analyze_risk_factors(&self, metrics: &ModelMetrics) -> Vec<RiskFactor> {
        let mut factors = Vec::new();
        
        if metrics.precision < 0.9 {
            factors.push(RiskFactor {
                name: "High False Positive Rate".to_string(),
                impact_score: 1.0 - metrics.precision,
            });
        }
        
        factors
    }
}

#[derive(Clone, Debug, Serialize)]
pub struct RiskFactor {
    pub name: String,
    pub impact_score: f64,
}

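/// Produces human-readable explanations (feature importance, decision paths,
/// metric commentary, and security impact) for an access-control model.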
pub struct ModelExplainer {
    feature_names: Vec<String>,
    #[allow(dead_code)]
    security_config: SecurityConfig,
}

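/// Security-specific weights and threshold; currently stored on `ModelExplainer`
/// but not yet consumed (note the `#[allow(dead_code)]` on the field).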
#[derive(Clone)]
pub struct SecurityConfig {
    pub false_positive_weight: f64,
    pub false_negative_weight: f64,
    pub risk_threshold: f64,
}

impl ModelExplainer {
    pub fn new(feature_names: Vec<String>, security_config: SecurityConfig) -> Self {
        Self {
            feature_names,
            security_config,
        }
    }

    pub fn explain_model(&self, params: &ModelParameters, metrics: &ModelMetrics) -> ModelExplanation {
        ModelExplanation {
            feature_importance: self.calculate_feature_importance(params),
            decision_paths: self.analyze_decision_paths(),
            performance_metrics: self.explain_metrics(metrics),
            security_impact: self.analyze_security_impact(metrics),
        }
    }

    fn calculate_feature_importance(&self, params: &ModelParameters) -> HashMap<String, f64> {
        let mut importance = HashMap::new();
        
        // Compute a raw importance score for each named feature
        for (idx, feature) in self.feature_names.iter().enumerate() {
            let score = self.calculate_feature_score(idx, params);
            importance.insert(feature.clone(), score);
        }

        // Normalize the scores so they sum to 1.0; guard against an empty feature set
        let total: f64 = importance.values().sum();
        if total > 0.0 {
            for score in importance.values_mut() {
                *score /= total;
            }
        }

        importance
    }

    fn calculate_feature_score(&self, feature_idx: usize, params: &ModelParameters) -> f64 {
        // Heuristic importance score: a uniform base score, damped the further
        // max_depth is from 10 and scaled up slightly for later feature indices
        let base_score = 1.0 / self.feature_names.len() as f64;
        let depth_factor = (-((params.max_depth as f64 - 10.0).powi(2)) / 100.0).exp();
        
        base_score * depth_factor * (1.0 + (feature_idx as f64 / 10.0))
    }

    fn analyze_decision_paths(&self) -> Vec<DecisionPath> {
        // Example decision-path analysis (returns a hard-coded illustrative path)
        vec![
            DecisionPath {
                path: vec![
                    DecisionNode {
                        feature: "time_of_day".to_string(),
                        threshold: 18.0,
                        direction: "greater_than".to_string(),
                    },
                    DecisionNode {
                        feature: "failed_attempts".to_string(),
                        threshold: 3.0,
                        direction: "less_than".to_string(),
                    },
                ],
                confidence: 0.85,
                impact: 0.7,
            }
        ]
    }

    fn explain_metrics(&self, metrics: &ModelMetrics) -> ExplainedMetrics {
        let mut explanations = HashMap::new();
        let mut recommendations = Vec::new();

        // Explain the individual metrics
        explanations.insert(
            "f1_score".to_string(),
            format!("F1 score of {:.2} indicates balanced performance", metrics.f1_score)
        );

        // Generate recommendations based on metric thresholds
        if metrics.f1_score < 0.8 {
            recommendations.push("Consider increasing training data diversity".to_string());
        }
        if metrics.precision < metrics.recall {
            recommendations.push("Model might be too aggressive, consider adjusting threshold".to_string());
        }

        ExplainedMetrics {
            metrics: metrics.clone(),
            explanations,
            recommendations,
        }
    }

    fn analyze_security_impact(&self, metrics: &ModelMetrics) -> SecurityImpactAnalysis {
        SecurityImpactAnalysis {
            false_positive_impact: 1.0 - metrics.precision,
            risk_factors: vec![], // Start with an empty set of risk factors
        }
    }
}
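
// --- Minimal usage sketch --------------------------------------------------
// A hedged unit-test sketch exercising only types defined in this file
// (ModelExplainer, SecurityConfig). The feature names mirror the hard-coded
// decision path above; the SecurityConfig values are illustrative assumptions,
// not tuned defaults.
#[cfg(test)]
mod tests {
    use super::*;

    fn example_explainer() -> ModelExplainer {
        ModelExplainer::new(
            vec!["time_of_day".to_string(), "failed_attempts".to_string()],
            SecurityConfig {
                false_positive_weight: 0.5,
                false_negative_weight: 0.5,
                risk_threshold: 0.8,
            },
        )
    }

    #[test]
    fn decision_paths_report_bounded_confidence() {
        let explainer = example_explainer();
        let paths = explainer.analyze_decision_paths();

        // The current implementation returns one hard-coded example path.
        assert!(!paths.is_empty());
        assert!(paths
            .iter()
            .all(|p| (0.0..=1.0).contains(&p.confidence)));
    }
}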