// spawn_access_control/model_explainer.rs

1use crate::model_optimizer::ModelParameters;
2use crate::ml_metrics::ModelMetrics;
3use serde::Serialize;
4use std::collections::HashMap;
5
/// Complete, serializable explanation of a trained access-control model:
/// per-feature importance, illustrative decision paths, annotated
/// performance metrics, and a security-impact summary.
#[derive(Debug, Serialize)]
pub struct ModelExplanation {
    /// Importance score per feature name, normalized to sum to 1.0.
    pub feature_importance: HashMap<String, f64>,
    /// Representative decision paths through the model.
    pub decision_paths: Vec<DecisionPath>,
    /// Raw metrics plus human-readable explanations and recommendations.
    pub performance_metrics: ExplainedMetrics,
    /// Security-oriented view of the model's error profile.
    pub security_impact: SecurityImpactAnalysis,
}
13
/// A single root-to-leaf decision path through the model, with the
/// confidence and impact attributed to that path.
#[derive(Debug, Serialize)]
pub struct DecisionPath {
    /// Ordered sequence of threshold decisions from root to leaf.
    pub path: Vec<DecisionNode>,
    /// Confidence of the decision reached along this path.
    pub confidence: f64,
    /// Relative impact of this path on the model's output.
    pub impact: f64,
}
20
/// One threshold comparison inside a [`DecisionPath`].
#[derive(Debug, Serialize)]
pub struct DecisionNode {
    /// Name of the feature being compared.
    pub feature: String,
    /// Threshold value the feature is compared against.
    pub threshold: f64,
    /// Comparison direction, e.g. "greater_than" / "less_than".
    // NOTE(review): stringly-typed; an enum would prevent invalid values,
    // but changing it would alter the serialized shape — confirm consumers first.
    pub direction: String,
}
27
/// Model metrics paired with per-metric prose explanations and
/// actionable tuning recommendations.
#[derive(Debug, Serialize)]
pub struct ExplainedMetrics {
    /// The underlying metric values.
    pub metrics: ModelMetrics,
    /// Human-readable explanation keyed by metric name (e.g. "f1_score").
    pub explanations: HashMap<String, String>,
    /// Suggested follow-up actions derived from the metric values.
    pub recommendations: Vec<String>,
}
34
/// Security-oriented summary of a model's error profile.
#[derive(Debug, Clone, Serialize)]
pub struct SecurityImpactAnalysis {
    /// Expected false-positive impact, computed as `1.0 - precision`.
    pub false_positive_impact: f64,
    /// Risk factors triggered by metric thresholds; may be empty.
    pub risk_factors: Vec<RiskFactor>,
}
40
41impl SecurityImpactAnalysis {
42    pub fn analyze_security_impact(&self, metrics: &ModelMetrics) -> Self {
43        Self {
44            false_positive_impact: 1.0 - metrics.precision,
45            risk_factors: self.analyze_risk_factors(metrics),
46        }
47    }
48
49    fn analyze_risk_factors(&self, metrics: &ModelMetrics) -> Vec<RiskFactor> {
50        let mut factors = Vec::new();
51        
52        if metrics.precision < 0.9 {
53            factors.push(RiskFactor {
54                name: "High False Positive Rate".to_string(),
55                impact_score: 1.0 - metrics.precision,
56            });
57        }
58        
59        factors
60    }
61}
62
/// A named security risk with its estimated severity.
#[derive(Clone, Debug, Serialize)]
pub struct RiskFactor {
    /// Human-readable risk name, e.g. "High False Positive Rate".
    pub name: String,
    /// Severity score; here derived from metric shortfalls (e.g. `1.0 - precision`).
    pub impact_score: f64,
}
68
/// Produces [`ModelExplanation`]s from model parameters and metrics.
pub struct ModelExplainer {
    /// Feature names, indexed in the same order the model's features use.
    feature_names: Vec<String>,
    // Retained for future use; not yet read by any method in this impl.
    #[allow(dead_code)]
    security_config: SecurityConfig,
}
74
/// Weights and thresholds governing how security risk is scored.
// Debug added: public config types should be debuggable/loggable,
// matching the other public types in this module.
#[derive(Clone, Debug)]
pub struct SecurityConfig {
    /// Relative cost assigned to false positives (wrongly denied access).
    pub false_positive_weight: f64,
    /// Relative cost assigned to false negatives (wrongly granted access).
    pub false_negative_weight: f64,
    /// Score above which a finding is treated as a risk.
    pub risk_threshold: f64,
}
81
82impl ModelExplainer {
83    pub fn new(feature_names: Vec<String>, security_config: SecurityConfig) -> Self {
84        Self {
85            feature_names,
86            security_config,
87        }
88    }
89
90    pub fn explain_model(&self, params: &ModelParameters, metrics: &ModelMetrics) -> ModelExplanation {
91        ModelExplanation {
92            feature_importance: self.calculate_feature_importance(params),
93            decision_paths: self.analyze_decision_paths(),
94            performance_metrics: self.explain_metrics(metrics),
95            security_impact: self.analyze_security_impact(metrics),
96        }
97    }
98
99    fn calculate_feature_importance(&self, params: &ModelParameters) -> HashMap<String, f64> {
100        let mut importance = HashMap::new();
101        
102        // Feature önem skorlarını hesapla
103        for (idx, feature) in self.feature_names.iter().enumerate() {
104            let score = self.calculate_feature_score(idx, params);
105            importance.insert(feature.clone(), score);
106        }
107
108        // Skorları normalize et
109        let total: f64 = importance.values().sum();
110        for score in importance.values_mut() {
111            *score /= total;
112        }
113
114        importance
115    }
116
117    fn calculate_feature_score(&self, feature_idx: usize, params: &ModelParameters) -> f64 {
118        // Feature önem skoru hesaplama mantığı
119        let base_score = 1.0 / self.feature_names.len() as f64;
120        let depth_factor = (-((params.max_depth as f64 - 10.0).powi(2)) / 100.0).exp();
121        
122        base_score * depth_factor * (1.0 + (feature_idx as f64 / 10.0))
123    }
124
125    fn analyze_decision_paths(&self) -> Vec<DecisionPath> {
126        // Örnek karar yolları analizi
127        vec![
128            DecisionPath {
129                path: vec![
130                    DecisionNode {
131                        feature: "time_of_day".to_string(),
132                        threshold: 18.0,
133                        direction: "greater_than".to_string(),
134                    },
135                    DecisionNode {
136                        feature: "failed_attempts".to_string(),
137                        threshold: 3.0,
138                        direction: "less_than".to_string(),
139                    },
140                ],
141                confidence: 0.85,
142                impact: 0.7,
143            }
144        ]
145    }
146
147    fn explain_metrics(&self, metrics: &ModelMetrics) -> ExplainedMetrics {
148        let mut explanations = HashMap::new();
149        let mut recommendations = Vec::new();
150
151        // Metrikleri açıkla
152        explanations.insert(
153            "f1_score".to_string(),
154            format!("F1 score of {:.2} indicates balanced performance", metrics.f1_score)
155        );
156
157        // Öneriler oluştur
158        if metrics.f1_score < 0.8 {
159            recommendations.push("Consider increasing training data diversity".to_string());
160        }
161        if metrics.precision < metrics.recall {
162            recommendations.push("Model might be too aggressive, consider adjusting threshold".to_string());
163        }
164
165        ExplainedMetrics {
166            metrics: metrics.clone(),
167            explanations,
168            recommendations,
169        }
170    }
171
172    fn analyze_security_impact(&self, metrics: &ModelMetrics) -> SecurityImpactAnalysis {
173        SecurityImpactAnalysis {
174            false_positive_impact: 1.0 - metrics.precision,
175            risk_factors: vec![], // Boş risk faktörleri ile başla
176        }
177    }
178}