// spawn_access_control/model_explainer.rs
use crate::model_optimizer::ModelParameters;
use crate::ml_metrics::ModelMetrics;
use serde::Serialize;
use std::collections::HashMap;
/// Complete, serializable explanation of a trained model: what drove its
/// decisions and what its error profile implies for security.
#[derive(Debug, Serialize)]
pub struct ModelExplanation {
    /// Importance score per feature name, normalized so scores sum to 1.0.
    pub feature_importance: HashMap<String, f64>,
    /// Representative decision paths through the model.
    pub decision_paths: Vec<DecisionPath>,
    /// Raw metrics annotated with explanations and recommendations.
    pub performance_metrics: ExplainedMetrics,
    /// Security-oriented reading of the model's error rates.
    pub security_impact: SecurityImpactAnalysis,
}
/// A single root-to-leaf decision sequence with its associated
/// confidence and impact weighting.
#[derive(Debug, Serialize)]
pub struct DecisionPath {
    /// Ordered threshold comparisons from root to leaf.
    pub path: Vec<DecisionNode>,
    /// Confidence in this path's outcome (0.0–1.0 presumed — confirm with producer).
    pub confidence: f64,
    /// Relative contribution of this path to overall decisions.
    pub impact: f64,
}
/// One comparison in a decision path: feature vs. threshold.
#[derive(Debug, Serialize)]
pub struct DecisionNode {
    /// Name of the feature being tested.
    pub feature: String,
    /// Split threshold the feature value is compared against.
    pub threshold: f64,
    /// Branch taken, e.g. "greater_than" or "less_than".
    pub direction: String,
}
/// Model metrics bundled with human-readable explanations and
/// actionable recommendations.
#[derive(Debug, Serialize)]
pub struct ExplainedMetrics {
    /// The underlying metrics being explained.
    pub metrics: ModelMetrics,
    /// Per-metric prose explanation, keyed by metric name (e.g. "f1_score").
    pub explanations: HashMap<String, String>,
    /// Suggested follow-up actions derived from the metrics.
    pub recommendations: Vec<String>,
}
/// Security-oriented reading of a model's error rates.
#[derive(Debug, Clone, Serialize)]
pub struct SecurityImpactAnalysis {
    /// Complement of precision (1.0 - precision): the share of flagged
    /// events that are false alarms.
    pub false_positive_impact: f64,
    /// Identified risk factors, each with an impact score.
    pub risk_factors: Vec<RiskFactor>,
}
impl SecurityImpactAnalysis {
    /// Builds a fresh analysis from `metrics`: the false-positive impact is
    /// the complement of precision, and risk factors are derived from the
    /// same metrics. Note this returns a new value rather than mutating
    /// `self`; the receiver is only used to reach `analyze_risk_factors`.
    pub fn analyze_security_impact(&self, metrics: &ModelMetrics) -> Self {
        let risk_factors = self.analyze_risk_factors(metrics);
        Self {
            false_positive_impact: 1.0 - metrics.precision,
            risk_factors,
        }
    }

    /// Flags a "High False Positive Rate" risk when precision falls below
    /// 0.9, scored by how far precision falls short of 1.0.
    fn analyze_risk_factors(&self, metrics: &ModelMetrics) -> Vec<RiskFactor> {
        if metrics.precision >= 0.9 {
            return Vec::new();
        }
        vec![RiskFactor {
            name: "High False Positive Rate".to_string(),
            impact_score: 1.0 - metrics.precision,
        }]
    }
}
/// A named security risk with a numeric severity score.
#[derive(Clone, Debug, Serialize)]
pub struct RiskFactor {
    /// Human-readable label, e.g. "High False Positive Rate".
    pub name: String,
    /// Severity of this factor (higher is worse).
    pub impact_score: f64,
}
/// Generates `ModelExplanation`s for a model over a fixed set of
/// named features.
pub struct ModelExplainer {
    /// Feature names in model input order (index = feature position).
    feature_names: Vec<String>,
    // NOTE(review): stored but not yet consumed by any method here —
    // hence the dead_code allowance. Confirm intended use before removal.
    #[allow(dead_code)]
    security_config: SecurityConfig,
}
/// Weights and threshold governing security-oriented model evaluation.
///
/// Fix: added `Debug` — public config types should be printable for
/// diagnostics (the other public types in this module already derive it).
// NOTE(review): these fields are not yet consumed by ModelExplainer
// (its copy is marked #[allow(dead_code)]); semantics presumed to be
// relative error costs — confirm with the eventual consumer.
#[derive(Clone, Debug)]
pub struct SecurityConfig {
    /// Relative weight given to false positives.
    pub false_positive_weight: f64,
    /// Relative weight given to false negatives.
    pub false_negative_weight: f64,
    /// Threshold above which a risk is considered actionable.
    pub risk_threshold: f64,
}
impl ModelExplainer {
pub fn new(feature_names: Vec<String>, security_config: SecurityConfig) -> Self {
Self {
feature_names,
security_config,
}
}
pub fn explain_model(&self, params: &ModelParameters, metrics: &ModelMetrics) -> ModelExplanation {
ModelExplanation {
feature_importance: self.calculate_feature_importance(params),
decision_paths: self.analyze_decision_paths(),
performance_metrics: self.explain_metrics(metrics),
security_impact: self.analyze_security_impact(metrics),
}
}
fn calculate_feature_importance(&self, params: &ModelParameters) -> HashMap<String, f64> {
let mut importance = HashMap::new();
for (idx, feature) in self.feature_names.iter().enumerate() {
let score = self.calculate_feature_score(idx, params);
importance.insert(feature.clone(), score);
}
let total: f64 = importance.values().sum();
for score in importance.values_mut() {
*score /= total;
}
importance
}
fn calculate_feature_score(&self, feature_idx: usize, params: &ModelParameters) -> f64 {
let base_score = 1.0 / self.feature_names.len() as f64;
let depth_factor = (-((params.max_depth as f64 - 10.0).powi(2)) / 100.0).exp();
base_score * depth_factor * (1.0 + (feature_idx as f64 / 10.0))
}
fn analyze_decision_paths(&self) -> Vec<DecisionPath> {
vec![
DecisionPath {
path: vec![
DecisionNode {
feature: "time_of_day".to_string(),
threshold: 18.0,
direction: "greater_than".to_string(),
},
DecisionNode {
feature: "failed_attempts".to_string(),
threshold: 3.0,
direction: "less_than".to_string(),
},
],
confidence: 0.85,
impact: 0.7,
}
]
}
fn explain_metrics(&self, metrics: &ModelMetrics) -> ExplainedMetrics {
let mut explanations = HashMap::new();
let mut recommendations = Vec::new();
explanations.insert(
"f1_score".to_string(),
format!("F1 score of {:.2} indicates balanced performance", metrics.f1_score)
);
if metrics.f1_score < 0.8 {
recommendations.push("Consider increasing training data diversity".to_string());
}
if metrics.precision < metrics.recall {
recommendations.push("Model might be too aggressive, consider adjusting threshold".to_string());
}
ExplainedMetrics {
metrics: metrics.clone(),
explanations,
recommendations,
}
}
fn analyze_security_impact(&self, metrics: &ModelMetrics) -> SecurityImpactAnalysis {
SecurityImpactAnalysis {
false_positive_impact: 1.0 - metrics.precision,
risk_factors: vec![], }
}
}