trustformers_debug/behavior_analysis.rs

//! Behavior Analysis
//!
//! Advanced analysis tools for understanding neural network behavior, including
//! input sensitivity, feature importance, and neuron activation patterns.
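//!
//! A typical flow, as a sketch only (it assumes the crate is named
//! `trustformers_debug` and that an async runtime is available to drive
//! [`BehaviorAnalyzer::analyze`]; the layer and input names are illustrative):
//!
//! ```rust,ignore
//! use trustformers_debug::behavior_analysis::{BehaviorAnalysisConfig, BehaviorAnalyzer};
//!
//! async fn run() -> anyhow::Result<()> {
//!     let mut analyzer = BehaviorAnalyzer::new(BehaviorAnalysisConfig::default());
//!
//!     // One call per forward pass: the per-neuron activations of a layer.
//!     analyzer.record_activations("encoder.layer0".to_string(), vec![0.1, 0.0, 0.7]);
//!     analyzer.record_activations("encoder.layer0".to_string(), vec![0.2, 0.0, 0.5]);
//!
//!     // Gradients of the loss with respect to one input, keyed by an input id.
//!     analyzer.record_input_gradients("input_0".to_string(), vec![0.03, -0.40, 0.90]);
//!
//!     let report = analyzer.analyze().await?;
//!     println!("dead neurons: {:.1}%", report.behavior_summary.dead_neuron_percentage);
//!     Ok(())
//! }
//! ```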

use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};

/// Configuration for behavior analysis
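///
/// A construction sketch (the overridden values below are illustrative, not
/// recommendations):
///
/// ```rust,ignore
/// let config = BehaviorAnalysisConfig {
///     dead_neuron_threshold: 1e-5,
///     correlation_threshold: 0.7,
///     ..Default::default()
/// };
/// ```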
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehaviorAnalysisConfig {
    /// Enable input sensitivity analysis
    pub enable_input_sensitivity: bool,
    /// Enable feature importance calculations
    pub enable_feature_importance: bool,
    /// Enable neuron activation pattern analysis
    pub enable_activation_patterns: bool,
    /// Enable dead neuron detection
    pub enable_dead_neuron_detection: bool,
    /// Enable correlation analysis
    pub enable_correlation_analysis: bool,
    /// Threshold for dead neuron detection (activation below this value)
    pub dead_neuron_threshold: f32,
    /// Number of samples for sensitivity analysis
    pub sensitivity_samples: usize,
    /// Perturbation magnitude for sensitivity analysis
    pub perturbation_magnitude: f32,
    /// Correlation threshold for significance
    pub correlation_threshold: f32,
}

impl Default for BehaviorAnalysisConfig {
    fn default() -> Self {
        Self {
            enable_input_sensitivity: true,
            enable_feature_importance: true,
            enable_activation_patterns: true,
            enable_dead_neuron_detection: true,
            enable_correlation_analysis: true,
            dead_neuron_threshold: 1e-6,
            sensitivity_samples: 100,
            perturbation_magnitude: 0.01,
            correlation_threshold: 0.5,
        }
    }
}

/// Input sensitivity analysis results
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InputSensitivity {
    pub input_dimension: usize,
    pub sensitivity_score: f32,
    pub gradient_magnitude: f32,
    pub perturbation_impact: f32,
    pub rank: usize,
}

/// Feature importance analysis results
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureImportance {
    pub feature_id: String,
    pub importance_score: f32,
    pub attribution_method: AttributionMethod,
    pub confidence: f32,
    pub rank: usize,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AttributionMethod {
    GradientBased,
    PermutationImportance,
    ShapleySampling,
    IntegratedGradients,
    LimeApproximation,
}

/// Neuron activation pattern information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NeuronActivationPattern {
    pub layer_id: String,
    pub neuron_id: usize,
    pub activation_statistics: ActivationStatistics,
    pub pattern_type: ActivationPatternType,
    pub stability_score: f32,
    pub selectivity_score: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActivationStatistics {
    pub mean: f32,
    pub std: f32,
    pub min: f32,
    pub max: f32,
    pub percentile_25: f32,
    pub percentile_75: f32,
    pub skewness: f32,
    pub kurtosis: f32,
    pub sparsity: f32, // Fraction of near-zero activations
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ActivationPatternType {
    Normal,
    Saturated,
    Dead,
    Oscillating,
    Sparse,
    Dense,
    Bipolar,
}

/// Dead neuron detection results
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeadNeuronInfo {
    pub layer_id: String,
    pub neuron_id: usize,
    pub activation_level: f32,
    pub dead_probability: f32,
    pub suggested_action: NeuronRepairAction,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum NeuronRepairAction {
    Reinitialize,
    AdjustLearningRate,
    ChangeActivationFunction,
    AddNoise,
    Skip, // Neuron is functioning normally
}

/// Correlation analysis results
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CorrelationAnalysis {
    pub correlation_matrix: Vec<Vec<f32>>,
    pub significant_correlations: Vec<CorrelationPair>,
    pub redundant_features: Vec<FeatureGroup>,
    pub independent_features: Vec<usize>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CorrelationPair {
    pub feature_a: usize,
    pub feature_b: usize,
    pub correlation: f32,
    pub p_value: f32,
    pub relationship_type: CorrelationType,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CorrelationType {
    Strong,
    Moderate,
    Weak,
    None,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureGroup {
    pub features: Vec<usize>,
    pub average_correlation: f32,
    pub group_importance: f32,
}

/// Comprehensive behavior analysis report
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehaviorAnalysisReport {
    pub input_sensitivities: Vec<InputSensitivity>,
    pub feature_importances: Vec<FeatureImportance>,
    pub activation_patterns: Vec<NeuronActivationPattern>,
    pub dead_neurons: Vec<DeadNeuronInfo>,
    pub correlation_analysis: Option<CorrelationAnalysis>,
    pub behavior_summary: BehaviorSummary,
    pub recommendations: Vec<BehaviorRecommendation>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehaviorSummary {
    pub total_neurons_analyzed: usize,
    pub dead_neuron_percentage: f32,
    pub average_activation_sparsity: f32,
    pub feature_distribution_entropy: f32,
    pub model_stability_score: f32,
    pub interpretability_score: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BehaviorRecommendation {
    pub category: RecommendationCategory,
    pub priority: Priority,
    pub description: String,
    pub implementation: String,
    pub expected_impact: f32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RecommendationCategory {
    Architecture,
    Training,
    Initialization,
    Regularization,
    DataPreprocessing,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Priority {
    Critical,
    High,
    Medium,
    Low,
}

/// Behavior analyzer
#[derive(Debug)]
pub struct BehaviorAnalyzer {
    config: BehaviorAnalysisConfig,
    activation_history: HashMap<String, Vec<Vec<f32>>>,
    input_gradients: HashMap<String, Vec<f32>>,
    feature_attributions: HashMap<String, FeatureImportance>,
    analysis_cache: HashMap<String, BehaviorAnalysisReport>,
}

impl BehaviorAnalyzer {
    /// Create a new behavior analyzer
    pub fn new(config: BehaviorAnalysisConfig) -> Self {
        Self {
            config,
            activation_history: HashMap::new(),
            input_gradients: HashMap::new(),
            feature_attributions: HashMap::new(),
            analysis_cache: HashMap::new(),
        }
    }

    /// Record neuron activations for analysis
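    ///
    /// Each call appends one sample (for example, one forward pass) of per-neuron
    /// activations for `layer_id`; samples for a layer are expected to have the
    /// same length. A sketch with illustrative values:
    ///
    /// ```rust,ignore
    /// analyzer.record_activations("decoder.layer3".to_string(), vec![0.0, 0.42, 0.0, 1.3]);
    /// ```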
    pub fn record_activations(&mut self, layer_id: String, activations: Vec<f32>) {
        self.activation_history
            .entry(layer_id)
            .or_insert_with(Vec::new)
            .push(activations);
    }

    /// Record input gradients for sensitivity analysis
    pub fn record_input_gradients(&mut self, input_id: String, gradients: Vec<f32>) {
        self.input_gradients.insert(input_id, gradients);
    }

    /// Perform comprehensive behavior analysis
    pub async fn analyze(&mut self) -> Result<BehaviorAnalysisReport> {
        let mut report = BehaviorAnalysisReport {
            input_sensitivities: Vec::new(),
            feature_importances: Vec::new(),
            activation_patterns: Vec::new(),
            dead_neurons: Vec::new(),
            correlation_analysis: None,
            behavior_summary: BehaviorSummary {
                total_neurons_analyzed: 0,
                dead_neuron_percentage: 0.0,
                average_activation_sparsity: 0.0,
                feature_distribution_entropy: 0.0,
                model_stability_score: 0.0,
                interpretability_score: 0.0,
            },
            recommendations: Vec::new(),
        };

        if self.config.enable_input_sensitivity {
            report.input_sensitivities = self.analyze_input_sensitivity().await?;
        }

        if self.config.enable_feature_importance {
            report.feature_importances = self.calculate_feature_importance().await?;
        }

        if self.config.enable_activation_patterns {
            report.activation_patterns = self.analyze_activation_patterns().await?;
        }

        if self.config.enable_dead_neuron_detection {
            report.dead_neurons = self.detect_dead_neurons().await?;
        }

        if self.config.enable_correlation_analysis {
            report.correlation_analysis = Some(self.perform_correlation_analysis().await?);
        }

        self.generate_behavior_summary(&mut report);
        self.generate_recommendations(&mut report);

        Ok(report)
    }

    /// Analyze input sensitivity using gradient-based methods
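    ///
    /// Each recorded gradient vector contributes one `InputSensitivity` entry per
    /// input dimension: the sensitivity score is the absolute gradient, the
    /// perturbation impact is estimated from it, and entries are ranked in
    /// descending order of sensitivity.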
    async fn analyze_input_sensitivity(&self) -> Result<Vec<InputSensitivity>> {
        let mut sensitivities = Vec::new();

        for (_input_id, gradients) in &self.input_gradients {
            for (dim, &gradient) in gradients.iter().enumerate() {
                let sensitivity_score = gradient.abs();
                let gradient_magnitude = gradient.abs();

                // Simulate perturbation impact (would normally require model re-evaluation)
                let perturbation_impact = self.estimate_perturbation_impact(gradient, dim);

                sensitivities.push(InputSensitivity {
                    input_dimension: dim,
                    sensitivity_score,
                    gradient_magnitude,
                    perturbation_impact,
                    rank: 0, // Will be set after sorting
                });
            }
        }

        // Sort by sensitivity score and assign ranks
        // `total_cmp` imposes a total order, so a NaN gradient cannot cause a panic here.
        sensitivities
            .sort_by(|a, b| b.sensitivity_score.total_cmp(&a.sensitivity_score));
        for (rank, sensitivity) in sensitivities.iter_mut().enumerate() {
            sensitivity.rank = rank + 1;
        }

        Ok(sensitivities)
    }

    /// Estimate perturbation impact (simplified version)
    fn estimate_perturbation_impact(&self, gradient: f32, _dimension: usize) -> f32 {
        // Simplified estimation: perturbation impact is proportional to gradient magnitude
        gradient.abs() * self.config.perturbation_magnitude
    }

    /// Calculate feature importance (currently gradient-based attribution only)
    async fn calculate_feature_importance(&self) -> Result<Vec<FeatureImportance>> {
        let mut importances = Vec::new();

        // Gradient-based importance
        for (input_id, gradients) in &self.input_gradients {
            let total_gradient = gradients.iter().map(|g| g.abs()).sum::<f32>();
            let importance_score = total_gradient / gradients.len() as f32;

            importances.push(FeatureImportance {
                feature_id: input_id.clone(),
                importance_score,
                attribution_method: AttributionMethod::GradientBased,
                confidence: self.calculate_attribution_confidence(importance_score),
                rank: 0,
            });
        }

        // Sort by importance and assign ranks
        importances.sort_by(|a, b| b.importance_score.total_cmp(&a.importance_score));
        for (rank, importance) in importances.iter_mut().enumerate() {
            importance.rank = rank + 1;
        }

        Ok(importances)
    }

    /// Calculate confidence in attribution score
    fn calculate_attribution_confidence(&self, score: f32) -> f32 {
        // Simple confidence based on score magnitude
        (score.tanh() * 0.5 + 0.5).min(1.0)
    }

    /// Analyze neuron activation patterns
    async fn analyze_activation_patterns(&self) -> Result<Vec<NeuronActivationPattern>> {
        let mut patterns = Vec::new();

        for (layer_id, activation_history) in &self.activation_history {
            if activation_history.is_empty() {
                continue;
            }

            let neuron_count = activation_history[0].len();

            for neuron_id in 0..neuron_count {
                let neuron_activations: Vec<f32> = activation_history
                    .iter()
                    .map(|batch| batch.get(neuron_id).copied().unwrap_or(0.0))
                    .collect();

                let statistics = self.compute_activation_statistics(&neuron_activations);
                let pattern_type = self.classify_activation_pattern(&statistics);
                let stability_score = self.compute_stability_score(&neuron_activations);
                let selectivity_score = self.compute_selectivity_score(&neuron_activations);

                patterns.push(NeuronActivationPattern {
                    layer_id: layer_id.clone(),
                    neuron_id,
                    activation_statistics: statistics,
                    pattern_type,
                    stability_score,
                    selectivity_score,
                });
            }
        }

        Ok(patterns)
    }

    /// Compute detailed activation statistics
    fn compute_activation_statistics(&self, activations: &[f32]) -> ActivationStatistics {
        if activations.is_empty() {
            return ActivationStatistics {
                mean: 0.0,
                std: 0.0,
                min: 0.0,
                max: 0.0,
                percentile_25: 0.0,
                percentile_75: 0.0,
                skewness: 0.0,
                kurtosis: 0.0,
                sparsity: 1.0,
            };
        }

        let mean = activations.iter().sum::<f32>() / activations.len() as f32;
        let variance =
            activations.iter().map(|&x| (x - mean).powi(2)).sum::<f32>() / activations.len() as f32;
        let std = variance.sqrt();

        let mut sorted_activations = activations.to_vec();
        // Sort with a total order so NaN activations cannot cause a panic.
        sorted_activations.sort_by(|a, b| a.total_cmp(b));

        let min = sorted_activations[0];
        let max = sorted_activations[sorted_activations.len() - 1];
        let percentile_25 = sorted_activations[sorted_activations.len() / 4];
        let percentile_75 = sorted_activations[3 * sorted_activations.len() / 4];

        // Calculate skewness and excess kurtosis
        let skewness = if std > 0.0 {
            activations.iter().map(|&x| ((x - mean) / std).powi(3)).sum::<f32>()
                / activations.len() as f32
        } else {
            0.0
        };

        let kurtosis = if std > 0.0 {
            activations.iter().map(|&x| ((x - mean) / std).powi(4)).sum::<f32>()
                / activations.len() as f32
                - 3.0
        } else {
            0.0
        };

        // Calculate sparsity (fraction of near-zero activations)
        let near_zero_count = activations
            .iter()
            .filter(|&&x| x.abs() < self.config.dead_neuron_threshold)
            .count();
        let sparsity = near_zero_count as f32 / activations.len() as f32;

        ActivationStatistics {
            mean,
            std,
            min,
            max,
            percentile_25,
            percentile_75,
            skewness,
            kurtosis,
            sparsity,
        }
    }

    /// Classify activation pattern type
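    ///
    /// Heuristics, checked in order: sparsity > 0.9 → `Dead`, sparsity > 0.7 →
    /// `Sparse`, max > 0.95 with mean > 0.8 → `Saturated`, coefficient of
    /// variation > 2 → `Oscillating`, sign-mixed activations with |mean| > 0.1 →
    /// `Bipolar`, sparsity < 0.3 → `Dense`, otherwise `Normal`.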
    fn classify_activation_pattern(&self, stats: &ActivationStatistics) -> ActivationPatternType {
        if stats.sparsity > 0.9 {
            ActivationPatternType::Dead
        } else if stats.sparsity > 0.7 {
            ActivationPatternType::Sparse
        } else if stats.max > 0.95 && stats.mean > 0.8 {
            ActivationPatternType::Saturated
        } else if stats.std / stats.mean.abs().max(1e-8) > 2.0 {
            ActivationPatternType::Oscillating
        } else if stats.mean.abs() > 0.1 && stats.mean * stats.min < 0.0 {
            ActivationPatternType::Bipolar
        } else if stats.sparsity < 0.3 {
            ActivationPatternType::Dense
        } else {
            ActivationPatternType::Normal
        }
    }

    /// Compute stability score for neuron activations
    fn compute_stability_score(&self, activations: &[f32]) -> f32 {
        if activations.len() < 2 {
            return 0.0;
        }

        let mean = activations.iter().sum::<f32>() / activations.len() as f32;
        let variance =
            activations.iter().map(|&x| (x - mean).powi(2)).sum::<f32>() / activations.len() as f32;

        // Stability decreases with the coefficient of variation: 1 / (1 + CV)
        if mean.abs() > 1e-8 {
            1.0 / (1.0 + variance.sqrt() / mean.abs())
        } else {
            0.0
        }
    }

    /// Compute selectivity score (near 1.0 when a few activations dominate, 0.0 when uniform)
    fn compute_selectivity_score(&self, activations: &[f32]) -> f32 {
        if activations.is_empty() {
            return 0.0;
        }

        // Selectivity based on activation distribution
        let max_activation = activations.iter().fold(0.0f32, |a, &b| a.max(b.abs()));
        let mean_activation =
            activations.iter().map(|x| x.abs()).sum::<f32>() / activations.len() as f32;

        if max_activation > 1e-8 {
            1.0 - (mean_activation / max_activation)
        } else {
            0.0
        }
    }

    /// Detect dead neurons
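    ///
    /// A neuron is flagged when its mean absolute activation falls below
    /// `dead_neuron_threshold`; `dead_probability` scales linearly from 0.0 at the
    /// threshold to 1.0 at zero activation, and only neurons exceeding 0.5 are
    /// reported.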
    async fn detect_dead_neurons(&self) -> Result<Vec<DeadNeuronInfo>> {
        let mut dead_neurons = Vec::new();

        for (layer_id, activation_history) in &self.activation_history {
            if activation_history.is_empty() {
                continue;
            }

            let neuron_count = activation_history[0].len();

            for neuron_id in 0..neuron_count {
                let neuron_activations: Vec<f32> = activation_history
                    .iter()
                    .map(|batch| batch.get(neuron_id).copied().unwrap_or(0.0))
                    .collect();

                let activation_level = neuron_activations.iter().map(|x| x.abs()).sum::<f32>()
                    / neuron_activations.len() as f32;

                let dead_probability = if activation_level < self.config.dead_neuron_threshold {
                    1.0 - (activation_level / self.config.dead_neuron_threshold)
                } else {
                    0.0
                };

                if dead_probability > 0.5 {
                    let suggested_action =
                        self.suggest_neuron_repair_action(activation_level, &neuron_activations);

                    dead_neurons.push(DeadNeuronInfo {
                        layer_id: layer_id.clone(),
                        neuron_id,
                        activation_level,
                        dead_probability,
                        suggested_action,
                    });
                }
            }
        }

        Ok(dead_neurons)
    }

    /// Suggest repair action for dead neurons
    fn suggest_neuron_repair_action(
        &self,
        activation_level: f32,
        activations: &[f32],
    ) -> NeuronRepairAction {
        if activation_level < self.config.dead_neuron_threshold * 0.1 {
            NeuronRepairAction::Reinitialize
        } else if activation_level < self.config.dead_neuron_threshold * 0.5 {
            let variance =
                activations.iter().map(|&x| x.powi(2)).sum::<f32>() / activations.len() as f32;
            if variance < 1e-10 {
                NeuronRepairAction::AddNoise
            } else {
                NeuronRepairAction::AdjustLearningRate
            }
        } else {
            NeuronRepairAction::ChangeActivationFunction
        }
    }

    /// Perform correlation analysis
    async fn perform_correlation_analysis(&self) -> Result<CorrelationAnalysis> {
        // For simplicity, analyze correlations between the recorded input gradient vectors
        let gradient_vectors: Vec<&Vec<f32>> = self.input_gradients.values().collect();

        if gradient_vectors.len() < 2 {
            return Ok(CorrelationAnalysis {
                correlation_matrix: Vec::new(),
                significant_correlations: Vec::new(),
                redundant_features: Vec::new(),
                independent_features: Vec::new(),
            });
        }

        let n = gradient_vectors.len();
        let mut correlation_matrix = vec![vec![0.0; n]; n];
        let mut significant_correlations = Vec::new();

        // Compute correlation matrix
        for i in 0..n {
            for j in i..n {
                let correlation =
                    self.compute_correlation(&gradient_vectors[i], &gradient_vectors[j]);
                correlation_matrix[i][j] = correlation;
                correlation_matrix[j][i] = correlation;

                if i != j && correlation.abs() > self.config.correlation_threshold {
                    let correlation_type = if correlation.abs() > 0.8 {
                        CorrelationType::Strong
                    } else if correlation.abs() > 0.5 {
                        CorrelationType::Moderate
                    } else {
                        CorrelationType::Weak
                    };

                    significant_correlations.push(CorrelationPair {
                        feature_a: i,
                        feature_b: j,
                        correlation,
                        p_value: 0.01, // Simplified p-value
                        relationship_type: correlation_type,
                    });
                }
            }
        }

        // Find redundant features (groups of highly correlated features)
        let redundant_features = self.find_redundant_feature_groups(&correlation_matrix);

        // Find independent features
        let independent_features = self.find_independent_features(&correlation_matrix);

        Ok(CorrelationAnalysis {
            correlation_matrix,
            significant_correlations,
            redundant_features,
            independent_features,
        })
    }

    /// Compute Pearson correlation coefficient
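    ///
    /// r = Σ(xᵢ − x̄)(yᵢ − ȳ) / √(Σ(xᵢ − x̄)² · Σ(yᵢ − ȳ)²), returning 0.0 when the
    /// inputs differ in length, are empty, or the denominator is near zero.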
    fn compute_correlation(&self, x: &[f32], y: &[f32]) -> f32 {
        if x.len() != y.len() || x.is_empty() {
            return 0.0;
        }

        let n = x.len() as f32;
        let mean_x = x.iter().sum::<f32>() / n;
        let mean_y = y.iter().sum::<f32>() / n;

        let numerator: f32 =
            x.iter().zip(y.iter()).map(|(&xi, &yi)| (xi - mean_x) * (yi - mean_y)).sum();

        let sum_sq_x: f32 = x.iter().map(|&xi| (xi - mean_x).powi(2)).sum();
        let sum_sq_y: f32 = y.iter().map(|&yi| (yi - mean_y).powi(2)).sum();

        let denominator = (sum_sq_x * sum_sq_y).sqrt();

        if denominator > 1e-8 {
            numerator / denominator
        } else {
            0.0
        }
    }

    /// Find groups of redundant features
    fn find_redundant_feature_groups(&self, correlation_matrix: &[Vec<f32>]) -> Vec<FeatureGroup> {
        let mut groups = Vec::new();
        let mut visited = HashSet::new();

        for i in 0..correlation_matrix.len() {
            if visited.contains(&i) {
                continue;
            }

            let mut group = vec![i];
            let mut group_correlations = Vec::new();

            for j in (i + 1)..correlation_matrix.len() {
                if correlation_matrix[i][j].abs() > 0.7 {
                    group.push(j);
                    group_correlations.push(correlation_matrix[i][j].abs());
                    visited.insert(j);
                }
            }

            if group.len() > 1 {
                let average_correlation =
                    group_correlations.iter().sum::<f32>() / group_correlations.len() as f32;
                groups.push(FeatureGroup {
                    features: group,
                    average_correlation,
                    group_importance: average_correlation, // Simplified importance
                });
            }

            visited.insert(i);
        }

        groups
    }

    /// Find independent features
    fn find_independent_features(&self, correlation_matrix: &[Vec<f32>]) -> Vec<usize> {
        let mut independent = Vec::new();

        for i in 0..correlation_matrix.len() {
            let max_correlation = correlation_matrix[i]
                .iter()
                .enumerate()
                .filter(|(j, _)| *j != i)
                .map(|(_, &corr)| corr.abs())
                .fold(0.0f32, |a, b| a.max(b));

            if max_correlation < self.config.correlation_threshold {
                independent.push(i);
            }
        }

        independent
    }

    /// Generate behavior summary
    fn generate_behavior_summary(&self, report: &mut BehaviorAnalysisReport) {
        let total_neurons = report.activation_patterns.len();
        let dead_neurons = report.dead_neurons.len();

        report.behavior_summary.total_neurons_analyzed = total_neurons;
        report.behavior_summary.dead_neuron_percentage = if total_neurons > 0 {
            (dead_neurons as f32 / total_neurons as f32) * 100.0
        } else {
            0.0
        };

        if !report.activation_patterns.is_empty() {
            report.behavior_summary.average_activation_sparsity = report
                .activation_patterns
                .iter()
                .map(|p| p.activation_statistics.sparsity)
                .sum::<f32>()
                / report.activation_patterns.len() as f32;

            report.behavior_summary.model_stability_score =
                report.activation_patterns.iter().map(|p| p.stability_score).sum::<f32>()
                    / report.activation_patterns.len() as f32;
        }

        // Simple entropy calculation for feature distribution
        if !report.feature_importances.is_empty() {
            let total_importance: f32 =
                report.feature_importances.iter().map(|f| f.importance_score).sum();

            if total_importance > 0.0 {
                let entropy: f32 = report
                    .feature_importances
                    .iter()
                    .map(|f| {
                        let p = f.importance_score / total_importance;
                        if p > 0.0 {
                            -p * p.log2()
                        } else {
                            0.0
                        }
                    })
                    .sum();
                report.behavior_summary.feature_distribution_entropy = entropy;
            }
        }

        // Overall interpretability score
        report.behavior_summary.interpretability_score =
            (report.behavior_summary.model_stability_score * 0.4
                + (1.0 - report.behavior_summary.dead_neuron_percentage / 100.0) * 0.3
                + (1.0 - report.behavior_summary.average_activation_sparsity) * 0.3)
                .max(0.0)
                .min(1.0);
    }

    /// Generate behavior recommendations
    fn generate_recommendations(&self, report: &mut BehaviorAnalysisReport) {
        // Dead neuron recommendations
        if report.behavior_summary.dead_neuron_percentage > 20.0 {
            report.recommendations.push(BehaviorRecommendation {
                category: RecommendationCategory::Training,
                priority: Priority::Critical,
                description: format!("High percentage of dead neurons detected ({:.1}%)",
                                   report.behavior_summary.dead_neuron_percentage),
                implementation: "Consider reducing learning rate, changing initialization, or adding batch normalization".to_string(),
                expected_impact: 0.8,
            });
        }

        // Sparsity recommendations
        if report.behavior_summary.average_activation_sparsity > 0.8 {
            report.recommendations.push(BehaviorRecommendation {
                category: RecommendationCategory::Architecture,
                priority: Priority::High,
                description: "Very sparse activations detected; the model may be under-utilized".to_string(),
                implementation: "Consider reducing model capacity or adjusting activation functions".to_string(),
                expected_impact: 0.6,
            });
        }

        // Stability recommendations
        if report.behavior_summary.model_stability_score < 0.5 {
            report.recommendations.push(BehaviorRecommendation {
                category: RecommendationCategory::Training,
                priority: Priority::High,
                description: "Low model stability detected".to_string(),
                implementation: "Consider adding regularization, reducing learning rate, or using gradient clipping".to_string(),
                expected_impact: 0.7,
            });
        }

        // Feature importance recommendations
        if report.feature_importances.len() > 10 {
            let top_features = &report.feature_importances[..5];
            let bottom_features =
                &report.feature_importances[report.feature_importances.len() - 5..];

            let top_importance: f32 = top_features.iter().map(|f| f.importance_score).sum();
            let bottom_importance: f32 = bottom_features.iter().map(|f| f.importance_score).sum();

            if top_importance > bottom_importance * 10.0 {
                report.recommendations.push(BehaviorRecommendation {
                    category: RecommendationCategory::DataPreprocessing,
                    priority: Priority::Medium,
                    description: "Highly imbalanced feature importance detected".to_string(),
                    implementation: "Consider feature selection or dimensionality reduction"
                        .to_string(),
                    expected_impact: 0.5,
                });
            }
        }
    }

    /// Generate a comprehensive report
    pub async fn generate_report(&self) -> Result<BehaviorAnalysisReport> {
        let mut temp_analyzer = BehaviorAnalyzer {
            config: self.config.clone(),
            activation_history: self.activation_history.clone(),
            input_gradients: self.input_gradients.clone(),
            feature_attributions: self.feature_attributions.clone(),
            analysis_cache: HashMap::new(),
        };

        temp_analyzer.analyze().await
    }

    /// Clear all recorded data
    pub fn clear(&mut self) {
        self.activation_history.clear();
        self.input_gradients.clear();
        self.feature_attributions.clear();
        self.analysis_cache.clear();
    }

    /// Get summary of current analysis state
    pub fn get_analysis_summary(&self) -> AnalysisSummary {
        AnalysisSummary {
            total_layers_tracked: self.activation_history.len(),
            total_activation_samples: self
                .activation_history
                .values()
                .map(|history| history.len())
                .sum(),
            total_inputs_tracked: self.input_gradients.len(),
            analysis_coverage: if self.activation_history.is_empty() {
                0.0
            } else {
                1.0 // Simplified coverage metric
            },
        }
    }
}

/// Summary of analysis state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalysisSummary {
    pub total_layers_tracked: usize,
    pub total_activation_samples: usize,
    pub total_inputs_tracked: usize,
    pub analysis_coverage: f32,
}
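
// A minimal test sketch exercising the synchronous helpers; the data below is
// illustrative and the assertions simply restate what the heuristics above
// compute for it.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn correlation_and_statistics_behave_as_documented() {
        let analyzer = BehaviorAnalyzer::new(BehaviorAnalysisConfig::default());

        // Perfectly linearly related vectors give a Pearson correlation of ~1.0.
        let x = vec![1.0, 2.0, 3.0, 4.0];
        let y = vec![2.0, 4.0, 6.0, 8.0];
        let r = analyzer.compute_correlation(&x, &y);
        assert!((r - 1.0).abs() < 1e-4);

        // A mostly-zero activation trace has high sparsity and is classified as
        // dead or sparse by the pattern heuristics.
        let activations = vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5];
        let stats = analyzer.compute_activation_statistics(&activations);
        assert!(stats.sparsity > 0.85);
        assert!(matches!(
            analyzer.classify_activation_pattern(&stats),
            ActivationPatternType::Dead | ActivationPatternType::Sparse
        ));
    }
}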