memscope_rs/quality/
analyzer.rs

1use std::collections::HashMap;
2use std::time::{Duration, Instant};
3
/// Code quality analyzer for memory analysis operations.
///
/// Owns the analyzer configuration, a bounded history of previous
/// analyses (used for trend detection), and per-component quality
/// baselines.
pub struct CodeAnalyzer {
    /// Quality metrics configuration (depth, trend tracking, thresholds).
    config: AnalyzerConfig,
    /// Historical analysis data; trimmed to a fixed capacity on insert.
    history: AnalysisHistory,
    /// Quality baselines, keyed by component name.
    /// NOTE(review): written by `set_baseline` but never read by the
    /// visible analysis code — confirm baselines are consumed elsewhere.
    baselines: HashMap<String, QualityBaseline>,
}
13
/// Configuration for code analysis.
///
/// Defaults (see the `Default` impl): `Standard` depth, trend tracking
/// enabled, a 30-second analysis budget, default thresholds.
#[derive(Debug, Clone)]
pub struct AnalyzerConfig {
    /// Depth of analysis to perform.
    pub analysis_depth: AnalysisDepth,
    /// Whether to track quality trends; when true, `analyze_quality`
    /// populates `AnalysisReport::trend_analysis`.
    pub track_trends: bool,
    /// Maximum time to spend on analysis.
    /// NOTE(review): not enforced by the visible `analyze_quality` code —
    /// confirm where this budget is applied.
    pub max_analysis_time: Duration,
    /// Quality thresholds.
    /// NOTE(review): the visible metric targets are hard-coded in
    /// `calculate_quality_metrics`, not read from here — confirm intent.
    pub thresholds: QualityThresholds,
}
26
/// Depth levels for code analysis.
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key
/// or in sets.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AnalysisDepth {
    /// Basic quality checks only
    Surface,
    /// Standard analysis depth (the configured default)
    Standard,
    /// Comprehensive deep analysis
    Deep,
    /// Exhaustive analysis (slow)
    Exhaustive,
}
39
/// Quality thresholds for different metrics.
///
/// NOTE(review): these thresholds are not read by the visible analysis
/// code (`calculate_quality_metrics` hard-codes its targets) — confirm
/// they are consumed elsewhere.
#[derive(Debug, Clone)]
pub struct QualityThresholds {
    /// Minimum acceptable code quality score, as a fraction in
    /// 0.0..=1.0 (default 0.8).
    pub min_quality_score: f64,
    /// Maximum acceptable complexity (default 10).
    pub max_complexity: u32,
    /// Minimum test coverage, as a fraction in 0.0..=1.0 (default 0.8 —
    /// a fraction, despite "percentage" phrasing elsewhere).
    pub min_coverage: f64,
    /// Maximum acceptable technical debt, as a fraction (default 0.2).
    pub max_technical_debt: f64,
}
52
/// Historical analysis data retained for trend analysis.
#[derive(Debug)]
struct AnalysisHistory {
    /// Previous analysis results, oldest first (new results are pushed
    /// to the end by `store_analysis_result`).
    results: Vec<AnalysisResult>,
    /// Maximum history entries to keep; the oldest entries are drained
    /// once this cap is exceeded.
    max_entries: usize,
}
61
/// Quality baseline for comparison against future analyses of the same
/// component (registered via `CodeAnalyzer::set_baseline`).
#[derive(Debug, Clone)]
pub struct QualityBaseline {
    /// Component name this baseline belongs to.
    pub component: String,
    /// Baseline quality score — presumably a fraction in 0.0..=1.0 like
    /// `QualityAssessment::overall_score`; confirm with producers.
    pub quality_score: f64,
    /// Baseline complexity.
    pub complexity: u32,
    /// Baseline performance metrics.
    pub performance: BaselinePerformance,
    /// When the baseline was established.
    pub timestamp: Instant,
}
76
/// Baseline performance metrics captured alongside a quality baseline.
#[derive(Debug, Clone)]
pub struct BaselinePerformance {
    /// Average execution time per operation.
    pub avg_execution_time: Duration,
    /// Memory usage per operation — presumably bytes; confirm unit with
    /// the code that records baselines.
    pub memory_per_operation: usize,
    /// Error rate — presumably a fraction in 0.0..=1.0 like
    /// `ReliabilityData::error_rate`; confirm before comparing.
    pub error_rate: f64,
}
87
/// Comprehensive analysis report produced by `CodeAnalyzer::analyze_quality`.
#[derive(Debug, Clone)]
pub struct AnalysisReport {
    /// Component that was analyzed.
    pub component: String,
    /// Overall quality assessment (score, grade, strengths/weaknesses).
    pub quality_assessment: QualityAssessment,
    /// Individual quality metrics that fed the assessment.
    pub metrics: Vec<QualityMetric>,
    /// Detected issues.
    pub issues: Vec<QualityIssue>,
    /// Performance analysis (bottlenecks, efficiency, scalability).
    pub performance_analysis: PerformanceAnalysis,
    /// Recommendations for improvement, derived from issues and
    /// performance results.
    pub recommendations: Vec<Recommendation>,
    /// Trend analysis; `Some` only when `AnalyzerConfig::track_trends`
    /// is enabled.
    pub trend_analysis: Option<TrendAnalysis>,
    /// Wall-clock time the analysis took.
    pub analysis_duration: Duration,
}
108
/// Overall quality assessment combining weighted metrics and issue
/// penalties.
#[derive(Debug, Clone)]
pub struct QualityAssessment {
    /// Overall quality score (0.0 to 1.0), after subtracting penalties
    /// for critical-or-worse issues.
    pub overall_score: f64,
    /// Quality grade derived from `overall_score`.
    pub grade: QualityGrade,
    /// Assessment confidence level (0.0 to 1.0).
    pub confidence: f64,
    /// Key strengths (names of metrics comfortably above target).
    pub strengths: Vec<String>,
    /// Key weaknesses (titles of major-or-worse issues).
    pub weaknesses: Vec<String>,
}
123
/// Quality grade classifications.
///
/// Ordering is by quality: `A` is the highest grade and `F` the lowest
/// (`QualityGrade::A > QualityGrade::F`). The ordering is implemented
/// manually because the variants are declared best-first, so a derived
/// `Ord` (which follows declaration order) would be inverted.
#[derive(Debug, Clone, PartialEq)]
pub enum QualityGrade {
    /// Excellent quality (90-100%)
    A,
    /// Good quality (80-89%)
    B,
    /// Acceptable quality (70-79%)
    C,
    /// Poor quality (60-69%)
    D,
    /// Failing quality (<60%)
    F,
}

impl QualityGrade {
    /// Numeric rank used for ordering: higher is better (A = 5 … F = 1).
    /// Shared by `Ord::cmp` so the mapping exists in exactly one place
    /// (the previous implementation duplicated this `match` twice).
    fn rank(&self) -> u8 {
        match self {
            QualityGrade::A => 5,
            QualityGrade::B => 4,
            QualityGrade::C => 3,
            QualityGrade::D => 2,
            QualityGrade::F => 1,
        }
    }
}

impl PartialOrd for QualityGrade {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for QualityGrade {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Compare by rank so that better grades order as greater.
        self.rank().cmp(&other.rank())
    }
}

impl Eq for QualityGrade {}
166
/// Individual quality metric contributing to the overall assessment.
#[derive(Debug, Clone)]
pub struct QualityMetric {
    /// Metric name (e.g. "allocation_efficiency", "error_rate").
    pub name: String,
    /// Metric category.
    pub category: MetricCategory,
    /// Current measured value.
    pub value: f64,
    /// Target value. For some metrics (e.g. "error_rate") lower values
    /// are better; the comparison direction is captured in `meets_target`.
    pub target: f64,
    /// Whether the metric currently meets its target, precomputed with
    /// the direction-appropriate comparison.
    pub meets_target: bool,
    /// Weight of this metric in the weighted overall quality score.
    pub weight: f64,
    /// Trend direction.
    pub trend: TrendDirection,
}
185
/// Categories of quality metrics.
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MetricCategory {
    /// Performance metrics
    Performance,
    /// Reliability metrics
    Reliability,
    /// Maintainability metrics
    Maintainability,
    /// Security metrics
    Security,
    /// Efficiency metrics
    Efficiency,
}
200
/// Quality issue detected during analysis.
#[derive(Debug, Clone)]
pub struct QualityIssue {
    /// Stable issue identifier (e.g. "memory_leak_detected").
    pub id: String,
    /// Short human-readable title.
    pub title: String,
    /// Detailed description, including measured values.
    pub description: String,
    /// Issue severity; issues at `Major` or above feed the assessment's
    /// weaknesses and the generated recommendations.
    pub severity: IssueSeverity,
    /// Issue category.
    pub category: IssueCategory,
    /// Location in code, if known (a component/subsystem label such as
    /// "memory_tracking", not a file/line).
    pub location: Option<String>,
    /// Estimated fix effort.
    pub fix_effort: FixEffort,
    /// Impact if not fixed.
    pub impact: ImpactLevel,
}
221
/// Issue severity levels, ordered least to most severe:
/// `Minor < Moderate < Major < Critical < Blocker`.
///
/// The derived `Ord` follows declaration order, so keep the variants
/// sorted ascending by severity. Fieldless, hence `Copy`; `Hash` allows
/// use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum IssueSeverity {
    /// Minor issue
    Minor,
    /// Moderate issue
    Moderate,
    /// Major issue
    Major,
    /// Critical issue
    Critical,
    /// Blocker issue
    Blocker,
}
236
/// Categories of quality issues.
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IssueCategory {
    /// Memory management issues
    MemoryManagement,
    /// Performance problems
    Performance,
    /// Thread safety issues
    ThreadSafety,
    /// Error handling problems
    ErrorHandling,
    /// Code style violations
    CodeStyle,
    /// Design issues
    Design,
}
253
/// Estimated effort to fix an issue.
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FixEffort {
    /// Quick fix (< 1 hour)
    Trivial,
    /// Easy fix (1-4 hours)
    Easy,
    /// Medium fix (4-16 hours)
    Medium,
    /// Hard fix (16-40 hours)
    Hard,
    /// Very hard fix (> 40 hours)
    VeryHard,
}
268
/// Impact level if an issue is not fixed, ordered least to most severe:
/// `Minimal < Low < Medium < High < Critical`.
///
/// The derived `Ord` follows declaration order, so keep the variants
/// sorted ascending. Fieldless, hence `Copy`; `Hash` allows map keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ImpactLevel {
    /// Minimal impact
    Minimal,
    /// Low impact
    Low,
    /// Medium impact
    Medium,
    /// High impact
    High,
    /// Critical impact
    Critical,
}
283
/// Performance analysis results.
#[derive(Debug, Clone)]
pub struct PerformanceAnalysis {
    /// Performance score (0.0 to 1.0): the unweighted mean of memory
    /// efficiency, CPU efficiency and the scalability score.
    pub score: f64,
    /// Detected performance bottlenecks.
    pub bottlenecks: Vec<PerformanceBottleneck>,
    /// Memory efficiency, as a fraction in 0.0..=1.0.
    pub memory_efficiency: f64,
    /// CPU efficiency, derived from measured CPU usage
    /// (1.0 - usage / 100).
    pub cpu_efficiency: f64,
    /// Scalability assessment.
    pub scalability: ScalabilityAssessment,
}
298
/// Performance bottleneck information.
#[derive(Debug, Clone)]
pub struct PerformanceBottleneck {
    /// Bottleneck location (component/subsystem label such as
    /// "memory_management").
    pub location: String,
    /// Type of bottleneck.
    pub bottleneck_type: BottleneckType,
    /// Severity as a fraction in 0.0..=1.0 (e.g. CPU usage / 100, or the
    /// fragmentation ratio).
    pub severity: f64,
    /// Human-readable description.
    pub description: String,
    /// Suggested optimization.
    pub optimization: String,
}
313
/// Types of performance bottlenecks.
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum BottleneckType {
    /// CPU intensive operation
    CpuBound,
    /// Memory allocation bottleneck
    MemoryBound,
    /// I/O bottleneck
    IoBound,
    /// Lock contention
    LockContention,
    /// Cache misses
    CacheMiss,
    /// Algorithm inefficiency
    AlgorithmInefficiency,
}
330
/// Scalability assessment.
#[derive(Debug, Clone)]
pub struct ScalabilityAssessment {
    /// Scalability score (0.0 to 1.0).
    pub score: f64,
    /// Expected scaling behavior as load grows.
    pub scaling_behavior: ScalingBehavior,
    /// Resource scaling factors.
    pub resource_scaling: ResourceScaling,
    /// Known scalability limitations (free-form notes).
    pub limitations: Vec<String>,
}
343
/// Expected scaling behavior (asymptotic complexity class).
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ScalingBehavior {
    /// Constant time complexity
    Constant,
    /// Linear scaling
    Linear,
    /// Logarithmic scaling
    Logarithmic,
    /// Quadratic scaling
    Quadratic,
    /// Exponential scaling (bad)
    Exponential,
}
358
/// Resource scaling characteristics.
///
/// Factors are multipliers describing how resource demand grows —
/// presumably relative to load; confirm the exact reference unit with
/// the code that populates these values.
#[derive(Debug, Clone)]
pub struct ResourceScaling {
    /// Memory scaling factor.
    pub memory_factor: f64,
    /// CPU scaling factor.
    pub cpu_factor: f64,
    /// Network scaling factor.
    pub network_factor: f64,
}
369
/// Improvement recommendation derived from detected issues and
/// performance results.
#[derive(Debug, Clone)]
pub struct Recommendation {
    /// Recommendation title.
    pub title: String,
    /// Detailed description.
    pub description: String,
    /// Priority level.
    pub priority: RecommendationPriority,
    /// Expected impact of applying the recommendation.
    pub impact: ImpactLevel,
    /// Estimated implementation effort.
    pub effort: FixEffort,
    /// IDs of the quality issues this recommendation addresses (empty
    /// for general, non-issue-driven recommendations).
    pub related_issues: Vec<String>,
}
386
/// Priority levels for recommendations, ordered ascending:
/// `Low < Medium < High < Critical`.
///
/// The derived `Ord` follows declaration order, so keep the variants
/// sorted ascending. Fieldless, hence `Copy`; `Hash` allows map keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum RecommendationPriority {
    /// Low priority
    Low,
    /// Medium priority
    Medium,
    /// High priority
    High,
    /// Critical priority
    Critical,
}
399
/// Trend analysis over time, computed from the analyzer's stored history.
#[derive(Debug, Clone)]
pub struct TrendAnalysis {
    /// Quality trend direction.
    pub quality_trend: TrendDirection,
    /// Performance trend direction.
    pub performance_trend: TrendDirection,
    /// Complexity trend direction.
    pub complexity_trend: TrendDirection,
    /// Confidence in the trend (0.0 when there is too little history,
    /// up to 1.0).
    pub confidence: f64,
    /// Time period the analysis covers.
    pub time_period: Duration,
}
414
/// Trend direction indicators.
///
/// Fieldless enum, so it is `Copy`; `Eq`/`Hash` allow use as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum TrendDirection {
    /// Improving trend
    Improving,
    /// Stable trend
    Stable,
    /// Declining trend
    Declining,
    /// Unknown trend (insufficient data)
    Unknown,
}
427
/// Analysis result stored internally for historical trend tracking.
#[derive(Debug, Clone)]
struct AnalysisResult {
    /// Component analyzed; used to filter history per component.
    component: String,
    /// Overall quality score achieved (0.0..=1.0).
    quality_score: f64,
    /// When the analysis ran. Stored but not yet read — hence the allow.
    #[allow(dead_code)]
    timestamp: Instant,
    /// Snapshot of metric name -> value. Stored but not yet read —
    /// hence the allow.
    #[allow(dead_code)]
    metrics: HashMap<String, f64>,
}
442
443impl CodeAnalyzer {
444    /// Create new code analyzer
445    pub fn new() -> Self {
446        Self {
447            config: AnalyzerConfig::default(),
448            history: AnalysisHistory {
449                results: Vec::new(),
450                max_entries: 100,
451            },
452            baselines: HashMap::new(),
453        }
454    }
455
456    /// Create analyzer with custom configuration
457    pub fn with_config(config: AnalyzerConfig) -> Self {
458        Self {
459            config,
460            history: AnalysisHistory {
461                results: Vec::new(),
462                max_entries: 100,
463            },
464            baselines: HashMap::new(),
465        }
466    }
467
468    /// Set quality baseline for component
469    pub fn set_baseline(&mut self, component: &str, baseline: QualityBaseline) {
470        self.baselines.insert(component.to_string(), baseline);
471    }
472
473    /// Analyze code quality for component
474    pub fn analyze_quality(
475        &mut self,
476        component: &str,
477        context: &AnalysisContext,
478    ) -> AnalysisReport {
479        let start_time = Instant::now();
480
481        // Perform quality analysis
482        let metrics = self.calculate_quality_metrics(context);
483        let issues = self.detect_quality_issues(context);
484        let performance_analysis = self.analyze_performance(context);
485        let quality_assessment = self.assess_overall_quality(&metrics, &issues);
486        let recommendations = self.generate_recommendations(&issues, &performance_analysis);
487        let trend_analysis = if self.config.track_trends {
488            Some(self.analyze_trends(component))
489        } else {
490            None
491        };
492
493        let analysis_duration = start_time.elapsed();
494
495        // Store result in history
496        self.store_analysis_result(component, &quality_assessment, &metrics);
497
498        AnalysisReport {
499            component: component.to_string(),
500            quality_assessment,
501            metrics,
502            issues,
503            performance_analysis,
504            recommendations,
505            trend_analysis,
506            analysis_duration,
507        }
508    }
509
510    fn calculate_quality_metrics(&self, context: &AnalysisContext) -> Vec<QualityMetric> {
511        vec![
512            // Performance metrics
513            QualityMetric {
514                name: "allocation_efficiency".to_string(),
515                category: MetricCategory::Performance,
516                value: context.performance_data.allocation_efficiency,
517                target: 0.95,
518                meets_target: context.performance_data.allocation_efficiency >= 0.95,
519                weight: 0.3,
520                trend: TrendDirection::Unknown,
521            },
522            // Reliability metrics
523            QualityMetric {
524                name: "error_rate".to_string(),
525                category: MetricCategory::Reliability,
526                value: context.reliability_data.error_rate,
527                target: 0.01, // 1% max error rate
528                meets_target: context.reliability_data.error_rate <= 0.01,
529                weight: 0.25,
530                trend: TrendDirection::Unknown,
531            },
532            // Memory efficiency
533            QualityMetric {
534                name: "memory_efficiency".to_string(),
535                category: MetricCategory::Efficiency,
536                value: context.memory_data.efficiency_ratio,
537                target: 0.9,
538                meets_target: context.memory_data.efficiency_ratio >= 0.9,
539                weight: 0.2,
540                trend: TrendDirection::Unknown,
541            },
542        ]
543    }
544
545    fn detect_quality_issues(&self, context: &AnalysisContext) -> Vec<QualityIssue> {
546        let mut issues = Vec::new();
547
548        // Check for memory leaks
549        if context.memory_data.growth_rate > 1024.0 * 1024.0 {
550            // 1MB/sec
551            issues.push(QualityIssue {
552                id: "memory_leak_detected".to_string(),
553                title: "Potential Memory Leak".to_string(),
554                description: format!(
555                    "High memory growth rate: {:.2}MB/sec",
556                    context.memory_data.growth_rate / (1024.0 * 1024.0)
557                ),
558                severity: IssueSeverity::Major,
559                category: IssueCategory::MemoryManagement,
560                location: Some("memory_tracking".to_string()),
561                fix_effort: FixEffort::Medium,
562                impact: ImpactLevel::High,
563            });
564        }
565
566        // Check for performance issues
567        if context.performance_data.avg_latency > Duration::from_micros(100) {
568            issues.push(QualityIssue {
569                id: "high_latency".to_string(),
570                title: "High Operation Latency".to_string(),
571                description: format!(
572                    "Average latency {:.2}µs exceeds threshold",
573                    context.performance_data.avg_latency.as_micros()
574                ),
575                severity: IssueSeverity::Moderate,
576                category: IssueCategory::Performance,
577                location: Some("allocation_tracking".to_string()),
578                fix_effort: FixEffort::Easy,
579                impact: ImpactLevel::Medium,
580            });
581        }
582
583        issues
584    }
585
586    fn analyze_performance(&self, context: &AnalysisContext) -> PerformanceAnalysis {
587        let bottlenecks = self.identify_bottlenecks(context);
588        let memory_efficiency = context.memory_data.efficiency_ratio;
589        let cpu_efficiency = 1.0 - (context.performance_data.cpu_usage / 100.0);
590
591        let scalability = ScalabilityAssessment {
592            score: 0.8, // Placeholder calculation
593            scaling_behavior: ScalingBehavior::Linear,
594            resource_scaling: ResourceScaling {
595                memory_factor: 1.2,
596                cpu_factor: 1.1,
597                network_factor: 1.0,
598            },
599            limitations: vec!["Memory bandwidth may become bottleneck at scale".to_string()],
600        };
601
602        let score = (memory_efficiency + cpu_efficiency + scalability.score) / 3.0;
603
604        PerformanceAnalysis {
605            score,
606            bottlenecks,
607            memory_efficiency,
608            cpu_efficiency,
609            scalability,
610        }
611    }
612
613    fn identify_bottlenecks(&self, context: &AnalysisContext) -> Vec<PerformanceBottleneck> {
614        let mut bottlenecks = Vec::new();
615
616        if context.performance_data.cpu_usage > 80.0 {
617            bottlenecks.push(PerformanceBottleneck {
618                location: "allocation_tracking".to_string(),
619                bottleneck_type: BottleneckType::CpuBound,
620                severity: context.performance_data.cpu_usage / 100.0,
621                description: "High CPU usage in allocation tracking".to_string(),
622                optimization: "Consider optimizing hot paths or using faster data structures"
623                    .to_string(),
624            });
625        }
626
627        if context.memory_data.fragmentation_ratio > 0.3 {
628            bottlenecks.push(PerformanceBottleneck {
629                location: "memory_management".to_string(),
630                bottleneck_type: BottleneckType::MemoryBound,
631                severity: context.memory_data.fragmentation_ratio,
632                description: "High memory fragmentation".to_string(),
633                optimization: "Implement memory compaction or use memory pools".to_string(),
634            });
635        }
636
637        bottlenecks
638    }
639
640    fn assess_overall_quality(
641        &self,
642        metrics: &[QualityMetric],
643        issues: &[QualityIssue],
644    ) -> QualityAssessment {
645        // Calculate weighted quality score
646        let weighted_score: f64 = metrics
647            .iter()
648            .map(|m| {
649                if m.meets_target {
650                    m.weight
651                } else {
652                    m.weight * (m.value / m.target)
653                }
654            })
655            .sum();
656
657        let total_weight: f64 = metrics.iter().map(|m| m.weight).sum();
658        let overall_score = if total_weight > 0.0 {
659            weighted_score / total_weight
660        } else {
661            0.0
662        };
663
664        // Apply penalty for critical issues
665        let critical_penalty = issues
666            .iter()
667            .filter(|i| i.severity >= IssueSeverity::Critical)
668            .count() as f64
669            * 0.1;
670
671        let adjusted_score = (overall_score - critical_penalty).max(0.0);
672
673        let grade = match adjusted_score {
674            s if s >= 0.9 => QualityGrade::A,
675            s if s >= 0.8 => QualityGrade::B,
676            s if s >= 0.7 => QualityGrade::C,
677            s if s >= 0.6 => QualityGrade::D,
678            _ => QualityGrade::F,
679        };
680
681        let strengths = metrics
682            .iter()
683            .filter(|m| m.meets_target && m.value > m.target * 1.1)
684            .map(|m| format!("Excellent {}", m.name))
685            .collect();
686
687        let weaknesses = issues
688            .iter()
689            .filter(|i| i.severity >= IssueSeverity::Major)
690            .map(|i| i.title.clone())
691            .collect();
692
693        QualityAssessment {
694            overall_score: adjusted_score,
695            grade,
696            confidence: 0.85, // Based on analysis depth and data quality
697            strengths,
698            weaknesses,
699        }
700    }
701
702    fn generate_recommendations(
703        &self,
704        issues: &[QualityIssue],
705        performance: &PerformanceAnalysis,
706    ) -> Vec<Recommendation> {
707        let mut recommendations = Vec::new();
708
709        // Recommendations based on issues
710        for issue in issues {
711            if issue.severity >= IssueSeverity::Major {
712                recommendations.push(Recommendation {
713                    title: format!("Fix {}", issue.title),
714                    description: format!("Address {} to improve quality", issue.description),
715                    priority: match issue.severity {
716                        IssueSeverity::Critical | IssueSeverity::Blocker => {
717                            RecommendationPriority::Critical
718                        }
719                        IssueSeverity::Major => RecommendationPriority::High,
720                        _ => RecommendationPriority::Medium,
721                    },
722                    impact: issue.impact.clone(),
723                    effort: issue.fix_effort.clone(),
724                    related_issues: vec![issue.id.clone()],
725                });
726            }
727        }
728
729        // Performance-based recommendations
730        if performance.score < 0.8 {
731            recommendations.push(Recommendation {
732                title: "Improve Performance".to_string(),
733                description: "Overall performance score is below target".to_string(),
734                priority: RecommendationPriority::High,
735                impact: ImpactLevel::High,
736                effort: FixEffort::Medium,
737                related_issues: vec![],
738            });
739        }
740
741        recommendations
742    }
743
744    fn analyze_trends(&self, component: &str) -> TrendAnalysis {
745        let recent_results: Vec<_> = self
746            .history
747            .results
748            .iter()
749            .filter(|r| r.component == component)
750            .rev()
751            .take(10)
752            .collect();
753
754        if recent_results.len() < 3 {
755            return TrendAnalysis {
756                quality_trend: TrendDirection::Unknown,
757                performance_trend: TrendDirection::Unknown,
758                complexity_trend: TrendDirection::Unknown,
759                confidence: 0.0,
760                time_period: Duration::ZERO,
761            };
762        }
763
764        // Simple trend analysis based on score progression
765        let scores: Vec<f64> = recent_results.iter().map(|r| r.quality_score).collect();
766        let quality_trend = if scores.first() > scores.last() {
767            TrendDirection::Improving
768        } else if scores.first() < scores.last() {
769            TrendDirection::Declining
770        } else {
771            TrendDirection::Stable
772        };
773
774        TrendAnalysis {
775            quality_trend,
776            performance_trend: TrendDirection::Stable,
777            complexity_trend: TrendDirection::Stable,
778            confidence: 0.7,
779            time_period: Duration::from_secs(3600), // 1 hour window
780        }
781    }
782
783    fn store_analysis_result(
784        &mut self,
785        component: &str,
786        assessment: &QualityAssessment,
787        metrics: &[QualityMetric],
788    ) {
789        let mut metric_map = HashMap::new();
790        for metric in metrics {
791            metric_map.insert(metric.name.clone(), metric.value);
792        }
793
794        let result = AnalysisResult {
795            component: component.to_string(),
796            quality_score: assessment.overall_score,
797            timestamp: Instant::now(),
798            metrics: metric_map,
799        };
800
801        self.history.results.push(result);
802
803        // Trim history if too large
804        if self.history.results.len() > self.history.max_entries {
805            self.history
806                .results
807                .drain(0..self.history.results.len() - self.history.max_entries);
808        }
809    }
810}
811
/// Context data for quality analysis: the measurements the analyzer
/// consumes. Callers populate this from their own instrumentation.
#[derive(Debug)]
pub struct AnalysisContext {
    /// Performance measurement data.
    pub performance_data: PerformanceData,
    /// Memory usage data.
    pub memory_data: MemoryData,
    /// Reliability measurement data.
    pub reliability_data: ReliabilityData,
}
822
/// Performance measurement data.
#[derive(Debug)]
pub struct PerformanceData {
    /// Average operation latency (the analyzer flags values above 100µs).
    pub avg_latency: Duration,
    /// CPU usage as a percentage in 0.0..=100.0 (the analyzer divides
    /// by 100 and flags usage above 80).
    pub cpu_usage: f64,
    /// Allocation efficiency ratio in 0.0..=1.0 (metric target 0.95).
    pub allocation_efficiency: f64,
    /// Throughput (operations per second).
    /// NOTE(review): not read by the visible analysis code.
    pub throughput: f64,
}
835
/// Memory usage data.
#[derive(Debug)]
pub struct MemoryData {
    /// Current memory usage — presumably bytes; confirm at call sites.
    /// NOTE(review): not read by the visible analysis code.
    pub current_usage: usize,
    /// Memory growth rate in bytes per second (the leak heuristic fires
    /// above 1 MB/sec).
    pub growth_rate: f64,
    /// Memory efficiency ratio in 0.0..=1.0 (metric target 0.9).
    pub efficiency_ratio: f64,
    /// Memory fragmentation ratio in 0.0..=1.0 (flagged above 0.3).
    pub fragmentation_ratio: f64,
}
848
/// Reliability measurement data.
#[derive(Debug)]
pub struct ReliabilityData {
    /// Error rate as a fraction in 0.0..=1.0 (0.01 = 1%; the analyzer
    /// compares it against a 0.01 target) — not a 0..100 percentage.
    pub error_rate: f64,
    /// Success rate — presumably a fraction like `error_rate`; not read
    /// by the visible analysis code, confirm units with producers.
    pub success_rate: f64,
    /// Mean time between failures.
    /// NOTE(review): not read by the visible analysis code.
    pub mtbf: Duration,
}
859
860impl Default for AnalyzerConfig {
861    fn default() -> Self {
862        Self {
863            analysis_depth: AnalysisDepth::Standard,
864            track_trends: true,
865            max_analysis_time: Duration::from_secs(30),
866            thresholds: QualityThresholds::default(),
867        }
868    }
869}
870
871impl Default for QualityThresholds {
872    fn default() -> Self {
873        Self {
874            min_quality_score: 0.8,
875            max_complexity: 10,
876            min_coverage: 0.8,
877            max_technical_debt: 0.2,
878        }
879    }
880}
881
882impl Default for CodeAnalyzer {
883    fn default() -> Self {
884        Self::new()
885    }
886}
887
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a metric that comfortably passes its target, for use in the
    /// assessment test below.
    fn passing_metric() -> QualityMetric {
        QualityMetric {
            name: "test_metric".to_string(),
            category: MetricCategory::Performance,
            value: 0.9,
            target: 0.8,
            meets_target: true,
            weight: 1.0,
            trend: TrendDirection::Stable,
        }
    }

    #[test]
    fn test_code_analyzer_creation() {
        let analyzer = CodeAnalyzer::new();
        assert!(analyzer.config.track_trends);
        assert_eq!(analyzer.config.analysis_depth, AnalysisDepth::Standard);
    }

    #[test]
    fn test_quality_assessment() {
        let analyzer = CodeAnalyzer::new();

        // A single fully-weighted passing metric and no issues must earn
        // the top grade.
        let metrics = [passing_metric()];
        let assessment = analyzer.assess_overall_quality(&metrics, &[]);

        assert_eq!(assessment.grade, QualityGrade::A);
        assert!(assessment.overall_score >= 0.8);
    }

    #[test]
    fn test_quality_grades() {
        // Better grades compare as greater.
        assert!(QualityGrade::B < QualityGrade::A);
        assert!(QualityGrade::C < QualityGrade::B);
        assert!(QualityGrade::D > QualityGrade::F);
    }
}