Skip to main content

memscope_rs/analysis/quality/
analyzer.rs

1use std::collections::HashMap;
2use std::time::{Duration, Instant};
3
4/// Code quality analyzer for memory analysis operations
5pub struct CodeAnalyzer {
6    /// Quality metrics configuration
7    config: AnalyzerConfig,
8    /// Historical analysis data
9    history: AnalysisHistory,
10    /// Quality baselines
11    baselines: HashMap<String, QualityBaseline>,
12}
13
14/// Configuration for code analysis
15#[derive(Debug, Clone)]
16pub struct AnalyzerConfig {
17    /// Depth of analysis to perform
18    pub analysis_depth: AnalysisDepth,
19    /// Whether to track quality trends
20    pub track_trends: bool,
21    /// Maximum time to spend on analysis
22    pub max_analysis_time: Duration,
23    /// Quality thresholds
24    pub thresholds: QualityThresholds,
25}
26
/// How deep an analysis run digs, from quick checks to exhaustive scans.
#[derive(Debug, Clone, PartialEq)]
pub enum AnalysisDepth {
    /// Basic quality checks only
    Surface,
    /// Standard analysis depth
    Standard,
    /// Comprehensive deep analysis
    Deep,
    /// Exhaustive analysis (slow)
    Exhaustive,
}
39
/// Pass/fail limits applied when judging individual quality metrics.
#[derive(Debug, Clone)]
pub struct QualityThresholds {
    /// Lowest quality score considered acceptable (0.0–1.0 scale).
    pub min_quality_score: f64,
    /// Highest complexity value considered acceptable.
    pub max_complexity: u32,
    /// Lowest acceptable test-coverage fraction.
    pub min_coverage: f64,
    /// Highest acceptable technical-debt ratio.
    pub max_technical_debt: f64,
}
52
53/// Historical analysis data
54#[derive(Debug)]
55struct AnalysisHistory {
56    /// Previous analysis results
57    results: Vec<AnalysisResult>,
58    /// Maximum history entries to keep
59    max_entries: usize,
60}
61
62/// Quality baseline for comparison
63#[derive(Debug, Clone)]
64pub struct QualityBaseline {
65    /// Component name
66    pub component: String,
67    /// Baseline quality score
68    pub quality_score: f64,
69    /// Baseline complexity
70    pub complexity: u32,
71    /// Baseline performance metrics
72    pub performance: BaselinePerformance,
73    /// When baseline was established
74    pub timestamp: Instant,
75}
76
/// Performance figures captured as part of a quality baseline.
#[derive(Debug, Clone)]
pub struct BaselinePerformance {
    /// Mean time one operation took at baseline.
    pub avg_execution_time: Duration,
    /// Bytes of memory consumed per operation at baseline.
    pub memory_per_operation: usize,
    /// Fraction of operations that failed at baseline.
    pub error_rate: f64,
}
87
88/// Comprehensive analysis report
89#[derive(Debug, Clone)]
90pub struct AnalysisReport {
91    /// Component being analyzed
92    pub component: String,
93    /// Overall quality assessment
94    pub quality_assessment: QualityAssessment,
95    /// Individual quality metrics
96    pub metrics: Vec<QualityMetric>,
97    /// Detected issues
98    pub issues: Vec<QualityIssue>,
99    /// Performance analysis
100    pub performance_analysis: PerformanceAnalysis,
101    /// Recommendations for improvement
102    pub recommendations: Vec<Recommendation>,
103    /// Trend analysis if available
104    pub trend_analysis: Option<TrendAnalysis>,
105    /// Analysis execution time
106    pub analysis_duration: Duration,
107}
108
109/// Overall quality assessment
110#[derive(Debug, Clone)]
111pub struct QualityAssessment {
112    /// Overall quality score (0.0 to 1.0)
113    pub overall_score: f64,
114    /// Quality grade
115    pub grade: QualityGrade,
116    /// Assessment confidence level
117    pub confidence: f64,
118    /// Key strengths
119    pub strengths: Vec<String>,
120    /// Key weaknesses
121    pub weaknesses: Vec<String>,
122}
123
/// Letter grade classifying overall quality, `A` being best.
#[derive(Debug, Clone, PartialEq)]
pub enum QualityGrade {
    /// Excellent quality (90-100%)
    A,
    /// Good quality (80-89%)
    B,
    /// Acceptable quality (70-79%)
    C,
    /// Poor quality (60-69%)
    D,
    /// Failing quality (<60%)
    F,
}

impl QualityGrade {
    /// Numeric rank used for ordering: a higher rank is a better grade.
    ///
    /// `Ord` cannot simply be derived here: the variants are declared best
    /// first, and a derived ordering would make `A` the *smallest* value.
    /// Keeping the rank table in one place (instead of duplicating the
    /// `match` in `cmp` for both operands) means a new variant only needs
    /// to be added once.
    fn rank(&self) -> u8 {
        match self {
            QualityGrade::A => 5,
            QualityGrade::B => 4,
            QualityGrade::C => 3,
            QualityGrade::D => 2,
            QualityGrade::F => 1,
        }
    }
}

impl PartialOrd for QualityGrade {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Total order exists, so delegate to `Ord`.
        Some(self.cmp(other))
    }
}

impl Ord for QualityGrade {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.rank().cmp(&other.rank())
    }
}

impl Eq for QualityGrade {}
166
167/// Individual quality metric
168#[derive(Debug, Clone)]
169pub struct QualityMetric {
170    /// Metric name
171    pub name: String,
172    /// Metric category
173    pub category: MetricCategory,
174    /// Current value
175    pub value: f64,
176    /// Target value
177    pub target: f64,
178    /// Whether metric meets target
179    pub meets_target: bool,
180    /// Metric importance weight
181    pub weight: f64,
182    /// Trend direction
183    pub trend: TrendDirection,
184}
185
/// Quality dimension a metric is grouped under.
#[derive(Debug, Clone, PartialEq)]
pub enum MetricCategory {
    /// Performance metrics
    Performance,
    /// Reliability metrics
    Reliability,
    /// Maintainability metrics
    Maintainability,
    /// Security metrics
    Security,
    /// Efficiency metrics
    Efficiency,
}
200
201/// Quality issue detected
202#[derive(Debug, Clone)]
203pub struct QualityIssue {
204    /// Issue identifier
205    pub id: String,
206    /// Issue title
207    pub title: String,
208    /// Detailed description
209    pub description: String,
210    /// Issue severity
211    pub severity: IssueSeverity,
212    /// Issue category
213    pub category: IssueCategory,
214    /// Location in code
215    pub location: Option<String>,
216    /// Estimated fix effort
217    pub fix_effort: FixEffort,
218    /// Impact if not fixed
219    pub impact: ImpactLevel,
220}
221
/// Severity scale for quality issues; declaration order is ascending,
/// so the derived `Ord` ranks `Blocker` highest.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum IssueSeverity {
    /// Minor issue
    Minor,
    /// Moderate issue
    Moderate,
    /// Major issue
    Major,
    /// Critical issue
    Critical,
    /// Blocker issue
    Blocker,
}
236
/// Kind of problem a quality issue represents.
#[derive(Debug, Clone, PartialEq)]
pub enum IssueCategory {
    /// Memory management issues
    MemoryManagement,
    /// Performance problems
    Performance,
    /// Thread safety issues
    ThreadSafety,
    /// Error handling problems
    ErrorHandling,
    /// Code style violations
    CodeStyle,
    /// Design issues
    Design,
}
253
/// Rough time bucket for how long a fix is expected to take.
#[derive(Debug, Clone, PartialEq)]
pub enum FixEffort {
    /// Quick fix (< 1 hour)
    Trivial,
    /// Easy fix (1-4 hours)
    Easy,
    /// Medium fix (4-16 hours)
    Medium,
    /// Hard fix (16-40 hours)
    Hard,
    /// Very hard fix (> 40 hours)
    VeryHard,
}
268
/// Consequence scale for unfixed issues; declaration order is ascending,
/// so the derived `Ord` ranks `Critical` highest.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum ImpactLevel {
    /// Minimal impact
    Minimal,
    /// Low impact
    Low,
    /// Medium impact
    Medium,
    /// High impact
    High,
    /// Critical impact
    Critical,
}
283
284/// Performance analysis results
285#[derive(Debug, Clone)]
286pub struct PerformanceAnalysis {
287    /// Performance score (0.0 to 1.0)
288    pub score: f64,
289    /// Performance bottlenecks
290    pub bottlenecks: Vec<PerformanceBottleneck>,
291    /// Memory efficiency
292    pub memory_efficiency: f64,
293    /// CPU efficiency
294    pub cpu_efficiency: f64,
295    /// Scalability assessment
296    pub scalability: ScalabilityAssessment,
297}
298
299/// Performance bottleneck information
300#[derive(Debug, Clone)]
301pub struct PerformanceBottleneck {
302    /// Bottleneck location
303    pub location: String,
304    /// Type of bottleneck
305    pub bottleneck_type: BottleneckType,
306    /// Severity of bottleneck
307    pub severity: f64,
308    /// Description
309    pub description: String,
310    /// Suggested optimization
311    pub optimization: String,
312}
313
/// Classification of what is limiting performance.
#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckType {
    /// CPU intensive operation
    CpuBound,
    /// Memory allocation bottleneck
    MemoryBound,
    /// I/O bottleneck
    IoBound,
    /// Lock contention
    LockContention,
    /// Cache misses
    CacheMiss,
    /// Algorithm inefficiency
    AlgorithmInefficiency,
}
330
331/// Scalability assessment
332#[derive(Debug, Clone)]
333pub struct ScalabilityAssessment {
334    /// Scalability score (0.0 to 1.0)
335    pub score: f64,
336    /// Expected scaling behavior
337    pub scaling_behavior: ScalingBehavior,
338    /// Resource scaling factors
339    pub resource_scaling: ResourceScaling,
340    /// Scalability limitations
341    pub limitations: Vec<String>,
342}
343
/// Growth class describing how resource usage rises with load.
#[derive(Debug, Clone, PartialEq)]
pub enum ScalingBehavior {
    /// Constant time complexity
    Constant,
    /// Linear scaling
    Linear,
    /// Logarithmic scaling
    Logarithmic,
    /// Quadratic scaling
    Quadratic,
    /// Exponential scaling (bad)
    Exponential,
}
358
/// Multipliers describing how each resource grows relative to load.
#[derive(Debug, Clone)]
pub struct ResourceScaling {
    /// Memory growth multiplier.
    pub memory_factor: f64,
    /// CPU growth multiplier.
    pub cpu_factor: f64,
    /// Network growth multiplier.
    pub network_factor: f64,
}
369
370/// Improvement recommendation
371#[derive(Debug, Clone)]
372pub struct Recommendation {
373    /// Recommendation title
374    pub title: String,
375    /// Detailed description
376    pub description: String,
377    /// Priority level
378    pub priority: RecommendationPriority,
379    /// Expected impact
380    pub impact: ImpactLevel,
381    /// Implementation effort
382    pub effort: FixEffort,
383    /// Related quality issues
384    pub related_issues: Vec<String>,
385}
386
/// Urgency scale for recommendations; declaration order is ascending,
/// so the derived `Ord` ranks `Critical` highest.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum RecommendationPriority {
    /// Low priority
    Low,
    /// Medium priority
    Medium,
    /// High priority
    High,
    /// Critical priority
    Critical,
}
399
400/// Trend analysis over time
401#[derive(Debug, Clone)]
402pub struct TrendAnalysis {
403    /// Quality trend direction
404    pub quality_trend: TrendDirection,
405    /// Performance trend direction
406    pub performance_trend: TrendDirection,
407    /// Complexity trend direction
408    pub complexity_trend: TrendDirection,
409    /// Trend confidence level
410    pub confidence: f64,
411    /// Analysis time period
412    pub time_period: Duration,
413}
414
/// Direction a tracked measurement is moving over time.
#[derive(Debug, Clone, PartialEq)]
pub enum TrendDirection {
    /// Improving trend
    Improving,
    /// Stable trend
    Stable,
    /// Declining trend
    Declining,
    /// Unknown trend
    Unknown,
}
427
/// Minimal per-run record kept in history for trend computation.
#[derive(Debug, Clone)]
struct AnalysisResult {
    /// Component the result belongs to.
    component: String,
    /// Overall score the run produced.
    quality_score: f64,
}
434
435impl CodeAnalyzer {
436    /// Create new code analyzer
437    pub fn new() -> Self {
438        Self {
439            config: AnalyzerConfig::default(),
440            history: AnalysisHistory {
441                results: Vec::new(),
442                max_entries: 100,
443            },
444            baselines: HashMap::new(),
445        }
446    }
447
448    /// Create analyzer with custom configuration
449    pub fn with_config(config: AnalyzerConfig) -> Self {
450        Self {
451            config,
452            history: AnalysisHistory {
453                results: Vec::new(),
454                max_entries: 100,
455            },
456            baselines: HashMap::new(),
457        }
458    }
459
460    /// Set quality baseline for component
461    pub fn set_baseline(&mut self, component: &str, baseline: QualityBaseline) {
462        self.baselines.insert(component.to_string(), baseline);
463    }
464
465    /// Analyze code quality for component
466    pub fn analyze_quality(
467        &mut self,
468        component: &str,
469        context: &AnalysisContext,
470    ) -> AnalysisReport {
471        let start_time = Instant::now();
472
473        // Perform quality analysis
474        let metrics = self.calculate_quality_metrics(context);
475        let issues = self.detect_quality_issues(context);
476        let performance_analysis = self.analyze_performance(context);
477        let quality_assessment = self.assess_overall_quality(&metrics, &issues);
478        let recommendations = self.generate_recommendations(&issues, &performance_analysis);
479        let trend_analysis = if self.config.track_trends {
480            Some(self.analyze_trends(component))
481        } else {
482            None
483        };
484
485        let analysis_duration = start_time.elapsed();
486
487        // Store result in history
488        self.store_analysis_result(component, &quality_assessment, &metrics);
489
490        AnalysisReport {
491            component: component.to_string(),
492            quality_assessment,
493            metrics,
494            issues,
495            performance_analysis,
496            recommendations,
497            trend_analysis,
498            analysis_duration,
499        }
500    }
501
502    fn calculate_quality_metrics(&self, context: &AnalysisContext) -> Vec<QualityMetric> {
503        vec![
504            // Performance metrics
505            QualityMetric {
506                name: "allocation_efficiency".to_string(),
507                category: MetricCategory::Performance,
508                value: context.performance_data.allocation_efficiency,
509                target: 0.95,
510                meets_target: context.performance_data.allocation_efficiency >= 0.95,
511                weight: 0.3,
512                trend: TrendDirection::Unknown,
513            },
514            // Reliability metrics
515            QualityMetric {
516                name: "error_rate".to_string(),
517                category: MetricCategory::Reliability,
518                value: context.reliability_data.error_rate,
519                target: 0.01, // 1% max error rate
520                meets_target: context.reliability_data.error_rate <= 0.01,
521                weight: 0.25,
522                trend: TrendDirection::Unknown,
523            },
524            // Memory efficiency
525            QualityMetric {
526                name: "memory_efficiency".to_string(),
527                category: MetricCategory::Efficiency,
528                value: context.memory_data.efficiency_ratio,
529                target: 0.9,
530                meets_target: context.memory_data.efficiency_ratio >= 0.9,
531                weight: 0.2,
532                trend: TrendDirection::Unknown,
533            },
534        ]
535    }
536
537    fn detect_quality_issues(&self, context: &AnalysisContext) -> Vec<QualityIssue> {
538        let mut issues = Vec::new();
539
540        // Check for memory leaks
541        if context.memory_data.growth_rate > 1024.0 * 1024.0 {
542            // 1MB/sec
543            issues.push(QualityIssue {
544                id: "memory_leak_detected".to_string(),
545                title: "Potential Memory Leak".to_string(),
546                description: format!(
547                    "High memory growth rate: {:.2}MB/sec",
548                    context.memory_data.growth_rate / (1024.0 * 1024.0)
549                ),
550                severity: IssueSeverity::Major,
551                category: IssueCategory::MemoryManagement,
552                location: Some("memory_tracking".to_string()),
553                fix_effort: FixEffort::Medium,
554                impact: ImpactLevel::High,
555            });
556        }
557
558        // Check for performance issues
559        if context.performance_data.avg_latency > Duration::from_micros(100) {
560            issues.push(QualityIssue {
561                id: "high_latency".to_string(),
562                title: "High Operation Latency".to_string(),
563                description: format!(
564                    "Average latency {:.2}µs exceeds threshold",
565                    context.performance_data.avg_latency.as_micros()
566                ),
567                severity: IssueSeverity::Moderate,
568                category: IssueCategory::Performance,
569                location: Some("allocation_tracking".to_string()),
570                fix_effort: FixEffort::Easy,
571                impact: ImpactLevel::Medium,
572            });
573        }
574
575        issues
576    }
577
578    fn analyze_performance(&self, context: &AnalysisContext) -> PerformanceAnalysis {
579        let bottlenecks = self.identify_bottlenecks(context);
580        let memory_efficiency = context.memory_data.efficiency_ratio;
581        let cpu_efficiency = 1.0 - (context.performance_data.cpu_usage / 100.0);
582
583        let scalability = ScalabilityAssessment {
584            score: 0.8, // Placeholder calculation
585            scaling_behavior: ScalingBehavior::Linear,
586            resource_scaling: ResourceScaling {
587                memory_factor: 1.2,
588                cpu_factor: 1.1,
589                network_factor: 1.0,
590            },
591            limitations: vec!["Memory bandwidth may become bottleneck at scale".to_string()],
592        };
593
594        let score = (memory_efficiency + cpu_efficiency + scalability.score) / 3.0;
595
596        PerformanceAnalysis {
597            score,
598            bottlenecks,
599            memory_efficiency,
600            cpu_efficiency,
601            scalability,
602        }
603    }
604
605    fn identify_bottlenecks(&self, context: &AnalysisContext) -> Vec<PerformanceBottleneck> {
606        let mut bottlenecks = Vec::new();
607
608        if context.performance_data.cpu_usage > 80.0 {
609            bottlenecks.push(PerformanceBottleneck {
610                location: "allocation_tracking".to_string(),
611                bottleneck_type: BottleneckType::CpuBound,
612                severity: context.performance_data.cpu_usage / 100.0,
613                description: "High CPU usage in allocation tracking".to_string(),
614                optimization: "Consider optimizing hot paths or using faster data structures"
615                    .to_string(),
616            });
617        }
618
619        if context.memory_data.fragmentation_ratio > 0.3 {
620            bottlenecks.push(PerformanceBottleneck {
621                location: "memory_management".to_string(),
622                bottleneck_type: BottleneckType::MemoryBound,
623                severity: context.memory_data.fragmentation_ratio,
624                description: "High memory fragmentation".to_string(),
625                optimization: "Implement memory compaction or use memory pools".to_string(),
626            });
627        }
628
629        bottlenecks
630    }
631
632    fn assess_overall_quality(
633        &self,
634        metrics: &[QualityMetric],
635        issues: &[QualityIssue],
636    ) -> QualityAssessment {
637        // Calculate weighted quality score
638        let weighted_score: f64 = metrics
639            .iter()
640            .map(|m| {
641                if m.meets_target {
642                    m.weight
643                } else {
644                    m.weight * (m.value / m.target)
645                }
646            })
647            .sum();
648
649        let total_weight: f64 = metrics.iter().map(|m| m.weight).sum();
650        let overall_score = if total_weight > 0.0 {
651            weighted_score / total_weight
652        } else {
653            0.0
654        };
655
656        // Apply penalty for critical issues
657        let critical_penalty = issues
658            .iter()
659            .filter(|i| i.severity >= IssueSeverity::Critical)
660            .count() as f64
661            * 0.1;
662
663        let adjusted_score = (overall_score - critical_penalty).max(0.0);
664
665        let grade = match adjusted_score {
666            s if s >= 0.9 => QualityGrade::A,
667            s if s >= 0.8 => QualityGrade::B,
668            s if s >= 0.7 => QualityGrade::C,
669            s if s >= 0.6 => QualityGrade::D,
670            _ => QualityGrade::F,
671        };
672
673        let strengths = metrics
674            .iter()
675            .filter(|m| m.meets_target && m.value > m.target * 1.1)
676            .map(|m| format!("Excellent {}", m.name))
677            .collect();
678
679        let weaknesses = issues
680            .iter()
681            .filter(|i| i.severity >= IssueSeverity::Major)
682            .map(|i| i.title.clone())
683            .collect();
684
685        QualityAssessment {
686            overall_score: adjusted_score,
687            grade,
688            confidence: 0.85, // Based on analysis depth and data quality
689            strengths,
690            weaknesses,
691        }
692    }
693
694    fn generate_recommendations(
695        &self,
696        issues: &[QualityIssue],
697        performance: &PerformanceAnalysis,
698    ) -> Vec<Recommendation> {
699        let mut recommendations = Vec::new();
700
701        // Recommendations based on issues
702        for issue in issues {
703            if issue.severity >= IssueSeverity::Major {
704                recommendations.push(Recommendation {
705                    title: format!("Fix {}", issue.title),
706                    description: format!("Address {} to improve quality", issue.description),
707                    priority: match issue.severity {
708                        IssueSeverity::Critical | IssueSeverity::Blocker => {
709                            RecommendationPriority::Critical
710                        }
711                        IssueSeverity::Major => RecommendationPriority::High,
712                        _ => RecommendationPriority::Medium,
713                    },
714                    impact: issue.impact.clone(),
715                    effort: issue.fix_effort.clone(),
716                    related_issues: vec![issue.id.clone()],
717                });
718            }
719        }
720
721        // Performance-based recommendations
722        if performance.score < 0.8 {
723            recommendations.push(Recommendation {
724                title: "Improve Performance".to_string(),
725                description: "Overall performance score is below target".to_string(),
726                priority: RecommendationPriority::High,
727                impact: ImpactLevel::High,
728                effort: FixEffort::Medium,
729                related_issues: vec![],
730            });
731        }
732
733        recommendations
734    }
735
736    fn analyze_trends(&self, component: &str) -> TrendAnalysis {
737        let recent_results: Vec<_> = self
738            .history
739            .results
740            .iter()
741            .filter(|r| r.component == component)
742            .rev()
743            .take(10)
744            .collect();
745
746        if recent_results.len() < 3 {
747            return TrendAnalysis {
748                quality_trend: TrendDirection::Unknown,
749                performance_trend: TrendDirection::Unknown,
750                complexity_trend: TrendDirection::Unknown,
751                confidence: 0.0,
752                time_period: Duration::ZERO,
753            };
754        }
755
756        // Simple trend analysis based on score progression
757        let scores: Vec<f64> = recent_results.iter().map(|r| r.quality_score).collect();
758        let quality_trend = if scores.first() > scores.last() {
759            TrendDirection::Improving
760        } else if scores.first() < scores.last() {
761            TrendDirection::Declining
762        } else {
763            TrendDirection::Stable
764        };
765
766        TrendAnalysis {
767            quality_trend,
768            performance_trend: TrendDirection::Stable,
769            complexity_trend: TrendDirection::Stable,
770            confidence: 0.7,
771            time_period: Duration::from_secs(3600), // 1 hour window
772        }
773    }
774
775    fn store_analysis_result(
776        &mut self,
777        component: &str,
778        assessment: &QualityAssessment,
779        _metrics: &[QualityMetric],
780    ) {
781        let result = AnalysisResult {
782            component: component.to_string(),
783            quality_score: assessment.overall_score,
784        };
785
786        self.history.results.push(result);
787
788        // Trim history if too large
789        if self.history.results.len() > self.history.max_entries {
790            self.history
791                .results
792                .drain(0..self.history.results.len() - self.history.max_entries);
793        }
794    }
795}
796
797/// Context data for quality analysis
798#[derive(Debug)]
799pub struct AnalysisContext {
800    /// Performance measurement data
801    pub performance_data: PerformanceData,
802    /// Memory usage data
803    pub memory_data: MemoryData,
804    /// Reliability measurement data
805    pub reliability_data: ReliabilityData,
806}
807
/// Measured performance figures for one component.
#[derive(Debug)]
pub struct PerformanceData {
    /// Mean latency of a single operation.
    pub avg_latency: Duration,
    /// CPU usage as a percentage (0–100 expected).
    pub cpu_usage: f64,
    /// Allocation efficiency ratio (0.0–1.0).
    pub allocation_efficiency: f64,
    /// Operations completed per second.
    pub throughput: f64,
}
820
/// Measured memory figures for one component.
#[derive(Debug)]
pub struct MemoryData {
    /// Bytes of memory currently in use.
    pub current_usage: usize,
    /// Growth rate in bytes per second.
    pub growth_rate: f64,
    /// Efficiency ratio (0.0–1.0).
    pub efficiency_ratio: f64,
    /// Fragmentation ratio (0.0–1.0).
    pub fragmentation_ratio: f64,
}
833
/// Measured reliability figures for one component.
#[derive(Debug)]
pub struct ReliabilityData {
    /// Fraction of operations that failed.
    pub error_rate: f64,
    /// Fraction of operations that succeeded.
    pub success_rate: f64,
    /// Mean time between failures.
    pub mtbf: Duration,
}
844
845impl Default for AnalyzerConfig {
846    fn default() -> Self {
847        Self {
848            analysis_depth: AnalysisDepth::Standard,
849            track_trends: true,
850            max_analysis_time: Duration::from_secs(30),
851            thresholds: QualityThresholds::default(),
852        }
853    }
854}
855
856impl Default for QualityThresholds {
857    fn default() -> Self {
858        Self {
859            min_quality_score: 0.8,
860            max_complexity: 10,
861            min_coverage: 0.8,
862            max_technical_debt: 0.2,
863        }
864    }
865}
866
867impl Default for CodeAnalyzer {
868    fn default() -> Self {
869        Self::new()
870    }
871}
872
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_code_analyzer_creation() {
        // A fresh analyzer picks up the default configuration.
        let analyzer = CodeAnalyzer::new();
        assert!(analyzer.config.track_trends);
        assert_eq!(analyzer.config.analysis_depth, AnalysisDepth::Standard);
    }

    #[test]
    fn test_quality_assessment() {
        let analyzer = CodeAnalyzer::new();

        // One fully-weighted metric that beats its target should yield a
        // top score and an A grade with no issues present.
        let sample_metric = QualityMetric {
            name: "test_metric".to_string(),
            category: MetricCategory::Performance,
            value: 0.9,
            target: 0.8,
            meets_target: true,
            weight: 1.0,
            trend: TrendDirection::Stable,
        };

        let assessment = analyzer.assess_overall_quality(&[sample_metric], &[]);

        assert!(assessment.overall_score >= 0.8);
        assert_eq!(assessment.grade, QualityGrade::A);
    }

    #[test]
    fn test_quality_grades() {
        // A is best, F worst; the manual Ord impl must reflect that.
        assert!(QualityGrade::A > QualityGrade::B);
        assert!(QualityGrade::B > QualityGrade::C);
        assert!(QualityGrade::F < QualityGrade::D);
    }
}