use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Analyzes component quality, keeps a bounded history of past analyses,
/// and holds per-component baselines registered via [`CodeAnalyzer::set_baseline`].
pub struct CodeAnalyzer {
    config: AnalyzerConfig,
    history: AnalysisHistory,
    baselines: HashMap<String, QualityBaseline>,
}

/// Configuration controlling how analyses are performed.
#[derive(Debug, Clone)]
pub struct AnalyzerConfig {
    pub analysis_depth: AnalysisDepth,
    pub track_trends: bool,
    pub max_analysis_time: Duration,
    pub thresholds: QualityThresholds,
}

/// How thoroughly a component is analyzed, from cheapest to most expensive.
#[derive(Debug, Clone, PartialEq)]
pub enum AnalysisDepth {
    Surface,
    Standard,
    Deep,
    Exhaustive,
}

/// Minimum acceptable quality levels used when scoring a component.
#[derive(Debug, Clone)]
pub struct QualityThresholds {
    pub min_quality_score: f64,
    pub max_complexity: u32,
    pub min_coverage: f64,
    pub max_technical_debt: f64,
}

/// Bounded store of past analysis results; the oldest entries are evicted first.
#[derive(Debug)]
struct AnalysisHistory {
    results: Vec<AnalysisResult>,
    max_entries: usize,
}

/// Reference quality measurements for a component, used for comparison.
#[derive(Debug, Clone)]
pub struct QualityBaseline {
    pub component: String,
    pub quality_score: f64,
    pub complexity: u32,
    pub performance: BaselinePerformance,
    pub timestamp: Instant,
}

/// Performance figures captured alongside a baseline.
#[derive(Debug, Clone)]
pub struct BaselinePerformance {
    pub avg_execution_time: Duration,
    pub memory_per_operation: usize,
    pub error_rate: f64,
}

/// Full result of a single call to [`CodeAnalyzer::analyze_quality`].
#[derive(Debug, Clone)]
pub struct AnalysisReport {
    pub component: String,
    pub quality_assessment: QualityAssessment,
    pub metrics: Vec<QualityMetric>,
    pub issues: Vec<QualityIssue>,
    pub performance_analysis: PerformanceAnalysis,
    pub recommendations: Vec<Recommendation>,
    pub trend_analysis: Option<TrendAnalysis>,
    pub analysis_duration: Duration,
}

/// Summary judgment of a component's quality.
#[derive(Debug, Clone)]
pub struct QualityAssessment {
    pub overall_score: f64,
    pub grade: QualityGrade,
    pub confidence: f64,
    pub strengths: Vec<String>,
    pub weaknesses: Vec<String>,
}

/// Letter grade for a component; `A` is best and `F` is worst.
#[derive(Debug, Clone, PartialEq)]
pub enum QualityGrade {
    A,
    B,
    C,
    D,
    F,
}

impl QualityGrade {
    /// Numeric rank used for ordering; higher means a better grade.
    fn rank(&self) -> u8 {
        match self {
            QualityGrade::A => 5,
            QualityGrade::B => 4,
            QualityGrade::C => 3,
            QualityGrade::D => 2,
            QualityGrade::F => 1,
        }
    }
}

impl PartialOrd for QualityGrade {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for QualityGrade {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.rank().cmp(&other.rank())
    }
}

impl Eq for QualityGrade {}

/// A single measured metric with its target and weight in the overall score.
#[derive(Debug, Clone)]
pub struct QualityMetric {
    pub name: String,
    pub category: MetricCategory,
    pub value: f64,
    pub target: f64,
    pub meets_target: bool,
    pub weight: f64,
    pub trend: TrendDirection,
}

#[derive(Debug, Clone, PartialEq)]
pub enum MetricCategory {
    Performance,
    Reliability,
    Maintainability,
    Security,
    Efficiency,
}

/// A concrete problem found during analysis.
#[derive(Debug, Clone)]
pub struct QualityIssue {
    pub id: String,
    pub title: String,
    pub description: String,
    pub severity: IssueSeverity,
    pub category: IssueCategory,
    pub location: Option<String>,
    pub fix_effort: FixEffort,
    pub impact: ImpactLevel,
}

/// Issue severity, listed in ascending order so the derived `Ord`
/// ranks `Blocker` highest.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum IssueSeverity {
    Minor,
    Moderate,
    Major,
    Critical,
    Blocker,
}

#[derive(Debug, Clone, PartialEq)]
pub enum IssueCategory {
    MemoryManagement,
    Performance,
    ThreadSafety,
    ErrorHandling,
    CodeStyle,
    Design,
}

#[derive(Debug, Clone, PartialEq)]
pub enum FixEffort {
    Trivial,
    Easy,
    Medium,
    Hard,
    VeryHard,
}

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum ImpactLevel {
    Minimal,
    Low,
    Medium,
    High,
    Critical,
}

#[derive(Debug, Clone)]
pub struct PerformanceAnalysis {
    pub score: f64,
    pub bottlenecks: Vec<PerformanceBottleneck>,
    pub memory_efficiency: f64,
    pub cpu_efficiency: f64,
    pub scalability: ScalabilityAssessment,
}

#[derive(Debug, Clone)]
pub struct PerformanceBottleneck {
    pub location: String,
    pub bottleneck_type: BottleneckType,
    pub severity: f64,
    pub description: String,
    pub optimization: String,
}

#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckType {
    CpuBound,
    MemoryBound,
    IoBound,
    LockContention,
    CacheMiss,
    AlgorithmInefficiency,
}

#[derive(Debug, Clone)]
pub struct ScalabilityAssessment {
    pub score: f64,
    pub scaling_behavior: ScalingBehavior,
    pub resource_scaling: ResourceScaling,
    pub limitations: Vec<String>,
}

#[derive(Debug, Clone, PartialEq)]
pub enum ScalingBehavior {
    Constant,
    Linear,
    Logarithmic,
    Quadratic,
    Exponential,
}

#[derive(Debug, Clone)]
pub struct ResourceScaling {
    pub memory_factor: f64,
    pub cpu_factor: f64,
    pub network_factor: f64,
}

#[derive(Debug, Clone)]
pub struct Recommendation {
    pub title: String,
    pub description: String,
    pub priority: RecommendationPriority,
    pub impact: ImpactLevel,
    pub effort: FixEffort,
    pub related_issues: Vec<String>,
}

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum RecommendationPriority {
    Low,
    Medium,
    High,
    Critical,
}

#[derive(Debug, Clone)]
pub struct TrendAnalysis {
    pub quality_trend: TrendDirection,
    pub performance_trend: TrendDirection,
    pub complexity_trend: TrendDirection,
    pub confidence: f64,
    pub time_period: Duration,
}

#[derive(Debug, Clone, PartialEq)]
pub enum TrendDirection {
    Improving,
    Stable,
    Declining,
    Unknown,
}

#[derive(Debug, Clone)]
struct AnalysisResult {
    component: String,
    quality_score: f64,
    #[allow(dead_code)]
    timestamp: Instant,
    #[allow(dead_code)]
    metrics: HashMap<String, f64>,
}

impl CodeAnalyzer {
    /// Creates an analyzer with the default configuration.
    pub fn new() -> Self {
        Self {
            config: AnalyzerConfig::default(),
            history: AnalysisHistory {
                results: Vec::new(),
                max_entries: 100,
            },
            baselines: HashMap::new(),
        }
    }

    /// Creates an analyzer with a custom configuration.
    pub fn with_config(config: AnalyzerConfig) -> Self {
        Self {
            config,
            history: AnalysisHistory {
                results: Vec::new(),
                max_entries: 100,
            },
            baselines: HashMap::new(),
        }
    }

    /// Registers (or replaces) the quality baseline for a component.
    pub fn set_baseline(&mut self, component: &str, baseline: QualityBaseline) {
        self.baselines.insert(component.to_string(), baseline);
    }

    /// Runs a full quality analysis for `component` and records the result
    /// in the history so later calls can report trends.
    pub fn analyze_quality(
        &mut self,
        component: &str,
        context: &AnalysisContext,
    ) -> AnalysisReport {
        let start_time = Instant::now();

        let metrics = self.calculate_quality_metrics(context);
        let issues = self.detect_quality_issues(context);
        let performance_analysis = self.analyze_performance(context);
        let quality_assessment = self.assess_overall_quality(&metrics, &issues);
        let recommendations = self.generate_recommendations(&issues, &performance_analysis);
        let trend_analysis = if self.config.track_trends {
            Some(self.analyze_trends(component))
        } else {
            None
        };

        let analysis_duration = start_time.elapsed();

        self.store_analysis_result(component, &quality_assessment, &metrics);

        AnalysisReport {
            component: component.to_string(),
            quality_assessment,
            metrics,
            issues,
            performance_analysis,
            recommendations,
            trend_analysis,
            analysis_duration,
        }
    }

    fn calculate_quality_metrics(&self, context: &AnalysisContext) -> Vec<QualityMetric> {
        vec![
            QualityMetric {
                name: "allocation_efficiency".to_string(),
                category: MetricCategory::Performance,
                value: context.performance_data.allocation_efficiency,
                target: 0.95,
                meets_target: context.performance_data.allocation_efficiency >= 0.95,
                weight: 0.3,
                trend: TrendDirection::Unknown,
            },
            QualityMetric {
                name: "error_rate".to_string(),
                category: MetricCategory::Reliability,
                value: context.reliability_data.error_rate,
                target: 0.01,
                meets_target: context.reliability_data.error_rate <= 0.01,
                weight: 0.25,
                trend: TrendDirection::Unknown,
            },
            QualityMetric {
                name: "memory_efficiency".to_string(),
                category: MetricCategory::Efficiency,
                value: context.memory_data.efficiency_ratio,
                target: 0.9,
                meets_target: context.memory_data.efficiency_ratio >= 0.9,
                weight: 0.2,
                trend: TrendDirection::Unknown,
            },
        ]
    }

    fn detect_quality_issues(&self, context: &AnalysisContext) -> Vec<QualityIssue> {
        let mut issues = Vec::new();

        // Growth above 1 MB/s is treated as a potential leak.
        if context.memory_data.growth_rate > 1024.0 * 1024.0 {
            issues.push(QualityIssue {
                id: "memory_leak_detected".to_string(),
                title: "Potential Memory Leak".to_string(),
                description: format!(
                    "High memory growth rate: {:.2}MB/sec",
                    context.memory_data.growth_rate / (1024.0 * 1024.0)
                ),
                severity: IssueSeverity::Major,
                category: IssueCategory::MemoryManagement,
                location: Some("memory_tracking".to_string()),
                fix_effort: FixEffort::Medium,
                impact: ImpactLevel::High,
            });
        }

        if context.performance_data.avg_latency > Duration::from_micros(100) {
            issues.push(QualityIssue {
                id: "high_latency".to_string(),
                title: "High Operation Latency".to_string(),
                description: format!(
                    "Average latency {}µs exceeds the 100µs threshold",
                    context.performance_data.avg_latency.as_micros()
                ),
                severity: IssueSeverity::Moderate,
                category: IssueCategory::Performance,
                location: Some("allocation_tracking".to_string()),
                fix_effort: FixEffort::Easy,
                impact: ImpactLevel::Medium,
            });
        }

        issues
    }

    fn analyze_performance(&self, context: &AnalysisContext) -> PerformanceAnalysis {
        let bottlenecks = self.identify_bottlenecks(context);
        let memory_efficiency = context.memory_data.efficiency_ratio;
        let cpu_efficiency = 1.0 - (context.performance_data.cpu_usage / 100.0);

        let scalability = ScalabilityAssessment {
            score: 0.8,
            scaling_behavior: ScalingBehavior::Linear,
            resource_scaling: ResourceScaling {
                memory_factor: 1.2,
                cpu_factor: 1.1,
                network_factor: 1.0,
            },
            limitations: vec!["Memory bandwidth may become a bottleneck at scale".to_string()],
        };

        let score = (memory_efficiency + cpu_efficiency + scalability.score) / 3.0;

        PerformanceAnalysis {
            score,
            bottlenecks,
            memory_efficiency,
            cpu_efficiency,
            scalability,
        }
    }

    fn identify_bottlenecks(&self, context: &AnalysisContext) -> Vec<PerformanceBottleneck> {
        let mut bottlenecks = Vec::new();

        if context.performance_data.cpu_usage > 80.0 {
            bottlenecks.push(PerformanceBottleneck {
                location: "allocation_tracking".to_string(),
                bottleneck_type: BottleneckType::CpuBound,
                severity: context.performance_data.cpu_usage / 100.0,
                description: "High CPU usage in allocation tracking".to_string(),
                optimization: "Consider optimizing hot paths or using faster data structures"
                    .to_string(),
            });
        }

        if context.memory_data.fragmentation_ratio > 0.3 {
            bottlenecks.push(PerformanceBottleneck {
                location: "memory_management".to_string(),
                bottleneck_type: BottleneckType::MemoryBound,
                severity: context.memory_data.fragmentation_ratio,
                description: "High memory fragmentation".to_string(),
                optimization: "Implement memory compaction or use memory pools".to_string(),
            });
        }

        bottlenecks
    }

    fn assess_overall_quality(
        &self,
        metrics: &[QualityMetric],
        issues: &[QualityIssue],
    ) -> QualityAssessment {
        // Metrics that meet their target contribute their full weight; the
        // rest contribute partial credit proportional to value / target.
        let weighted_score: f64 = metrics
            .iter()
            .map(|m| {
                if m.meets_target {
                    m.weight
                } else {
                    m.weight * (m.value / m.target)
                }
            })
            .sum();

        let total_weight: f64 = metrics.iter().map(|m| m.weight).sum();
        let overall_score = if total_weight > 0.0 {
            weighted_score / total_weight
        } else {
            0.0
        };

        // Each critical-or-worse issue deducts 0.1 from the score.
        let critical_penalty = issues
            .iter()
            .filter(|i| i.severity >= IssueSeverity::Critical)
            .count() as f64
            * 0.1;

        let adjusted_score = (overall_score - critical_penalty).max(0.0);

        let grade = match adjusted_score {
            s if s >= 0.9 => QualityGrade::A,
            s if s >= 0.8 => QualityGrade::B,
            s if s >= 0.7 => QualityGrade::C,
            s if s >= 0.6 => QualityGrade::D,
            _ => QualityGrade::F,
        };

        let strengths = metrics
            .iter()
            .filter(|m| m.meets_target && m.value > m.target * 1.1)
            .map(|m| format!("Excellent {}", m.name))
            .collect();

        let weaknesses = issues
            .iter()
            .filter(|i| i.severity >= IssueSeverity::Major)
            .map(|i| i.title.clone())
            .collect();

        QualityAssessment {
            overall_score: adjusted_score,
            grade,
            confidence: 0.85,
            strengths,
            weaknesses,
        }
    }

    fn generate_recommendations(
        &self,
        issues: &[QualityIssue],
        performance: &PerformanceAnalysis,
    ) -> Vec<Recommendation> {
        let mut recommendations = Vec::new();

        for issue in issues {
            if issue.severity >= IssueSeverity::Major {
                recommendations.push(Recommendation {
                    title: format!("Fix {}", issue.title),
                    description: format!("Address {} to improve quality", issue.description),
                    priority: match issue.severity {
                        IssueSeverity::Critical | IssueSeverity::Blocker => {
                            RecommendationPriority::Critical
                        }
                        IssueSeverity::Major => RecommendationPriority::High,
                        _ => RecommendationPriority::Medium,
                    },
                    impact: issue.impact.clone(),
                    effort: issue.fix_effort.clone(),
                    related_issues: vec![issue.id.clone()],
                });
            }
        }

        if performance.score < 0.8 {
            recommendations.push(Recommendation {
                title: "Improve Performance".to_string(),
                description: "Overall performance score is below target".to_string(),
                priority: RecommendationPriority::High,
                impact: ImpactLevel::High,
                effort: FixEffort::Medium,
                related_issues: vec![],
            });
        }

        recommendations
    }

    fn analyze_trends(&self, component: &str) -> TrendAnalysis {
        // Most recent results first.
        let recent_results: Vec<_> = self
            .history
            .results
            .iter()
            .filter(|r| r.component == component)
            .rev()
            .take(10)
            .collect();

        if recent_results.len() < 3 {
            return TrendAnalysis {
                quality_trend: TrendDirection::Unknown,
                performance_trend: TrendDirection::Unknown,
                complexity_trend: TrendDirection::Unknown,
                confidence: 0.0,
                time_period: Duration::ZERO,
            };
        }

        // Compare the newest score against the oldest in the window.
        let scores: Vec<f64> = recent_results.iter().map(|r| r.quality_score).collect();
        let newest = scores[0];
        let oldest = scores[scores.len() - 1];
        let quality_trend = if newest > oldest {
            TrendDirection::Improving
        } else if newest < oldest {
            TrendDirection::Declining
        } else {
            TrendDirection::Stable
        };

        TrendAnalysis {
            quality_trend,
            performance_trend: TrendDirection::Stable,
            complexity_trend: TrendDirection::Stable,
            confidence: 0.7,
            time_period: Duration::from_secs(3600),
        }
    }

    fn store_analysis_result(
        &mut self,
        component: &str,
        assessment: &QualityAssessment,
        metrics: &[QualityMetric],
    ) {
        let mut metric_map = HashMap::new();
        for metric in metrics {
            metric_map.insert(metric.name.clone(), metric.value);
        }

        let result = AnalysisResult {
            component: component.to_string(),
            quality_score: assessment.overall_score,
            timestamp: Instant::now(),
            metrics: metric_map,
        };

        self.history.results.push(result);

        // Evict the oldest entries once the history exceeds its capacity.
        if self.history.results.len() > self.history.max_entries {
            self.history
                .results
                .drain(0..self.history.results.len() - self.history.max_entries);
        }
    }
}

/// Input data for a single analysis run.
#[derive(Debug)]
pub struct AnalysisContext {
    pub performance_data: PerformanceData,
    pub memory_data: MemoryData,
    pub reliability_data: ReliabilityData,
}

#[derive(Debug)]
pub struct PerformanceData {
    pub avg_latency: Duration,
    /// CPU usage as a percentage (0.0 to 100.0).
    pub cpu_usage: f64,
    pub allocation_efficiency: f64,
    pub throughput: f64,
}

#[derive(Debug)]
pub struct MemoryData {
    pub current_usage: usize,
    /// Memory growth rate in bytes per second.
    pub growth_rate: f64,
    pub efficiency_ratio: f64,
    pub fragmentation_ratio: f64,
}

#[derive(Debug)]
pub struct ReliabilityData {
    /// Fraction of operations that fail (0.0 to 1.0).
    pub error_rate: f64,
    pub success_rate: f64,
    pub mtbf: Duration,
}

impl Default for AnalyzerConfig {
    fn default() -> Self {
        Self {
            analysis_depth: AnalysisDepth::Standard,
            track_trends: true,
            max_analysis_time: Duration::from_secs(30),
            thresholds: QualityThresholds::default(),
        }
    }
}

impl Default for QualityThresholds {
    fn default() -> Self {
        Self {
            min_quality_score: 0.8,
            max_complexity: 10,
            min_coverage: 0.8,
            max_technical_debt: 0.2,
        }
    }
}

impl Default for CodeAnalyzer {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_code_analyzer_creation() {
        let analyzer = CodeAnalyzer::new();
        assert_eq!(analyzer.config.analysis_depth, AnalysisDepth::Standard);
        assert!(analyzer.config.track_trends);
    }
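
    // Added sketch: with no stored results for a component, trend analysis
    // is expected to fall back to `Unknown` with zero confidence. The
    // component name below is an arbitrary placeholder.
    #[test]
    fn test_trend_analysis_requires_history() {
        let analyzer = CodeAnalyzer::new();
        let trend = analyzer.analyze_trends("unseen_component");
        assert_eq!(trend.quality_trend, TrendDirection::Unknown);
        assert_eq!(trend.confidence, 0.0);
        assert_eq!(trend.time_period, Duration::ZERO);
    }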

    #[test]
    fn test_quality_assessment() {
        let analyzer = CodeAnalyzer::new();

        let metrics = vec![QualityMetric {
            name: "test_metric".to_string(),
            category: MetricCategory::Performance,
            value: 0.9,
            target: 0.8,
            meets_target: true,
            weight: 1.0,
            trend: TrendDirection::Stable,
        }];

        let issues = vec![];
        let assessment = analyzer.assess_overall_quality(&metrics, &issues);

        assert!(assessment.overall_score >= 0.8);
        assert_eq!(assessment.grade, QualityGrade::A);
    }
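
    // Added sketch of the issue-detection heuristics: memory growth above
    // 1 MB/s and latency above 100µs should each raise an issue. The context
    // values below are arbitrary sample numbers chosen for the test.
    #[test]
    fn test_detect_quality_issues_flags_leak_and_latency() {
        let analyzer = CodeAnalyzer::new();
        let context = AnalysisContext {
            performance_data: PerformanceData {
                avg_latency: Duration::from_micros(250),
                cpu_usage: 20.0,
                allocation_efficiency: 0.9,
                throughput: 1_000.0,
            },
            memory_data: MemoryData {
                current_usage: 128 * 1024 * 1024,
                growth_rate: 4.0 * 1024.0 * 1024.0, // 4 MB/s
                efficiency_ratio: 0.8,
                fragmentation_ratio: 0.05,
            },
            reliability_data: ReliabilityData {
                error_rate: 0.02,
                success_rate: 0.98,
                mtbf: Duration::from_secs(3_600),
            },
        };

        let issues = analyzer.detect_quality_issues(&context);
        assert_eq!(issues.len(), 2);
        assert!(issues.iter().any(|i| i.id == "memory_leak_detected"));
        assert!(issues.iter().any(|i| i.id == "high_latency"));
    }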

    #[test]
    fn test_quality_grades() {
        assert!(QualityGrade::A > QualityGrade::B);
        assert!(QualityGrade::B > QualityGrade::C);
        assert!(QualityGrade::F < QualityGrade::D);
    }
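
    // Added end-to-end sketch: a healthy context (all metrics at or above
    // their targets, no heuristic thresholds tripped) should produce a clean
    // report. All numbers are illustrative sample values.
    #[test]
    fn test_analyze_quality_end_to_end() {
        let mut analyzer = CodeAnalyzer::new();
        let context = AnalysisContext {
            performance_data: PerformanceData {
                avg_latency: Duration::from_micros(50),
                cpu_usage: 40.0,
                allocation_efficiency: 0.97,
                throughput: 10_000.0,
            },
            memory_data: MemoryData {
                current_usage: 64 * 1024 * 1024,
                growth_rate: 1_024.0, // ~1 KB/s, well under the leak heuristic
                efficiency_ratio: 0.92,
                fragmentation_ratio: 0.1,
            },
            reliability_data: ReliabilityData {
                error_rate: 0.001,
                success_rate: 0.999,
                mtbf: Duration::from_secs(86_400),
            },
        };

        let report = analyzer.analyze_quality("sample_component", &context);
        assert_eq!(report.component, "sample_component");
        assert!(report.issues.is_empty());
        // `track_trends` defaults to true, so a trend analysis is attached.
        assert!(report.trend_analysis.is_some());
    }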
}