use std::collections::HashMap;
use std::time::{Duration, Instant};
/// Analyzes component quality, records a bounded history of results, and
/// keeps per-component quality baselines for later comparison.
pub struct CodeAnalyzer {
    // Settings controlling depth, trend tracking, and thresholds.
    config: AnalyzerConfig,
    // Rolling record of past analysis results (feeds trend analysis).
    history: AnalysisHistory,
    // Reference snapshots keyed by component name.
    baselines: HashMap<String, QualityBaseline>,
}

/// Configuration for a [`CodeAnalyzer`].
#[derive(Debug, Clone)]
pub struct AnalyzerConfig {
    /// Requested thoroughness of the analysis.
    pub analysis_depth: AnalysisDepth,
    /// When true, `analyze_quality` includes a trend analysis in its report.
    pub track_trends: bool,
    /// Time budget for one analysis run.
    /// NOTE(review): not enforced by any visible code in this file — confirm intent.
    pub max_analysis_time: Duration,
    /// Pass/fail quality gates.
    /// NOTE(review): not consulted by the visible analysis code — confirm intent.
    pub thresholds: QualityThresholds,
}

/// How deeply to analyze a component, from fastest to most thorough.
#[derive(Debug, Clone, PartialEq)]
pub enum AnalysisDepth {
    Surface,
    Standard,
    Deep,
    Exhaustive,
}

/// Minimum acceptable quality levels (stock values in
/// `QualityThresholds::default`).
#[derive(Debug, Clone)]
pub struct QualityThresholds {
    /// Minimum acceptable overall quality score (0.0..=1.0).
    pub min_quality_score: f64,
    /// Maximum acceptable complexity measure.
    pub max_complexity: u32,
    /// Minimum acceptable coverage ratio (0.0..=1.0).
    pub min_coverage: f64,
    /// Maximum acceptable technical-debt ratio (0.0..=1.0).
    pub max_technical_debt: f64,
}

/// Rolling buffer of past analysis results.
#[derive(Debug)]
struct AnalysisHistory {
    results: Vec<AnalysisResult>,
    // Oldest entries are dropped once this cap is exceeded.
    max_entries: usize,
}

/// Snapshot of a component's quality at a reference point in time.
#[derive(Debug, Clone)]
pub struct QualityBaseline {
    pub component: String,
    pub quality_score: f64,
    pub complexity: u32,
    pub performance: BaselinePerformance,
    /// When the baseline was captured.
    pub timestamp: Instant,
}

/// Performance figures captured alongside a [`QualityBaseline`].
#[derive(Debug, Clone)]
pub struct BaselinePerformance {
    pub avg_execution_time: Duration,
    /// Memory consumed per operation.
    pub memory_per_operation: usize,
    /// Fraction of operations that failed (0.0..=1.0).
    pub error_rate: f64,
}
/// Full output of one `CodeAnalyzer::analyze_quality` run.
#[derive(Debug, Clone)]
pub struct AnalysisReport {
    pub component: String,
    pub quality_assessment: QualityAssessment,
    pub metrics: Vec<QualityMetric>,
    pub issues: Vec<QualityIssue>,
    pub performance_analysis: PerformanceAnalysis,
    pub recommendations: Vec<Recommendation>,
    /// Present only when `AnalyzerConfig::track_trends` is enabled.
    pub trend_analysis: Option<TrendAnalysis>,
    /// Wall-clock time the analysis took.
    pub analysis_duration: Duration,
}

/// Aggregated quality verdict derived from metrics and issues.
#[derive(Debug, Clone)]
pub struct QualityAssessment {
    /// Weighted score (higher is better), already penalized for
    /// critical issues.
    pub overall_score: f64,
    /// Letter grade derived from `overall_score`.
    pub grade: QualityGrade,
    /// Confidence in the assessment (0.0..=1.0).
    pub confidence: f64,
    /// Names of metrics that comfortably exceeded their targets.
    pub strengths: Vec<String>,
    /// Titles of major-or-worse issues found.
    pub weaknesses: Vec<String>,
}
/// Letter grade for overall quality; `A` is best, `F` is worst.
///
/// Grades order by *quality* (`A > B > ... > F`), which is the reverse of
/// declaration order, so `Ord` cannot simply be derived. The comparison is
/// implemented via a single numeric rank so the variant-to-score mapping
/// exists in exactly one place (it was previously duplicated for both
/// operands inside `cmp`).
#[derive(Debug, Clone, PartialEq)]
pub enum QualityGrade {
    A,
    B,
    C,
    D,
    F,
}

impl QualityGrade {
    /// Numeric rank used for comparisons: higher is better (A=5 .. F=1).
    fn rank(&self) -> u8 {
        match self {
            QualityGrade::A => 5,
            QualityGrade::B => 4,
            QualityGrade::C => 3,
            QualityGrade::D => 2,
            QualityGrade::F => 1,
        }
    }
}

impl PartialOrd for QualityGrade {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Delegate to `cmp` so the two orderings can never disagree.
        Some(self.cmp(other))
    }
}

impl Ord for QualityGrade {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.rank().cmp(&other.rank())
    }
}

impl Eq for QualityGrade {}
/// One measured quality dimension compared against a target value.
#[derive(Debug, Clone)]
pub struct QualityMetric {
    pub name: String,
    pub category: MetricCategory,
    /// Measured value.
    pub value: f64,
    /// Target the value is compared against. Whether higher or lower is
    /// better depends on the metric (e.g. `error_rate` must stay *below*
    /// its target).
    pub target: f64,
    /// Whether `value` satisfies `target` for this metric's direction.
    pub meets_target: bool,
    /// Relative weight used when aggregating the overall score.
    pub weight: f64,
    /// Direction this metric has been moving over time.
    pub trend: TrendDirection,
}

/// Broad area a quality metric belongs to.
#[derive(Debug, Clone, PartialEq)]
pub enum MetricCategory {
    Performance,
    Reliability,
    Maintainability,
    Security,
    Efficiency,
}

/// A concrete problem detected during analysis.
#[derive(Debug, Clone)]
pub struct QualityIssue {
    /// Stable machine-readable identifier (e.g. "memory_leak_detected").
    pub id: String,
    /// Short human-readable title.
    pub title: String,
    pub description: String,
    pub severity: IssueSeverity,
    pub category: IssueCategory,
    /// Where the issue was observed, when known.
    pub location: Option<String>,
    /// Estimated effort required to fix the issue.
    pub fix_effort: FixEffort,
    /// Expected impact of leaving the issue unfixed.
    pub impact: ImpactLevel,
}

/// Issue severity; derived `Ord` follows declaration order, so
/// `Minor < Moderate < Major < Critical < Blocker`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum IssueSeverity {
    Minor,
    Moderate,
    Major,
    Critical,
    Blocker,
}

/// Problem domain an issue falls into.
#[derive(Debug, Clone, PartialEq)]
pub enum IssueCategory {
    MemoryManagement,
    Performance,
    ThreadSafety,
    ErrorHandling,
    CodeStyle,
    Design,
}

/// Estimated effort to resolve an issue, from least to most work.
#[derive(Debug, Clone, PartialEq)]
pub enum FixEffort {
    Trivial,
    Easy,
    Medium,
    Hard,
    VeryHard,
}

/// Impact level; derived `Ord` follows declaration order, so
/// `Minimal < Low < Medium < High < Critical`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum ImpactLevel {
    Minimal,
    Low,
    Medium,
    High,
    Critical,
}
/// Performance portion of an analysis report.
#[derive(Debug, Clone)]
pub struct PerformanceAnalysis {
    /// Composite score: mean of memory efficiency, CPU efficiency, and
    /// the scalability score.
    pub score: f64,
    pub bottlenecks: Vec<PerformanceBottleneck>,
    /// Memory efficiency ratio (taken from `MemoryData::efficiency_ratio`).
    pub memory_efficiency: f64,
    /// CPU headroom derived from measured CPU usage.
    pub cpu_efficiency: f64,
    pub scalability: ScalabilityAssessment,
}

/// A single identified performance hot spot.
#[derive(Debug, Clone)]
pub struct PerformanceBottleneck {
    /// Subsystem where the bottleneck was observed.
    pub location: String,
    pub bottleneck_type: BottleneckType,
    /// Severity in 0.0..=1.0 (higher is worse).
    pub severity: f64,
    pub description: String,
    /// Suggested remediation.
    pub optimization: String,
}

/// Root-cause classification for a bottleneck.
#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckType {
    CpuBound,
    MemoryBound,
    IoBound,
    LockContention,
    CacheMiss,
    AlgorithmInefficiency,
}

/// How well the component is expected to scale under load.
#[derive(Debug, Clone)]
pub struct ScalabilityAssessment {
    /// Scalability score in 0.0..=1.0.
    pub score: f64,
    /// Expected growth curve of cost with load.
    pub scaling_behavior: ScalingBehavior,
    pub resource_scaling: ResourceScaling,
    /// Known limits that may cap scaling.
    pub limitations: Vec<String>,
}

/// Expected cost-growth class as load increases.
#[derive(Debug, Clone, PartialEq)]
pub enum ScalingBehavior {
    Constant,
    Linear,
    Logarithmic,
    Quadratic,
    Exponential,
}

/// Multipliers describing how resource use grows relative to load.
#[derive(Debug, Clone)]
pub struct ResourceScaling {
    pub memory_factor: f64,
    pub cpu_factor: f64,
    pub network_factor: f64,
}

/// An actionable suggestion derived from issues or performance findings.
#[derive(Debug, Clone)]
pub struct Recommendation {
    pub title: String,
    pub description: String,
    pub priority: RecommendationPriority,
    /// Expected benefit of acting on the recommendation.
    pub impact: ImpactLevel,
    /// Estimated effort to implement it.
    pub effort: FixEffort,
    /// IDs of the `QualityIssue`s this recommendation addresses
    /// (empty for recommendations not tied to a specific issue).
    pub related_issues: Vec<String>,
}

/// Recommendation urgency; derived `Ord` follows declaration order, so
/// `Low < Medium < High < Critical`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum RecommendationPriority {
    Low,
    Medium,
    High,
    Critical,
}

/// Direction of quality/performance/complexity movement over time.
#[derive(Debug, Clone)]
pub struct TrendAnalysis {
    pub quality_trend: TrendDirection,
    pub performance_trend: TrendDirection,
    pub complexity_trend: TrendDirection,
    /// Confidence in the trend estimate (0.0..=1.0; 0.0 when there is
    /// too little history).
    pub confidence: f64,
    /// Time window the trend covers.
    pub time_period: Duration,
}

/// Direction a tracked quantity is moving.
#[derive(Debug, Clone, PartialEq)]
pub enum TrendDirection {
    Improving,
    Stable,
    Declining,
    Unknown,
}

/// Minimal record of one past analysis run, retained for trend analysis.
#[derive(Debug, Clone)]
struct AnalysisResult {
    component: String,
    quality_score: f64,
}
impl CodeAnalyzer {
    /// Creates an analyzer with the default configuration and an empty,
    /// 100-entry-capped history.
    pub fn new() -> Self {
        // Delegate so the field wiring exists in exactly one place.
        Self::with_config(AnalyzerConfig::default())
    }

    /// Creates an analyzer with the given configuration.
    pub fn with_config(config: AnalyzerConfig) -> Self {
        Self {
            config,
            history: AnalysisHistory {
                results: Vec::new(),
                max_entries: 100,
            },
            baselines: HashMap::new(),
        }
    }

    /// Records (or replaces) the quality baseline for `component`.
    pub fn set_baseline(&mut self, component: &str, baseline: QualityBaseline) {
        self.baselines.insert(component.to_string(), baseline);
    }

    /// Runs a full quality analysis of `component` against `context` and
    /// returns the report. Also appends the result to the internal history
    /// so later calls can compute trends.
    ///
    /// NOTE(review): `config.max_analysis_time` is not enforced here —
    /// confirm whether a time budget was intended.
    pub fn analyze_quality(
        &mut self,
        component: &str,
        context: &AnalysisContext,
    ) -> AnalysisReport {
        let start_time = Instant::now();
        let metrics = self.calculate_quality_metrics(context);
        let issues = self.detect_quality_issues(context);
        let performance_analysis = self.analyze_performance(context);
        let quality_assessment = self.assess_overall_quality(&metrics, &issues);
        let recommendations = self.generate_recommendations(&issues, &performance_analysis);
        // Trend analysis is optional and driven purely by configuration.
        let trend_analysis = if self.config.track_trends {
            Some(self.analyze_trends(component))
        } else {
            None
        };
        let analysis_duration = start_time.elapsed();
        self.store_analysis_result(component, &quality_assessment, &metrics);
        AnalysisReport {
            component: component.to_string(),
            quality_assessment,
            metrics,
            issues,
            performance_analysis,
            recommendations,
            trend_analysis,
            analysis_duration,
        }
    }

    /// Builds the per-dimension quality metrics from raw context data.
    ///
    /// Target direction differs per metric: the efficiency metrics must be
    /// at or above target, while `error_rate` must be at or below it.
    fn calculate_quality_metrics(&self, context: &AnalysisContext) -> Vec<QualityMetric> {
        vec![
            QualityMetric {
                name: "allocation_efficiency".to_string(),
                category: MetricCategory::Performance,
                value: context.performance_data.allocation_efficiency,
                target: 0.95,
                meets_target: context.performance_data.allocation_efficiency >= 0.95,
                weight: 0.3,
                trend: TrendDirection::Unknown,
            },
            QualityMetric {
                name: "error_rate".to_string(),
                category: MetricCategory::Reliability,
                value: context.reliability_data.error_rate,
                target: 0.01,
                // Lower is better for error rate.
                meets_target: context.reliability_data.error_rate <= 0.01,
                weight: 0.25,
                trend: TrendDirection::Unknown,
            },
            QualityMetric {
                name: "memory_efficiency".to_string(),
                category: MetricCategory::Efficiency,
                value: context.memory_data.efficiency_ratio,
                target: 0.9,
                meets_target: context.memory_data.efficiency_ratio >= 0.9,
                weight: 0.2,
                trend: TrendDirection::Unknown,
            },
        ]
    }

    /// Scans the context for known problem signatures and reports each as
    /// a [`QualityIssue`].
    fn detect_quality_issues(&self, context: &AnalysisContext) -> Vec<QualityIssue> {
        let mut issues = Vec::new();
        // Growth above 1 MiB/sec is treated as a potential leak.
        if context.memory_data.growth_rate > 1024.0 * 1024.0 {
            issues.push(QualityIssue {
                id: "memory_leak_detected".to_string(),
                title: "Potential Memory Leak".to_string(),
                description: format!(
                    "High memory growth rate: {:.2}MB/sec",
                    context.memory_data.growth_rate / (1024.0 * 1024.0)
                ),
                severity: IssueSeverity::Major,
                category: IssueCategory::MemoryManagement,
                location: Some("memory_tracking".to_string()),
                fix_effort: FixEffort::Medium,
                impact: ImpactLevel::High,
            });
        }
        // Average latency above 100µs is flagged as a performance issue.
        if context.performance_data.avg_latency > Duration::from_micros(100) {
            issues.push(QualityIssue {
                id: "high_latency".to_string(),
                title: "High Operation Latency".to_string(),
                description: format!(
                    "Average latency {:.2}µs exceeds threshold",
                    context.performance_data.avg_latency.as_micros()
                ),
                severity: IssueSeverity::Moderate,
                category: IssueCategory::Performance,
                location: Some("allocation_tracking".to_string()),
                fix_effort: FixEffort::Easy,
                impact: ImpactLevel::Medium,
            });
        }
        issues
    }

    /// Summarizes performance: bottlenecks, efficiencies, scalability,
    /// and a composite score (mean of the three sub-scores).
    fn analyze_performance(&self, context: &AnalysisContext) -> PerformanceAnalysis {
        let bottlenecks = self.identify_bottlenecks(context);
        let memory_efficiency = context.memory_data.efficiency_ratio;
        // cpu_usage is a percentage; clamp so a reported value above 100
        // (or a spurious negative) cannot push efficiency outside 0..=1
        // and thereby distort the composite score.
        let cpu_efficiency =
            (1.0 - context.performance_data.cpu_usage / 100.0).clamp(0.0, 1.0);
        // Fixed scalability estimate — the context carries no measured
        // scaling data yet.
        let scalability = ScalabilityAssessment {
            score: 0.8,
            scaling_behavior: ScalingBehavior::Linear,
            resource_scaling: ResourceScaling {
                memory_factor: 1.2,
                cpu_factor: 1.1,
                network_factor: 1.0,
            },
            limitations: vec!["Memory bandwidth may become bottleneck at scale".to_string()],
        };
        let score = (memory_efficiency + cpu_efficiency + scalability.score) / 3.0;
        PerformanceAnalysis {
            score,
            bottlenecks,
            memory_efficiency,
            cpu_efficiency,
            scalability,
        }
    }

    /// Flags CPU (usage > 80%) and memory-fragmentation (> 0.3)
    /// bottlenecks from context data.
    fn identify_bottlenecks(&self, context: &AnalysisContext) -> Vec<PerformanceBottleneck> {
        let mut bottlenecks = Vec::new();
        if context.performance_data.cpu_usage > 80.0 {
            bottlenecks.push(PerformanceBottleneck {
                location: "allocation_tracking".to_string(),
                bottleneck_type: BottleneckType::CpuBound,
                severity: context.performance_data.cpu_usage / 100.0,
                description: "High CPU usage in allocation tracking".to_string(),
                optimization: "Consider optimizing hot paths or using faster data structures"
                    .to_string(),
            });
        }
        if context.memory_data.fragmentation_ratio > 0.3 {
            bottlenecks.push(PerformanceBottleneck {
                location: "memory_management".to_string(),
                bottleneck_type: BottleneckType::MemoryBound,
                severity: context.memory_data.fragmentation_ratio,
                description: "High memory fragmentation".to_string(),
                optimization: "Implement memory compaction or use memory pools".to_string(),
            });
        }
        bottlenecks
    }

    /// Aggregates metrics and issues into an overall assessment.
    ///
    /// Each metric contributes its full weight when it meets its target,
    /// otherwise a partial weight proportional to how close it came. The
    /// closeness ratio is direction-agnostic: `min(value/target,
    /// target/value)` is below 1.0 whichever side of the target is
    /// "better". (The previous `value/target` formula *inflated* the
    /// score for lower-is-better metrics such as `error_rate`, where a
    /// failing value is larger than its target — e.g. an error rate of
    /// 0.5 against target 0.01 contributed 50× its weight.)
    fn assess_overall_quality(
        &self,
        metrics: &[QualityMetric],
        issues: &[QualityIssue],
    ) -> QualityAssessment {
        let weighted_score: f64 = metrics
            .iter()
            .map(|m| {
                if m.meets_target {
                    m.weight
                } else if m.value > 0.0 && m.target > 0.0 {
                    // Direction-agnostic closeness in (0, 1).
                    m.weight * (m.value / m.target).min(m.target / m.value)
                } else {
                    // Degenerate values earn no credit (and this guard
                    // also avoids division by zero).
                    0.0
                }
            })
            .sum();
        let total_weight: f64 = metrics.iter().map(|m| m.weight).sum();
        let overall_score = if total_weight > 0.0 {
            weighted_score / total_weight
        } else {
            0.0
        };
        // Each critical-or-worse issue knocks 10% off the score.
        let critical_penalty = issues
            .iter()
            .filter(|i| i.severity >= IssueSeverity::Critical)
            .count() as f64
            * 0.1;
        let adjusted_score = (overall_score - critical_penalty).max(0.0);
        let grade = match adjusted_score {
            s if s >= 0.9 => QualityGrade::A,
            s if s >= 0.8 => QualityGrade::B,
            s if s >= 0.7 => QualityGrade::C,
            s if s >= 0.6 => QualityGrade::D,
            _ => QualityGrade::F,
        };
        // "Strengths" are metrics beating their target by more than 10%.
        // NOTE(review): this only recognizes higher-is-better metrics; a
        // very low error rate can never qualify because its meets_target
        // requires value <= target. Confirm whether that is intended.
        let strengths = metrics
            .iter()
            .filter(|m| m.meets_target && m.value > m.target * 1.1)
            .map(|m| format!("Excellent {}", m.name))
            .collect();
        let weaknesses = issues
            .iter()
            .filter(|i| i.severity >= IssueSeverity::Major)
            .map(|i| i.title.clone())
            .collect();
        QualityAssessment {
            overall_score: adjusted_score,
            grade,
            confidence: 0.85,
            strengths,
            weaknesses,
        }
    }

    /// Turns major-or-worse issues and a weak performance score into
    /// actionable recommendations.
    fn generate_recommendations(
        &self,
        issues: &[QualityIssue],
        performance: &PerformanceAnalysis,
    ) -> Vec<Recommendation> {
        let mut recommendations = Vec::new();
        for issue in issues {
            if issue.severity >= IssueSeverity::Major {
                recommendations.push(Recommendation {
                    title: format!("Fix {}", issue.title),
                    description: format!("Address {} to improve quality", issue.description),
                    priority: match issue.severity {
                        IssueSeverity::Critical | IssueSeverity::Blocker => {
                            RecommendationPriority::Critical
                        }
                        IssueSeverity::Major => RecommendationPriority::High,
                        // Unreachable given the severity guard above, but
                        // kept so the match stays total.
                        _ => RecommendationPriority::Medium,
                    },
                    impact: issue.impact.clone(),
                    effort: issue.fix_effort.clone(),
                    related_issues: vec![issue.id.clone()],
                });
            }
        }
        if performance.score < 0.8 {
            recommendations.push(Recommendation {
                title: "Improve Performance".to_string(),
                description: "Overall performance score is below target".to_string(),
                priority: RecommendationPriority::High,
                impact: ImpactLevel::High,
                effort: FixEffort::Medium,
                related_issues: vec![],
            });
        }
        recommendations
    }

    /// Derives a quality trend for `component` from its last 10 stored
    /// results. Returns `Unknown` trends with zero confidence when fewer
    /// than 3 results exist.
    fn analyze_trends(&self, component: &str) -> TrendAnalysis {
        // History is appended chronologically, so reversing puts the most
        // recent result first.
        let scores: Vec<f64> = self
            .history
            .results
            .iter()
            .filter(|r| r.component == component)
            .rev()
            .take(10)
            .map(|r| r.quality_score)
            .collect();
        if scores.len() < 3 {
            return TrendAnalysis {
                quality_trend: TrendDirection::Unknown,
                performance_trend: TrendDirection::Unknown,
                complexity_trend: TrendDirection::Unknown,
                confidence: 0.0,
                time_period: Duration::ZERO,
            };
        }
        // Compare the newest score against the oldest in the window
        // directly, rather than via `Option<&f64>` comparisons.
        let newest = scores[0];
        let oldest = scores[scores.len() - 1];
        let quality_trend = if newest > oldest {
            TrendDirection::Improving
        } else if newest < oldest {
            TrendDirection::Declining
        } else {
            TrendDirection::Stable
        };
        TrendAnalysis {
            quality_trend,
            // Only quality scores are recorded in history, so the other
            // trends cannot yet be computed from data.
            performance_trend: TrendDirection::Stable,
            complexity_trend: TrendDirection::Stable,
            confidence: 0.7,
            time_period: Duration::from_secs(3600),
        }
    }

    /// Appends a result to history, dropping the oldest entries once the
    /// `max_entries` cap is exceeded.
    fn store_analysis_result(
        &mut self,
        component: &str,
        assessment: &QualityAssessment,
        _metrics: &[QualityMetric],
    ) {
        self.history.results.push(AnalysisResult {
            component: component.to_string(),
            quality_score: assessment.overall_score,
        });
        let len = self.history.results.len();
        if len > self.history.max_entries {
            // Keep exactly the newest `max_entries` results.
            self.history.results.drain(0..len - self.history.max_entries);
        }
    }
}
/// Raw measurement inputs for one analysis run.
#[derive(Debug)]
pub struct AnalysisContext {
    pub performance_data: PerformanceData,
    pub memory_data: MemoryData,
    pub reliability_data: ReliabilityData,
}

/// Latency / CPU / throughput measurements.
#[derive(Debug)]
pub struct PerformanceData {
    /// Average operation latency (values above 100µs raise an issue).
    pub avg_latency: Duration,
    /// CPU utilization as a percentage (values above 80 flag a bottleneck).
    pub cpu_usage: f64,
    /// Allocation efficiency ratio (target 0.95).
    pub allocation_efficiency: f64,
    /// Throughput measure.
    /// NOTE(review): unused by the visible analysis code.
    pub throughput: f64,
}

/// Memory usage measurements.
#[derive(Debug)]
pub struct MemoryData {
    /// Current memory usage.
    /// NOTE(review): unused by the visible analysis code.
    pub current_usage: usize,
    /// Memory growth rate in bytes/sec (above 1 MiB/sec flags a leak issue).
    pub growth_rate: f64,
    /// Memory efficiency ratio (target 0.9).
    pub efficiency_ratio: f64,
    /// Fragmentation ratio (values above 0.3 flag a bottleneck).
    pub fragmentation_ratio: f64,
}

/// Reliability measurements.
#[derive(Debug)]
pub struct ReliabilityData {
    /// Fraction of operations that failed (target <= 0.01).
    pub error_rate: f64,
    /// Fraction of operations that succeeded.
    /// NOTE(review): unused by the visible analysis code.
    pub success_rate: f64,
    /// Mean time between failures.
    /// NOTE(review): unused by the visible analysis code.
    pub mtbf: Duration,
}
impl Default for AnalyzerConfig {
    /// Standard depth, trend tracking enabled, a 30-second budget, and the
    /// stock quality thresholds.
    fn default() -> Self {
        Self {
            analysis_depth: AnalysisDepth::Standard,
            track_trends: true,
            max_analysis_time: Duration::from_secs(30),
            thresholds: QualityThresholds::default(),
        }
    }
}

impl Default for QualityThresholds {
    /// Stock gates: quality >= 0.8, complexity <= 10, coverage >= 0.8,
    /// technical debt <= 0.2.
    fn default() -> Self {
        Self {
            min_quality_score: 0.8,
            max_complexity: 10,
            min_coverage: 0.8,
            max_technical_debt: 0.2,
        }
    }
}

impl Default for CodeAnalyzer {
    /// Equivalent to [`CodeAnalyzer::new`].
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_code_analyzer_creation() {
        // A fresh analyzer starts with the default configuration.
        let analyzer = CodeAnalyzer::new();
        assert!(analyzer.config.track_trends);
        assert_eq!(analyzer.config.analysis_depth, AnalysisDepth::Standard);
    }

    #[test]
    fn test_quality_assessment() {
        // A single fully-weighted metric that meets its target yields a
        // perfect weighted score and therefore an A grade.
        let analyzer = CodeAnalyzer::new();
        let metric = QualityMetric {
            name: "test_metric".to_string(),
            category: MetricCategory::Performance,
            value: 0.9,
            target: 0.8,
            meets_target: true,
            weight: 1.0,
            trend: TrendDirection::Stable,
        };
        let assessment = analyzer.assess_overall_quality(&[metric], &[]);
        assert_eq!(assessment.grade, QualityGrade::A);
        assert!(assessment.overall_score >= 0.8);
    }

    #[test]
    fn test_quality_grades() {
        // Grades order by quality: A is best, F is worst.
        assert!(QualityGrade::F < QualityGrade::D);
        assert!(QualityGrade::B > QualityGrade::C);
        assert!(QualityGrade::A > QualityGrade::B);
    }
}