scribe_analysis/heuristics/
enhanced_scoring.rs

//! # Enhanced Scoring System with Complexity Integration
//!
//! This module extends the basic heuristic scoring with comprehensive complexity analysis,
//! providing deeper insights into code quality and maintainability for better file selection.
//!
//! ## Enhanced Features
//!
//! - **Complexity-Aware Scoring**: Integrates cyclomatic, cognitive, and maintainability metrics
//! - **Quality-Based Prioritization**: Considers code quality alongside importance
//! - **Language-Specific Analysis**: Tailored complexity analysis per programming language
//! - **Maintainability Assessment**: Factors in long-term code maintenance concerns
//! - **Adaptive Weights**: Adjusts scoring based on repository characteristics
use std::collections::HashMap;

use scribe_core::{Result, ScribeError};

use crate::complexity::{ComplexityAnalyzer, ComplexityConfig, ComplexityMetrics};

use super::scoring::RawScoreComponents;
use super::{HeuristicWeights, ScanResult, ScoreComponents};
19
/// Enhanced score components that include complexity metrics
///
/// Produced by `EnhancedHeuristicScorer::score_file_enhanced`. When
/// complexity analysis is disabled, the four complexity-derived scores are
/// set to a neutral 0.5 and `complexity_metrics` is `None`.
#[derive(Debug, Clone)]
pub struct EnhancedScoreComponents {
    /// Base score components from standard heuristics
    pub base_score: ScoreComponents,
    
    /// Complexity-based scores (each nominally in [0, 1], higher is better)
    pub complexity_score: f64,
    pub maintainability_score: f64,
    pub cognitive_score: f64,
    pub quality_score: f64,
    
    /// Combined final score (base blended with complexity, capped at 2.0)
    pub enhanced_final_score: f64,
    
    /// Detailed complexity metrics; `None` when analysis was skipped
    pub complexity_metrics: Option<ComplexityMetrics>,
    
    /// Complexity-adjusted weights actually used for this file
    pub adjusted_weights: EnhancedWeights,
}
41
/// Enhanced weights that include complexity factors
///
/// Extends the base heuristic weights with multipliers for the four
/// complexity-derived scores, plus repository-level adaptive factors.
#[derive(Debug, Clone)]
pub struct EnhancedWeights {
    /// Base heuristic weights
    pub base_weights: HeuristicWeights,
    
    /// Complexity weight factors (applied to the complexity-derived scores)
    pub complexity_weight: f64,
    pub maintainability_weight: f64,
    pub cognitive_weight: f64,
    pub quality_weight: f64,
    
    /// Adaptive weight adjustments derived from repository characteristics
    pub adaptive_factors: AdaptiveFactors,
}
57
/// Adaptive factors that adjust scoring based on repository characteristics
///
/// Each factor is a multiplier applied to the final score; 1.0 is neutral.
#[derive(Debug, Clone)]
pub struct AdaptiveFactors {
    /// Repository size factor (larger repos may prefer simpler files)
    pub repo_size_factor: f64,
    
    /// Language complexity factor (some languages naturally more complex)
    pub language_factor: f64,
    
    /// Project maturity factor (mature projects may prioritize maintainability)
    pub maturity_factor: f64,
    
    /// Team experience factor (affects complexity tolerance)
    pub experience_factor: f64,
}
73
/// Enhanced heuristic scorer with complexity integration
///
/// Wraps the standard `HeuristicScorer` and, when complexity analysis is
/// enabled, augments its scores with per-file complexity metrics. Metrics
/// are cached per file path to avoid re-analysis.
#[derive(Debug)]
pub struct EnhancedHeuristicScorer {
    /// Base scorer for standard heuristics
    base_scorer: super::scoring::HeuristicScorer,
    
    /// Complexity analyzer
    complexity_analyzer: ComplexityAnalyzer,
    
    /// Enhanced weights configuration
    weights: EnhancedWeights,
    
    /// Repository characteristics for adaptive scoring
    repo_characteristics: RepositoryCharacteristics,
    
    /// Content cache for complexity analysis, keyed by file path
    content_cache: HashMap<String, ComplexityMetrics>,
    
    /// Whether to enable expensive complexity analysis (disabled by default for performance)
    enable_complexity_analysis: bool,
}
95
/// Repository characteristics for adaptive scoring
///
/// Supplied by callers (see `update_repository_characteristics`); the
/// `Default` impl provides placeholder values only.
#[derive(Debug, Clone)]
pub struct RepositoryCharacteristics {
    /// Total number of files in repository
    pub total_files: usize,
    
    /// Primary programming languages (lowercase names, e.g. "rust")
    pub primary_languages: Vec<String>,
    
    /// Repository age in months
    pub age_months: usize,
    
    /// Average team size
    pub team_size: usize,
    
    /// Project type (library, application, framework, etc.)
    pub project_type: ProjectType,
}
114
/// Project type classification
///
/// Used by the adaptive weighting logic to bias scores (e.g. libraries
/// favor documentation and maintainability). Fieldless, so it derives
/// `Copy` and equality/hashing for cheap comparison and map keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProjectType {
    Library,
    Application,
    Framework,
    Tool,
    Game,
    WebService,
    EmbeddedSystem,
    Unknown,
}
127
128impl Default for EnhancedWeights {
129    fn default() -> Self {
130        Self {
131            base_weights: HeuristicWeights::default(),
132            complexity_weight: 0.15,
133            maintainability_weight: 0.20,
134            cognitive_weight: 0.10,
135            quality_weight: 0.15,
136            adaptive_factors: AdaptiveFactors::default(),
137        }
138    }
139}
140
141impl Default for AdaptiveFactors {
142    fn default() -> Self {
143        Self {
144            repo_size_factor: 1.0,
145            language_factor: 1.0,
146            maturity_factor: 1.0,
147            experience_factor: 1.0,
148        }
149    }
150}
151
152impl Default for RepositoryCharacteristics {
153    fn default() -> Self {
154        Self {
155            total_files: 100,
156            primary_languages: vec!["rust".to_string()],
157            age_months: 12,
158            team_size: 3,
159            project_type: ProjectType::Application,
160        }
161    }
162}
163
164impl EnhancedHeuristicScorer {
165    /// Create a new enhanced scorer with default configuration
166    pub fn new() -> Self {
167        let base_weights = HeuristicWeights::default();
168        let base_scorer = super::scoring::HeuristicScorer::new(base_weights.clone());
169        
170        Self {
171            base_scorer,
172            complexity_analyzer: ComplexityAnalyzer::new(),
173            weights: EnhancedWeights::default(),
174            repo_characteristics: RepositoryCharacteristics::default(),
175            content_cache: HashMap::new(),
176            enable_complexity_analysis: false, // TEMPORARILY DISABLED to test baseline performance
177        }
178    }
179    
    /// Enable complexity analysis (WARNING: This significantly impacts performance)
    ///
    /// Subsequent calls to `score_file_enhanced` will run (and cache) full
    /// per-file complexity analysis.
    pub fn enable_complexity_analysis(&mut self) {
        self.enable_complexity_analysis = true;
    }
184    
    /// Disable complexity analysis for better performance
    ///
    /// Also drops any cached per-file metrics.
    pub fn disable_complexity_analysis(&mut self) {
        self.enable_complexity_analysis = false;
        self.content_cache.clear(); // Clear cache to save memory
    }
190    
191    /// Create enhanced scorer with custom configuration
192    pub fn with_config(
193        weights: EnhancedWeights, 
194        complexity_config: ComplexityConfig,
195        repo_characteristics: RepositoryCharacteristics
196    ) -> Self {
197        let base_scorer = super::scoring::HeuristicScorer::new(weights.base_weights.clone());
198        let complexity_analyzer = ComplexityAnalyzer::with_config(complexity_config);
199        
200        Self {
201            base_scorer,
202            complexity_analyzer,
203            weights,
204            repo_characteristics,
205            content_cache: HashMap::new(),
206            enable_complexity_analysis: false, // TEMPORARILY DISABLED to test baseline performance
207        }
208    }
209    
    /// Score a file with enhanced complexity-aware analysis
    ///
    /// Combines the base heuristic score with four complexity-derived
    /// scores (complexity, maintainability, cognitive load, quality).
    /// When complexity analysis is disabled, those four scores are fixed
    /// at a neutral 0.5 and no metrics are attached.
    ///
    /// # Errors
    ///
    /// Propagates errors from the base scorer and, when enabled, from the
    /// complexity analyzer.
    pub fn score_file_enhanced<T>(&mut self, file: &T, file_content: &str, all_files: &[T]) -> Result<EnhancedScoreComponents> 
    where 
        T: ScanResult + Clone,
    {
        // Get base heuristic score
        let base_score = self.base_scorer.score_file(file, all_files)?;
        
        // Detect language from file path
        let language = self.detect_language(file.path());
        
        // Analyze complexity only if enabled (with caching)
        // NOTE(review): the cache is keyed by path only — entries go stale
        // if the same path is scored with different content; confirm callers
        // never do that within one scorer lifetime.
        let (complexity_metrics, complexity_score, maintainability_score, cognitive_score, quality_score) = 
            if self.enable_complexity_analysis {
                let complexity_metrics = if let Some(cached) = self.content_cache.get(file.path()) {
                    cached.clone()
                } else {
                    let metrics = self.complexity_analyzer.analyze_content(file_content, &language)?;
                    self.content_cache.insert(file.path().to_string(), metrics.clone());
                    metrics
                };
                
                // Calculate complexity-based scores
                let complexity_score = self.calculate_complexity_score(&complexity_metrics);
                let maintainability_score = self.calculate_maintainability_score(&complexity_metrics);
                let cognitive_score = self.calculate_cognitive_score(&complexity_metrics);
                let quality_score = self.calculate_quality_score(&complexity_metrics);
                
                (Some(complexity_metrics), complexity_score, maintainability_score, cognitive_score, quality_score)
            } else {
                // Skip expensive complexity analysis - use neutral/default scores
                (None, 0.5, 0.5, 0.5, 0.5)
            };
        
        // Apply adaptive adjustments
        let adjusted_weights = if let Some(ref metrics) = complexity_metrics {
            self.calculate_adaptive_weights(file, metrics)
        } else {
            // Use default weights when complexity analysis is disabled
            self.weights.clone()
        };
        
        // Calculate enhanced final score
        let enhanced_final_score = self.calculate_enhanced_final_score(
            &base_score,
            complexity_score,
            maintainability_score,
            cognitive_score,
            quality_score,
            &adjusted_weights
        );
        
        Ok(EnhancedScoreComponents {
            base_score,
            complexity_score,
            maintainability_score,
            cognitive_score,
            quality_score,
            enhanced_final_score,
            complexity_metrics,
            adjusted_weights,
        })
    }
273    
274    /// Score all files with enhanced analysis
275    pub fn score_all_files_enhanced<T>(&mut self, files_with_content: &[(T, String)]) -> Result<Vec<(usize, EnhancedScoreComponents)>>
276    where 
277        T: ScanResult + Clone,
278    {
279        let files: Vec<_> = files_with_content.iter().map(|(f, _)| f.clone()).collect();
280        let mut scored_files = Vec::new();
281        
282        for (idx, (file, content)) in files_with_content.iter().enumerate() {
283            let score = self.score_file_enhanced(file, content, &files)?;
284            scored_files.push((idx, score));
285        }
286        
287        // Sort by enhanced final score (descending)
288        scored_files.sort_by(|a, b| b.1.enhanced_final_score.partial_cmp(&a.1.enhanced_final_score).unwrap_or(std::cmp::Ordering::Equal));
289        
290        Ok(scored_files)
291    }
292    
293    /// Detect programming language from file path
294    fn detect_language(&self, path: &str) -> String {
295        let extension = std::path::Path::new(path)
296            .extension()
297            .and_then(|ext| ext.to_str())
298            .unwrap_or("");
299        
300        match extension.to_lowercase().as_str() {
301            "rs" => "rust",
302            "py" => "python",
303            "js" => "javascript",
304            "ts" => "typescript",
305            "java" => "java",
306            "cs" => "c#",
307            "go" => "go",
308            "c" => "c",
309            "cpp" | "cc" | "cxx" => "cpp",
310            "h" | "hpp" => "c",
311            "rb" => "ruby",
312            "php" => "php",
313            "swift" => "swift",
314            "kt" => "kotlin",
315            "scala" => "scala",
316            _ => "unknown",
317        }.to_string()
318    }
319    
    /// Calculate complexity-based score (0-1, where 1 is good)
    ///
    /// Inverts the analyzer's composite complexity score so simpler files
    /// score higher.
    // NOTE(review): assumes `complexity_score()` stays within [0, 1]; if it
    // can exceed 1.0 this result goes negative — confirm against
    // ComplexityMetrics.
    fn calculate_complexity_score(&self, metrics: &ComplexityMetrics) -> f64 {
        // Invert complexity score - lower complexity is better
        1.0 - metrics.complexity_score()
    }
325    
326    /// Calculate maintainability score (0-1)
327    fn calculate_maintainability_score(&self, metrics: &ComplexityMetrics) -> f64 {
328        // Maintainability index is 0-100, normalize to 0-1
329        metrics.maintainability_index / 100.0
330    }
331    
332    /// Calculate cognitive load score (0-1, where 1 is good)
333    fn calculate_cognitive_score(&self, metrics: &ComplexityMetrics) -> f64 {
334        // Lower cognitive complexity is better
335        let cognitive_ratio = metrics.cognitive_complexity as f64 / 20.0; // Normalize to rough 0-1 range
336        (1.0 - cognitive_ratio.min(1.0)).max(0.0)
337    }
338    
339    /// Calculate overall quality score (0-1)
340    fn calculate_quality_score(&self, metrics: &ComplexityMetrics) -> f64 {
341        // Composite quality score
342        let complexity_factor = 1.0 - (metrics.cyclomatic_complexity as f64 / 15.0).min(1.0);
343        let nesting_factor = 1.0 - (metrics.max_nesting_depth as f64 / 6.0).min(1.0);
344        let density_factor = metrics.code_density.min(1.0);
345        let comment_factor = (metrics.comment_ratio * 2.0).min(1.0); // Good commenting is valuable
346        
347        (complexity_factor * 0.3 + 
348         nesting_factor * 0.2 + 
349         density_factor * 0.3 + 
350         comment_factor * 0.2).min(1.0)
351    }
352    
    /// Calculate adaptive weights based on file and repository characteristics
    ///
    /// Starts from the configured weights and scales individual components
    /// by repository size, project type, per-file complexity, and
    /// language-specific complexity factors. Multipliers compound when
    /// several conditions apply.
    // NOTE(review): the `file` parameter is currently unused in the body —
    // confirm whether per-file adjustments were intended here.
    fn calculate_adaptive_weights<T>(&self, file: &T, metrics: &ComplexityMetrics) -> EnhancedWeights
    where 
        T: ScanResult,
    {
        let mut weights = self.weights.clone();
        
        // Adjust weights based on repository size
        if self.repo_characteristics.total_files > 1000 {
            // Large repos: prioritize simplicity and maintainability
            weights.maintainability_weight *= 1.3;
            weights.complexity_weight *= 1.2;
        } else if self.repo_characteristics.total_files < 50 {
            // Small repos: focus more on functionality
            weights.base_weights.import_weight *= 1.2;
            weights.base_weights.doc_weight *= 1.1;
        }
        
        // Adjust based on project type
        match self.repo_characteristics.project_type {
            ProjectType::Library => {
                // Libraries need excellent documentation and maintainability
                weights.base_weights.doc_weight *= 1.4;
                weights.maintainability_weight *= 1.3;
                weights.quality_weight *= 1.2;
            },
            ProjectType::Framework => {
                // Frameworks need clear architecture and examples
                weights.base_weights.entrypoint_weight *= 1.3;
                weights.base_weights.examples_weight *= 1.4;
                weights.quality_weight *= 1.2;
            },
            ProjectType::Tool => {
                // Tools prioritize main functionality and simplicity
                weights.base_weights.entrypoint_weight *= 1.5;
                weights.complexity_weight *= 1.3;
            },
            _ => {
                // Default adjustments for applications
            }
        }
        
        // Adjust based on file complexity
        if metrics.cyclomatic_complexity > 10 {
            // High complexity files might be core logic - boost importance
            weights.base_weights.import_weight *= 1.2;
        }
        
        if metrics.maintainability_index < 30.0 {
            // Low maintainability - might indicate technical debt hotspots
            weights.maintainability_weight *= 1.4;
        }
        
        // Language-specific adjustments
        let language = &metrics.language_metrics.language;
        match language.as_str() {
            "rust" => {
                // Rust: Consider ownership complexity
                if let Some(ownership) = metrics.language_metrics.complexity_factors.get("ownership_complexity") {
                    if *ownership > 5.0 {
                        weights.complexity_weight *= 1.2;
                    }
                }
            },
            "python" => {
                // Python: Value documentation and simplicity
                weights.base_weights.doc_weight *= 1.1;
                weights.complexity_weight *= 1.1;
            },
            "javascript" | "typescript" => {
                // JS/TS: Consider async complexity
                if let Some(async_complexity) = metrics.language_metrics.complexity_factors.get("promise_complexity") {
                    if *async_complexity > 3.0 {
                        weights.cognitive_weight *= 1.2;
                    }
                }
            },
            _ => {}
        }
        
        weights
    }
435    
436    /// Calculate the final enhanced score
437    fn calculate_enhanced_final_score(
438        &self,
439        base_score: &ScoreComponents,
440        complexity_score: f64,
441        maintainability_score: f64,
442        cognitive_score: f64,
443        quality_score: f64,
444        weights: &EnhancedWeights
445    ) -> f64 {
446        // Combine base score with complexity metrics
447        let base_contribution = base_score.final_score * 0.6; // Base heuristics weight
448        
449        let complexity_contribution = 
450            complexity_score * weights.complexity_weight +
451            maintainability_score * weights.maintainability_weight +
452            cognitive_score * weights.cognitive_weight +
453            quality_score * weights.quality_weight;
454        
455        let enhanced_contribution = complexity_contribution * 0.4; // Complexity metrics weight
456        
457        // Apply adaptive factors
458        let final_score = (base_contribution + enhanced_contribution) *
459            weights.adaptive_factors.repo_size_factor *
460            weights.adaptive_factors.language_factor *
461            weights.adaptive_factors.maturity_factor *
462            weights.adaptive_factors.experience_factor;
463        
464        final_score.min(2.0) // Cap the score to prevent extreme values
465    }
466    
    /// Update repository characteristics
    ///
    /// Replaces the stored characteristics and recomputes the adaptive
    /// factors that multiply into every final score.
    pub fn update_repository_characteristics(&mut self, characteristics: RepositoryCharacteristics) {
        self.repo_characteristics = characteristics;
        
        // Recalculate adaptive factors based on new characteristics
        self.weights.adaptive_factors = self.calculate_adaptive_factors();
    }
474    
475    /// Calculate adaptive factors based on repository characteristics
476    fn calculate_adaptive_factors(&self) -> AdaptiveFactors {
477        let repo_size_factor = match self.repo_characteristics.total_files {
478            0..=50 => 1.1,      // Small repos - boost importance
479            51..=500 => 1.0,     // Medium repos - neutral
480            501..=2000 => 0.95,  // Large repos - slight penalty
481            _ => 0.9,            // Very large repos - prefer simpler files
482        };
483        
484        let language_factor = if self.repo_characteristics.primary_languages.contains(&"rust".to_string()) {
485            1.05 // Rust projects tend to have good practices
486        } else if self.repo_characteristics.primary_languages.contains(&"javascript".to_string()) {
487            0.95 // JS can be more complex to analyze
488        } else {
489            1.0
490        };
491        
492        let maturity_factor = match self.repo_characteristics.age_months {
493            0..=6 => 0.9,    // New projects - focus on functionality
494            7..=24 => 1.0,   // Maturing projects - balanced
495            25..=60 => 1.1,  // Mature projects - prioritize maintainability
496            _ => 1.2,        // Very mature projects - heavily prioritize quality
497        };
498        
499        let experience_factor = match self.repo_characteristics.team_size {
500            1 => 1.1,        // Solo projects - prefer simpler code
501            2..=5 => 1.0,    // Small teams - balanced
502            6..=15 => 0.95,  // Medium teams - can handle complexity
503            _ => 0.9,        // Large teams - prefer well-structured code
504        };
505        
506        AdaptiveFactors {
507            repo_size_factor,
508            language_factor,
509            maturity_factor,
510            experience_factor,
511        }
512    }
513    
    /// Clear the complexity metrics cache
    ///
    /// Frees memory; subsequent scoring re-analyzes each file.
    pub fn clear_cache(&mut self) {
        self.content_cache.clear();
    }
518    
    /// Get cache statistics
    ///
    /// Returns `(entries, capacity)` of the per-path metrics cache.
    pub fn cache_stats(&self) -> (usize, usize) {
        (self.content_cache.len(), self.content_cache.capacity())
    }
523}
524
525impl EnhancedScoreComponents {
526    /// Get a breakdown of score contributions
527    pub fn score_breakdown(&self) -> HashMap<String, f64> {
528        let mut breakdown = self.base_score.as_map();
529        
530        breakdown.insert("complexity_score".to_string(), self.complexity_score);
531        breakdown.insert("maintainability_score".to_string(), self.maintainability_score);
532        breakdown.insert("cognitive_score".to_string(), self.cognitive_score);
533        breakdown.insert("quality_score".to_string(), self.quality_score);
534        breakdown.insert("enhanced_final_score".to_string(), self.enhanced_final_score);
535        
536        breakdown
537    }
538    
    /// Get the dominant scoring factor
    ///
    /// Returns the `(name, weighted contribution)` pair that contributed
    /// most to the final score, using the same 60/40 base/complexity split
    /// as the final-score calculation.
    pub fn dominant_factor(&self) -> (&'static str, f64) {
        let factors = [
            ("base_heuristics", self.base_score.final_score * 0.6),
            ("complexity", self.complexity_score * self.adjusted_weights.complexity_weight * 0.4),
            ("maintainability", self.maintainability_score * self.adjusted_weights.maintainability_weight * 0.4),
            ("cognitive", self.cognitive_score * self.adjusted_weights.cognitive_weight * 0.4),
            ("quality", self.quality_score * self.adjusted_weights.quality_weight * 0.4),
        ];
        
        // NaN comparisons fall back to Equal; on ties, max_by keeps the
        // last maximal entry. The array is non-empty, so the unwrap_or
        // fallback ("none") is unreachable in practice.
        factors.iter()
            .max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(name, score)| (*name, *score))
            .unwrap_or(("none", 0.0))
    }
554    
555    /// Get a human-readable explanation of the score
556    pub fn explanation(&self) -> String {
557        let (dominant, _) = self.dominant_factor();
558        let complexity_summary = if let Some(metrics) = &self.complexity_metrics {
559            metrics.summary()
560        } else {
561            "No complexity metrics".to_string()
562        };
563        
564        format!(
565            "Score: {:.3} (dominated by {}), Base: {:.3}, Quality: {:.3}, {}",
566            self.enhanced_final_score,
567            dominant,
568            self.base_score.final_score,
569            self.quality_score,
570            complexity_summary
571        )
572    }
573}
574
#[cfg(test)]
mod tests {
    use super::*;
    use crate::heuristics::DocumentAnalysis;
    
    // Mock scan result for testing.
    // Flag fields are derived from the path string, so each test shapes
    // behavior just by choosing a file name.
    #[derive(Debug, Clone)]
    struct MockScanResult {
        path: String,
        relative_path: String,
        depth: usize,
        is_docs: bool,
        is_readme: bool,
        is_test: bool,
        is_entrypoint: bool,
        has_examples: bool,
        priority_boost: f64,
        churn_score: f64,
        centrality_in: f64,
        imports: Option<Vec<String>>,
        doc_analysis: Option<DocumentAnalysis>,
    }
    
    impl MockScanResult {
        // Build a mock whose flags are inferred from substrings of `path`.
        fn new(path: &str) -> Self {
            Self {
                path: path.to_string(),
                relative_path: path.to_string(),
                depth: path.matches('/').count(),
                is_docs: path.contains("doc") || path.ends_with(".md"),
                is_readme: path.to_lowercase().contains("readme"),
                is_test: path.contains("test") || path.contains("spec"),
                is_entrypoint: path.contains("main") || path.contains("index"),
                has_examples: path.contains("example") || path.contains("demo"),
                priority_boost: 0.0,
                churn_score: 0.5,
                centrality_in: 0.3,
                imports: Some(vec!["std::collections::HashMap".to_string()]),
                doc_analysis: Some(DocumentAnalysis::new()),
            }
        }
    }
    
    impl super::super::ScanResult for MockScanResult {
        fn path(&self) -> &str { &self.path }
        fn relative_path(&self) -> &str { &self.relative_path }
        fn depth(&self) -> usize { self.depth }
        fn is_docs(&self) -> bool { self.is_docs }
        fn is_readme(&self) -> bool { self.is_readme }
        fn is_test(&self) -> bool { self.is_test }
        fn is_entrypoint(&self) -> bool { self.is_entrypoint }
        fn has_examples(&self) -> bool { self.has_examples }
        fn priority_boost(&self) -> f64 { self.priority_boost }
        fn churn_score(&self) -> f64 { self.churn_score }
        fn centrality_in(&self) -> f64 { self.centrality_in }
        fn imports(&self) -> Option<&[String]> { self.imports.as_deref() }
        fn doc_analysis(&self) -> Option<&DocumentAnalysis> { self.doc_analysis.as_ref() }
    }
    
    #[test]
    fn test_enhanced_scorer_creation() {
        let scorer = EnhancedHeuristicScorer::new();
        assert!(scorer.weights.complexity_weight > 0.0);
        assert!(scorer.weights.maintainability_weight > 0.0);
    }
    
    #[test]
    fn test_language_detection() {
        let scorer = EnhancedHeuristicScorer::new();
        
        assert_eq!(scorer.detect_language("src/main.rs"), "rust");
        assert_eq!(scorer.detect_language("app.py"), "python");
        assert_eq!(scorer.detect_language("script.js"), "javascript");
        assert_eq!(scorer.detect_language("component.ts"), "typescript");
        assert_eq!(scorer.detect_language("Main.java"), "java");
    }
    
    #[test]
    fn test_enhanced_file_scoring() {
        let mut scorer = EnhancedHeuristicScorer::new();
        scorer.enable_complexity_analysis(); // Enable complexity analysis for testing
        
        let file = MockScanResult::new("src/main.rs");
        let content = r#"
fn main() {
    if condition() {
        for i in 0..10 {
            println!("Hello {}", i);
        }
    }
}
"#;
        let files = vec![file.clone()];
        
        let result = scorer.score_file_enhanced(&file, content, &files);
        assert!(result.is_ok());
        
        // With analysis enabled, metrics must be attached and scores bounded.
        let score = result.unwrap();
        assert!(score.enhanced_final_score > 0.0);
        assert!(score.complexity_score >= 0.0 && score.complexity_score <= 1.0);
        assert!(score.quality_score >= 0.0 && score.quality_score <= 1.0);
        assert!(score.complexity_metrics.is_some());
    }
    
    #[test]
    fn test_adaptive_weights() {
        let weights = EnhancedWeights::default();
        let complexity_config = ComplexityConfig::default();
        let mut repo_chars = RepositoryCharacteristics::default();
        repo_chars.project_type = ProjectType::Library;
        repo_chars.total_files = 1500; // Large repository
        
        let mut scorer = EnhancedHeuristicScorer::with_config(weights, complexity_config, repo_chars);
        
        let file = MockScanResult::new("src/lib.rs");
        let simple_content = "fn simple() { println!(\"hello\"); }";
        let files = vec![file.clone()];
        
        let result = scorer.score_file_enhanced(&file, simple_content, &files);
        assert!(result.is_ok());
        
        let score = result.unwrap();
        
        // For a library, documentation and maintainability should have higher weights
        assert!(score.adjusted_weights.base_weights.doc_weight >= score.adjusted_weights.base_weights.import_weight);
    }
    
    #[test]
    fn test_complexity_vs_simple_code() {
        let mut scorer = EnhancedHeuristicScorer::new();
        scorer.enable_complexity_analysis(); // Enable complexity analysis for testing
        
        let file1 = MockScanResult::new("simple.rs");
        let simple_content = "fn simple() { println!(\"hello\"); }";
        
        let file2 = MockScanResult::new("complex.rs");
        // Deeply nested control flow to drive up cyclomatic/cognitive metrics.
        let complex_content = r#"
fn complex() {
    for i in 0..100 {
        if i % 2 == 0 {
            while condition() {
                match value {
                    1 => { if nested() { deep(); } },
                    2 => { if more_nested() { deeper(); } },
                    _ => { if even_more() { deepest(); } },
                }
            }
        }
    }
}
"#;
        
        let files = vec![file1.clone(), file2.clone()];
        
        let simple_score = scorer.score_file_enhanced(&file1, simple_content, &files).unwrap();
        let complex_score = scorer.score_file_enhanced(&file2, complex_content, &files).unwrap();
        
        // Simple code should generally score better on complexity metrics
        assert!(simple_score.complexity_score > complex_score.complexity_score);
        assert!(simple_score.cognitive_score > complex_score.cognitive_score);
    }
    
    #[test]
    fn test_score_breakdown() {
        // Complexity analysis stays disabled here; breakdown keys must
        // still be present (with neutral scores).
        let mut scorer = EnhancedHeuristicScorer::new();
        
        let file = MockScanResult::new("test.rs");
        let content = "fn test() { if x > 0 { return 1; } else { return 0; } }";
        let files = vec![file.clone()];
        
        let score = scorer.score_file_enhanced(&file, content, &files).unwrap();
        let breakdown = score.score_breakdown();
        
        assert!(breakdown.contains_key("complexity_score"));
        assert!(breakdown.contains_key("maintainability_score"));
        assert!(breakdown.contains_key("cognitive_score"));
        assert!(breakdown.contains_key("quality_score"));
        assert!(breakdown.contains_key("enhanced_final_score"));
        
        let explanation = score.explanation();
        assert!(explanation.contains("Score:"));
        assert!(explanation.contains("dominated by"));
    }
    
    #[test]
    fn test_repository_characteristics_update() {
        let mut scorer = EnhancedHeuristicScorer::new();
        
        let initial_factors = scorer.weights.adaptive_factors.clone();
        
        let mut new_chars = RepositoryCharacteristics::default();
        new_chars.total_files = 5000; // Much larger
        new_chars.project_type = ProjectType::Framework;
        new_chars.age_months = 48; // Mature project
        
        scorer.update_repository_characteristics(new_chars);
        
        let new_factors = &scorer.weights.adaptive_factors;
        
        // Large, mature projects should have different factors
        assert_ne!(initial_factors.repo_size_factor, new_factors.repo_size_factor);
        assert_ne!(initial_factors.maturity_factor, new_factors.maturity_factor);
    }
}