pmat 3.11.0

PMAT - Zero-config AI context generation and code quality toolkit (CLI, MCP, HTTP)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
//! Performance Testing per SPECIFICATION.md Section 30
//! 
//! This module implements comprehensive performance testing that validates
//! the performance characteristics defined in SPECIFICATION.md Section 1.4.
//! Tests are designed to fail if performance degrades beyond acceptable thresholds.

use std::time::{Duration, Instant};
use std::path::PathBuf;
use tempfile::tempdir;
use std::fs;
use pmat::cli::handlers::complexity_handlers;
use pmat::services::complexity::ComplexityAnalysisService;
use anyhow::Result;

/// Performance characteristics from SPECIFICATION.md Section 1.4
///
/// Upper bounds for latency/memory and lower bounds for throughput that the
/// tests in this module assert against; the concrete target values live in
/// the `Default` implementation.
#[derive(Debug, Clone)]
pub struct PerformanceTargets {
    /// Maximum acceptable cold (first-run) startup latency, in milliseconds.
    pub startup_cold_ms: u64,     // 127ms max
    /// Maximum acceptable hot (warmed-up) startup latency, in milliseconds.
    pub startup_hot_ms: u64,      // 4ms max
    
    /// Minimum single-threaded analysis throughput, in LOC per second.
    pub loc_per_sec_st: u64,      // 487,000 LOC/s single-threaded
    /// Minimum multi-threaded analysis throughput, in LOC per second.
    pub loc_per_sec_mt: u64,      // 3,921,000 LOC/s multi-threaded
    
    /// Expected base resident set size, in MB.
    pub base_rss_mb: u64,         // 47MB base
    /// Expected incremental memory per thousand lines analyzed, in KB.
    pub per_kloc_kb: u64,         // 312KB per KLOC
}

impl Default for PerformanceTargets {
    fn default() -> Self {
        Self {
            startup_cold_ms: 127,
            startup_hot_ms: 4,
            loc_per_sec_st: 487_000,
            loc_per_sec_mt: 3_921_000,
            base_rss_mb: 47,
            per_kloc_kb: 312,
        }
    }
}

/// Generate Rust source text containing approximately `lines` lines of code.
///
/// The output is a 7-line header (comment, import, struct definition)
/// followed by simple numbered functions, each occupying exactly 8 lines
/// (including the trailing blank), so the total line count closely tracks
/// the requested `lines`.
///
/// FIX: the previous version looped `lines - 10` times while emitting
/// 8 lines per iteration, producing ~8x the requested LOC and therefore
/// inflating every throughput figure computed from `lines / elapsed`.
fn generate_test_code(lines: usize) -> String {
    // Header is 7 lines; each generated function block is 8 lines.
    const HEADER_LINES: usize = 7;
    const LINES_PER_FN: usize = 8;

    let mut code = String::with_capacity(lines * 40);
    code.push_str("// Generated test code for performance testing\n");
    code.push_str("use std::collections::HashMap;\n\n");
    code.push_str("pub struct TestStruct {\n");
    code.push_str("    data: HashMap<String, i32>,\n");
    code.push_str("}\n\n");

    let fn_count = lines.saturating_sub(HEADER_LINES) / LINES_PER_FN;
    for i in 0..fn_count {
        // Only the lines interpolating `i` need `format!`; the constant
        // lines use `push_str` directly to avoid pointless formatting.
        code.push_str(&format!("pub fn test_function_{i}() -> i32 {{\n"));
        code.push_str("    let mut sum = 0;\n");
        code.push_str(&format!("    for j in 0..{i} {{\n"));
        code.push_str(&format!("        sum += j * {i};\n"));
        code.push_str("    }\n");
        code.push_str("    sum\n");
        code.push_str("}\n\n");
    }

    code
}

/// Test cold startup performance (first-time initialization)
#[test]
pub async fn test_cold_startup_performance() -> Result<()> {
    let targets = PerformanceTargets::default();
    let start = Instant::now();
    
    // Simulate cold startup by creating a new analysis service
    let _service = ComplexityAnalysisService::new();
    
    let duration = start.elapsed();
    let duration_ms = duration.as_millis() as u64;
    
    assert!(
        duration_ms <= targets.startup_cold_ms,
        "Cold startup took {}ms, expected ≤{}ms (SPECIFICATION.md target)",
        duration_ms, targets.startup_cold_ms
    );
    
    println!("✅ Cold startup: {}ms (target: ≤{}ms)", duration_ms, targets.startup_cold_ms);
    Ok(())
}

/// Test hot startup performance (subsequent initializations)
#[test]
pub async fn test_hot_startup_performance() -> Result<()> {
    let targets = PerformanceTargets::default();
    
    // Warm up
    let _service1 = ComplexityAnalysisService::new();
    
    // Measure hot startup
    let start = Instant::now();
    let _service2 = ComplexityAnalysisService::new();
    let duration = start.elapsed();
    let duration_ms = duration.as_millis() as u64;
    
    assert!(
        duration_ms <= targets.startup_hot_ms,
        "Hot startup took {}ms, expected ≤{}ms (SPECIFICATION.md target)",
        duration_ms, targets.startup_hot_ms
    );
    
    println!("✅ Hot startup: {}ms (target: ≤{}ms)", duration_ms, targets.startup_hot_ms);
    Ok(())
}

/// Test single-threaded analysis throughput
///
/// Generates a 10K-LOC fixture, analyzes it once, and requires the measured
/// rate to reach at least 80% of the single-threaded specification target.
#[tokio::test]
async fn test_single_threaded_throughput() -> Result<()> {
    let perf = PerformanceTargets::default();
    let line_count = 10_000; // 10K LOC test

    // Write the generated source into a temporary workspace.
    let workspace = tempdir()?;
    let source_path = workspace.path().join("test.rs");
    fs::write(&source_path, generate_test_code(line_count))?;

    // Time a single-file complexity analysis run.
    let clock = Instant::now();
    complexity_handlers::handle_analyze_complexity(
        &workspace.path().to_path_buf(),
        None, // single file
        20,   // cyclomatic threshold
        15,   // cognitive threshold
        &pmat::cli::enums::ComplexityOutputFormat::Json,
        Some(source_path.clone()),
        None, // include
        None, // exclude
        None, // output
        false, // fail_on_violation
    ).await?;
    let elapsed = clock.elapsed();

    let actual_throughput = (line_count as f64) / elapsed.as_secs_f64();

    assert!(
        actual_throughput >= perf.loc_per_sec_st as f64 * 0.8, // 80% of target
        "Single-threaded throughput: {:.0} LOC/s, expected ≥{} LOC/s",
        actual_throughput, perf.loc_per_sec_st
    );

    println!("✅ Single-threaded throughput: {:.0} LOC/s (target: ≥{} LOC/s)",
             actual_throughput, perf.loc_per_sec_st);

    Ok(())
}

/// Test analysis performance with realistic project size
///
/// Builds a ten-module `src/` tree totalling ~50K LOC and checks whole-tree
/// analysis throughput against a lenient multi-file floor (I/O overhead).
#[tokio::test]
async fn test_realistic_project_analysis() -> Result<()> {
    let total_loc = 50_000; // 50K LOC project

    // Lay out a src/ directory with ten equally-sized modules.
    let workspace = tempdir()?;
    let src_dir = workspace.path().join("src");
    fs::create_dir(&src_dir)?;
    for module_idx in 0..10 {
        let module_path = src_dir.join(format!("module_{}.rs", module_idx));
        fs::write(&module_path, generate_test_code(total_loc / 10))?;
    }

    let clock = Instant::now();
    complexity_handlers::handle_analyze_complexity(
        &workspace.path().to_path_buf(),
        None, // all files
        20,   // cyclomatic threshold
        15,   // cognitive threshold
        &pmat::cli::enums::ComplexityOutputFormat::Summary,
        None, // project root
        None, // include
        None, // exclude
        None, // output
        false, // fail_on_violation
    ).await?;
    let duration = clock.elapsed();

    let actual_throughput = (total_loc as f64) / duration.as_secs_f64();

    // More lenient threshold for multi-file analysis due to I/O overhead
    let min_throughput = 100_000; // 100K LOC/s
    assert!(
        actual_throughput >= min_throughput as f64,
        "Multi-file analysis throughput: {:.0} LOC/s, expected ≥{} LOC/s",
        actual_throughput, min_throughput
    );

    println!("✅ Multi-file analysis: {:.0} LOC/s, duration: {:?}",
             actual_throughput, duration);

    Ok(())
}

/// Test memory usage patterns during analysis
///
/// Measures the RSS delta across a single-file complexity analysis and
/// asserts it stays within a conservative bound. On non-Linux platforms
/// `get_memory_usage_mb` returns 0, so the delta is 0 and the assertion
/// passes vacuously.
#[tokio::test]
async fn test_memory_usage_patterns() -> Result<()> {
    let test_lines = 20_000; // 20K LOC test
    
    // Create test file
    let temp_dir = tempdir()?;
    let test_file = temp_dir.path().join("memory_test.rs");
    let test_code = generate_test_code(test_lines);
    fs::write(&test_file, &test_code)?;
    
    // Get initial memory usage (approximate)
    let initial_memory = get_memory_usage_mb();
    
    // Run analysis
    complexity_handlers::handle_analyze_complexity(
        &temp_dir.path().to_path_buf(),
        None,
        20,
        15,
        &pmat::cli::enums::ComplexityOutputFormat::Json,
        Some(test_file),
        None,
        None,
        None,
        false,
    ).await?;
    
    // saturating_sub: RSS can legitimately shrink between the two samples
    // (allocator returning pages), which would otherwise underflow u64.
    let final_memory = get_memory_usage_mb();
    let memory_used = final_memory.saturating_sub(initial_memory);
    
    // Memory usage should be reasonable for 20K LOC
    let expected_memory_mb = 10; // Conservative estimate
    assert!(
        memory_used <= expected_memory_mb,
        "Memory usage: {}MB for {}K LOC, expected ≤{}MB",
        memory_used, test_lines / 1000, expected_memory_mb
    );
    
    println!("✅ Memory usage: {}MB for {}K LOC", memory_used, test_lines / 1000);
    
    Ok(())
}

/// Test performance regression detection
///
/// Runs the same single-file analysis several times and fails when the
/// slowest run exceeds twice the fastest, i.e. when timing variance
/// suggests an unstable or regressing code path.
#[tokio::test]
async fn test_performance_regression_detection() -> Result<()> {
    const ITERATIONS: usize = 5;
    let loc = 5_000; // Smaller test for multiple iterations

    // Materialize one fixture file reused by every iteration.
    let workspace = tempdir()?;
    let fixture = workspace.path().join("regression_test.rs");
    fs::write(&fixture, generate_test_code(loc))?;

    let mut samples: Vec<Duration> = Vec::with_capacity(ITERATIONS);
    while samples.len() < ITERATIONS {
        let clock = Instant::now();
        complexity_handlers::handle_analyze_complexity(
            &workspace.path().to_path_buf(),
            None,
            20,
            15,
            &pmat::cli::enums::ComplexityOutputFormat::Json,
            Some(fixture.clone()),
            None,
            None,
            None,
            false,
        ).await?;
        samples.push(clock.elapsed());
    }

    // Summary statistics over the collected samples.
    let avg_duration = samples.iter().sum::<Duration>() / ITERATIONS as u32;
    let max_duration = samples.iter().max().unwrap();
    let min_duration = samples.iter().min().unwrap();

    // Performance should be consistent (max ≤ 2x min)
    let variance_ratio = max_duration.as_millis() as f64 / min_duration.as_millis() as f64;
    assert!(
        variance_ratio <= 2.0,
        "High performance variance: min={}ms, max={}ms, ratio={:.2}",
        min_duration.as_millis(), max_duration.as_millis(), variance_ratio
    );

    println!("✅ Performance consistency: avg={}ms, min={}ms, max={}ms",
             avg_duration.as_millis(), min_duration.as_millis(), max_duration.as_millis());

    Ok(())
}

/// Test large file handling performance
///
/// Confirms a single 100K-LOC file is analyzed within a wall-clock budget
/// rather than degrading nonlinearly with file size.
#[tokio::test]
async fn test_large_file_performance() -> Result<()> {
    let loc = 100_000; // 100K LOC single file

    // One very large generated source file.
    let workspace = tempdir()?;
    let big_file = workspace.path().join("large_file.rs");
    fs::write(&big_file, generate_test_code(loc))?;

    let clock = Instant::now();
    complexity_handlers::handle_analyze_complexity(
        &workspace.path().to_path_buf(),
        None,
        20,
        15,
        &pmat::cli::enums::ComplexityOutputFormat::Summary,
        Some(big_file),
        None,
        None,
        None,
        false,
    ).await?;
    let duration = clock.elapsed();

    // Large files should still be processed reasonably quickly
    let max_duration_secs = 5; // 5 seconds max for 100K LOC
    assert!(
        duration.as_secs() <= max_duration_secs,
        "Large file analysis took {}s, expected ≤{}s for 100K LOC",
        duration.as_secs(), max_duration_secs
    );

    let throughput = (loc as f64) / duration.as_secs_f64();
    println!("✅ Large file performance: {:.0} LOC/s, duration: {:?}", throughput, duration);

    Ok(())
}

/// Approximate the current process's resident set size in MB.
///
/// On Linux this parses the `VmRSS:` line of `/proc/self/status` (value
/// reported in KB) and converts to MB. On other platforms, or when the file
/// cannot be read or its value cannot be parsed, it returns 0, so callers
/// must treat the result as best-effort, not authoritative.
fn get_memory_usage_mb() -> u64 {
    #[cfg(target_os = "linux")]
    {
        use std::fs;
        // `VmRSS:   12345 kB` — take the numeric field and convert KB → MB.
        if let Ok(status) = fs::read_to_string("/proc/self/status") {
            if let Some(kb) = status
                .lines()
                .find(|line| line.starts_with("VmRSS:"))
                .and_then(|line| line.split_whitespace().nth(1))
                .and_then(|field| field.parse::<u64>().ok())
            {
                return kb / 1024;
            }
        }
    }
    
    // Fallback for other platforms or if reading fails
    0
}

/// Performance test configuration
///
/// Feature flags consumed by `run_performance_test_suite` to select which
/// sub-suites execute.
pub struct PerformanceTestConfig {
    /// Run the performance-variance (regression) tests.
    pub enable_regression_tests: bool,
    /// Run the memory-usage tests.
    pub enable_memory_tests: bool,
    /// Run the throughput tests.
    pub enable_throughput_tests: bool,
    /// Iteration count for repeated measurements.
    // NOTE(review): not read by run_performance_test_suite — the regression
    // test uses its own ITERATIONS constant; confirm whether this field
    // should be threaded through.
    pub test_iterations: usize,
}

impl Default for PerformanceTestConfig {
    fn default() -> Self {
        Self {
            enable_regression_tests: true,
            enable_memory_tests: true,
            enable_throughput_tests: true,
            test_iterations: 3,
        }
    }
}

/// Run comprehensive performance test suite
///
/// Executes the throughput, regression, and memory sub-suites according to
/// the flags in `config`, printing a section banner before each group, and
/// propagates the first error produced by any sub-test.
///
/// NOTE(review): `config.test_iterations` is not consulted here — the
/// regression test uses its own internal ITERATIONS constant; confirm
/// whether the field should be wired through.
pub async fn run_performance_test_suite(config: PerformanceTestConfig) -> Result<()> {
    println!("🏃 Running PMAT Performance Test Suite (SPECIFICATION.md Section 30)");
    println!("================================================================");
    
    if config.enable_throughput_tests {
        println!("\n📊 Throughput Tests:");
        test_single_threaded_throughput().await?;
        test_realistic_project_analysis().await?;
        test_large_file_performance().await?;
    }
    
    if config.enable_regression_tests {
        println!("\n🔍 Regression Tests:");
        test_performance_regression_detection().await?;
    }
    
    if config.enable_memory_tests {
        println!("\n💾 Memory Tests:");
        test_memory_usage_patterns().await?;
    }
    
    println!("\n✅ All performance tests passed!");
    println!("Performance characteristics meet SPECIFICATION.md Section 1.4 requirements");
    
    Ok(())
}

#[cfg(test)]
mod performance_specification_tests {
    use super::*;
    
    /// Integration test that runs the full performance suite
    /// with the default configuration (all sub-suites enabled).
    #[tokio::test]
    async fn test_specification_performance_suite() -> Result<()> {
        let config = PerformanceTestConfig::default();
        run_performance_test_suite(config).await
    }
}