async_inspect/profile/
comparison.rs

//! Performance comparison between runs
//!
//! This module provides functionality to compare performance metrics across multiple runs,
//! detect regressions, and identify performance improvements.
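//!
//! A minimal end-to-end sketch (the file paths are illustrative, and the
//! `async_inspect::profile::comparison` import path is assumed from this file's location):
//!
//! ```ignore
//! use async_inspect::profile::comparison::{PerformanceComparison, PerformanceSnapshot};
//!
//! // Load a previously saved baseline and compare it against the current run.
//! let baseline = PerformanceSnapshot::load_from_file("baseline.json").expect("missing baseline");
//! let current = PerformanceSnapshot::load_from_file("current.json").expect("missing current run");
//! let comparison = PerformanceComparison::compare(baseline, current);
//!
//! println!("{}", comparison.summary());
//! if comparison.has_regressions() {
//!     eprintln!("performance regression detected");
//! }
//! ```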

use super::{DurationStats, LockContentionMetrics, TaskMetrics};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;

/// A snapshot of performance metrics from a single run
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSnapshot {
    /// Unix timestamp (seconds since the epoch) when the snapshot was taken
    pub timestamp: u64,

    /// Run identifier
    pub run_id: String,

    /// Overall task statistics
    pub task_stats: DurationStats,

    /// All task metrics
    pub task_metrics: Vec<TaskMetrics>,

    /// Lock contention metrics
    pub lock_metrics: Vec<LockContentionMetrics>,

    /// Total tasks executed
    pub total_tasks: usize,

    /// Average task duration
    pub avg_task_duration: Duration,

    /// Total execution time
    pub total_execution_time: Duration,
}

impl PerformanceSnapshot {
    /// Create a new, empty performance snapshot for the given run
    #[must_use]
    pub fn new(run_id: String) -> Self {
        Self {
            timestamp: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            run_id,
            task_stats: DurationStats {
                min: Duration::ZERO,
                max: Duration::ZERO,
                mean: Duration::ZERO,
                median: Duration::ZERO,
                p95: Duration::ZERO,
                p99: Duration::ZERO,
                std_dev: 0.0,
                count: 0,
            },
            task_metrics: Vec::new(),
            lock_metrics: Vec::new(),
            total_tasks: 0,
            avg_task_duration: Duration::ZERO,
            total_execution_time: Duration::ZERO,
        }
    }

    /// Save the snapshot to a pretty-printed JSON file
    ///
    /// # Errors
    ///
    /// Returns an error if serialization fails or the file cannot be written.
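    ///
    /// A round-trip sketch (the path is illustrative):
    ///
    /// ```ignore
    /// let snapshot = PerformanceSnapshot::new("baseline".to_string());
    /// snapshot.save_to_file("baseline.json").expect("failed to save snapshot");
    /// let restored = PerformanceSnapshot::load_from_file("baseline.json").expect("failed to load");
    /// assert_eq!(snapshot.run_id, restored.run_id);
    /// ```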
    pub fn save_to_file(&self, path: &str) -> Result<(), std::io::Error> {
        let json = serde_json::to_string_pretty(self)?;
        std::fs::write(path, json)?;
        Ok(())
    }

    /// Load a snapshot from a JSON file
    ///
    /// # Errors
    ///
    /// Returns an error if the file cannot be read or does not contain a
    /// valid serialized snapshot.
    pub fn load_from_file(path: &str) -> Result<Self, std::io::Error> {
        let json = std::fs::read_to_string(path)?;
        let snapshot: Self = serde_json::from_str(&json)?;
        Ok(snapshot)
    }
}

/// Comparison result between two runs
#[derive(Debug, Clone)]
pub struct PerformanceComparison {
    /// Baseline snapshot
    pub baseline: PerformanceSnapshot,

    /// Current snapshot
    pub current: PerformanceSnapshot,

    /// Task duration changes
    pub task_duration_change: DurationChange,

    /// Lock contention changes
    pub lock_contention_changes: Vec<LockContentionChange>,

    /// Overall regression/improvement status
    pub status: ComparisonStatus,

    /// Detailed findings
    pub findings: Vec<Finding>,
}

/// Status of the comparison
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ComparisonStatus {
    /// Performance improved
    Improved,
    /// Performance degraded (regression)
    Regressed,
    /// Performance is similar
    Similar,
    /// Mixed results
    Mixed,
}

/// Change in duration metrics, expressed as percentages relative to the baseline
#[derive(Debug, Clone)]
pub struct DurationChange {
    /// Mean duration change (percent)
    pub mean_change: f64,

    /// Median duration change (percent)
    pub median_change: f64,

    /// P95 duration change (percent)
    pub p95_change: f64,

    /// P99 duration change (percent)
    pub p99_change: f64,

    /// Whether this represents an improvement
    pub is_improvement: bool,
}

/// Change in lock contention
#[derive(Debug, Clone)]
pub struct LockContentionChange {
    /// Resource name
    pub resource_name: String,

    /// Baseline contention rate
    pub baseline_rate: f64,

    /// Current contention rate
    pub current_rate: f64,

    /// Change in contention rate (percent)
    pub rate_change: f64,

    /// Change in average wait time (percent)
    pub wait_time_change: f64,

    /// Whether this represents an improvement
    pub is_improvement: bool,
}

/// A specific finding from the comparison
#[derive(Debug, Clone)]
pub struct Finding {
    /// Severity of the finding
    pub severity: FindingSeverity,

    /// Category of the finding
    pub category: FindingCategory,

    /// Description
    pub description: String,

    /// Impact percentage (positive = improvement, negative = regression)
    pub impact_percent: f64,
}

/// Severity of a finding
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FindingSeverity {
    /// Critical regression (>50% degradation)
    Critical,
    /// Major issue (20-50% degradation)
    Major,
    /// Minor issue (5-20% degradation)
    Minor,
    /// Informational
    Info,
}

/// Category of a finding
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FindingCategory {
    /// Task execution time
    TaskDuration,
    /// Lock contention
    LockContention,
    /// Overall throughput
    Throughput,
    /// Other
    Other,
}

impl PerformanceComparison {
    /// Compare two performance snapshots, treating the first as the baseline
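    ///
    /// Positive percentage changes mean the current run is slower (or more
    /// contended) than the baseline. A hedged sketch (snapshot construction
    /// is elided):
    ///
    /// ```ignore
    /// let comparison = PerformanceComparison::compare(baseline, current);
    /// match comparison.status {
    ///     ComparisonStatus::Regressed | ComparisonStatus::Mixed => {
    ///         for finding in comparison.get_regressions() {
    ///             eprintln!("[{:?}] {}", finding.severity, finding.description);
    ///         }
    ///     }
    ///     _ => println!("no regressions detected"),
    /// }
    /// ```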
    #[must_use]
    pub fn compare(baseline: PerformanceSnapshot, current: PerformanceSnapshot) -> Self {
        let task_duration_change =
            Self::compare_durations(&baseline.task_stats, &current.task_stats);
        let lock_contention_changes =
            Self::compare_locks(&baseline.lock_metrics, &current.lock_metrics);
        let findings = Self::generate_findings(
            &baseline,
            &current,
            &task_duration_change,
            &lock_contention_changes,
        );
        let status = Self::determine_status(&findings);

        Self {
            baseline,
            current,
            task_duration_change,
            lock_contention_changes,
            status,
            findings,
        }
    }

    fn compare_durations(baseline: &DurationStats, current: &DurationStats) -> DurationChange {
        let mean_change =
            Self::calculate_change_percent(baseline.mean.as_secs_f64(), current.mean.as_secs_f64());

        let median_change = Self::calculate_change_percent(
            baseline.median.as_secs_f64(),
            current.median.as_secs_f64(),
        );

        let p95_change =
            Self::calculate_change_percent(baseline.p95.as_secs_f64(), current.p95.as_secs_f64());

        let p99_change =
            Self::calculate_change_percent(baseline.p99.as_secs_f64(), current.p99.as_secs_f64());

        // A negative change in both mean and median counts as an improvement.
        let is_improvement = mean_change < 0.0 && median_change < 0.0;

        DurationChange {
            mean_change,
            median_change,
            p95_change,
            p99_change,
            is_improvement,
        }
    }

    fn compare_locks(
        baseline: &[LockContentionMetrics],
        current: &[LockContentionMetrics],
    ) -> Vec<LockContentionChange> {
        let mut changes = Vec::new();
        let baseline_map: HashMap<_, _> = baseline.iter().map(|m| (&m.name, m)).collect();

        // Only locks present in both snapshots are compared; locks that appear
        // or disappear between runs are skipped.
        for current_metric in current {
            if let Some(baseline_metric) = baseline_map.get(&current_metric.name) {
                let rate_change = Self::calculate_change_percent(
                    baseline_metric.contention_rate,
                    current_metric.contention_rate,
                );

                let wait_time_change = Self::calculate_change_percent(
                    baseline_metric.avg_wait_time.as_secs_f64(),
                    current_metric.avg_wait_time.as_secs_f64(),
                );

                changes.push(LockContentionChange {
                    resource_name: current_metric.name.clone(),
                    baseline_rate: baseline_metric.contention_rate,
                    current_rate: current_metric.contention_rate,
                    rate_change,
                    wait_time_change,
                    is_improvement: rate_change < 0.0 && wait_time_change < 0.0,
                });
            }
        }

        changes
    }

    fn generate_findings(
        baseline: &PerformanceSnapshot,
        current: &PerformanceSnapshot,
        duration_change: &DurationChange,
        lock_changes: &[LockContentionChange],
    ) -> Vec<Finding> {
        let mut findings = Vec::new();

        // Check task duration changes
        if duration_change.mean_change.abs() > 5.0 {
            let severity = if duration_change.mean_change.abs() > 50.0 {
                FindingSeverity::Critical
            } else if duration_change.mean_change.abs() > 20.0 {
                FindingSeverity::Major
            } else {
                FindingSeverity::Minor
            };

            findings.push(Finding {
                severity,
                category: FindingCategory::TaskDuration,
                description: format!(
                    "Mean task duration changed by {:.1}%",
                    duration_change.mean_change
                ),
                impact_percent: -duration_change.mean_change, // Negative change is improvement
            });
        }

        // Check lock contention changes
        for lock_change in lock_changes {
            if lock_change.rate_change.abs() > 10.0 {
                let severity = if lock_change.rate_change.abs() > 50.0 {
                    FindingSeverity::Major
                } else {
                    FindingSeverity::Minor
                };

                findings.push(Finding {
                    severity,
                    category: FindingCategory::LockContention,
                    description: format!(
                        "Lock '{}' contention changed by {:.1}%",
                        lock_change.resource_name, lock_change.rate_change
                    ),
                    impact_percent: -lock_change.rate_change,
                });
            }
        }

        // Check throughput
        if baseline.total_tasks > 0 && current.total_tasks > 0 {
            let throughput_change = Self::calculate_change_percent(
                baseline.total_tasks as f64,
                current.total_tasks as f64,
            );

            if throughput_change.abs() > 5.0 {
                findings.push(Finding {
                    severity: FindingSeverity::Info,
                    category: FindingCategory::Throughput,
                    description: format!("Throughput changed by {throughput_change:.1}%"),
                    impact_percent: throughput_change,
                });
            }
        }

        findings
    }

    fn determine_status(findings: &[Finding]) -> ComparisonStatus {
        let regressions = findings.iter().filter(|f| f.impact_percent < 0.0).count();
        let improvements = findings.iter().filter(|f| f.impact_percent > 0.0).count();

        if regressions == 0 && improvements > 0 {
            ComparisonStatus::Improved
        } else if regressions > 0 && improvements == 0 {
            ComparisonStatus::Regressed
        } else if regressions == 0 && improvements == 0 {
            ComparisonStatus::Similar
        } else {
            ComparisonStatus::Mixed
        }
    }

    /// Percent change of `current` relative to `baseline`; a zero baseline
    /// yields 0.0 to avoid division by zero.
    fn calculate_change_percent(baseline: f64, current: f64) -> f64 {
        if baseline == 0.0 {
            return 0.0;
        }
        ((current - baseline) / baseline) * 100.0
    }

    /// Check whether any critical or major regressions were found
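    ///
    /// Typically used as a CI gate; a hedged sketch (the exit code is illustrative):
    ///
    /// ```ignore
    /// if comparison.has_regressions() {
    ///     eprintln!("{}", comparison.summary());
    ///     std::process::exit(1);
    /// }
    /// ```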
    #[must_use]
    pub fn has_regressions(&self) -> bool {
        self.findings.iter().any(|f| {
            matches!(
                f.severity,
                FindingSeverity::Critical | FindingSeverity::Major
            ) && f.impact_percent < 0.0
        })
    }

    /// Get all regressions
    #[must_use]
    pub fn get_regressions(&self) -> Vec<&Finding> {
        self.findings
            .iter()
            .filter(|f| f.impact_percent < 0.0)
            .collect()
    }

    /// Get all improvements
    #[must_use]
    pub fn get_improvements(&self) -> Vec<&Finding> {
        self.findings
            .iter()
            .filter(|f| f.impact_percent > 0.0)
            .collect()
    }

    /// Generate a summary report
    #[must_use]
    pub fn summary(&self) -> String {
        let mut report = String::from("Performance Comparison Summary\n");
        report.push_str("==============================\n\n");

        report.push_str(&format!("Status: {:?}\n\n", self.status));

        report.push_str("Task Duration Changes:\n");
        report.push_str(&format!(
            "  Mean: {:.1}%\n",
            self.task_duration_change.mean_change
        ));
        report.push_str(&format!(
            "  Median: {:.1}%\n",
            self.task_duration_change.median_change
        ));
        report.push_str(&format!(
            "  P95: {:.1}%\n",
            self.task_duration_change.p95_change
        ));
        report.push_str(&format!(
            "  P99: {:.1}%\n\n",
            self.task_duration_change.p99_change
        ));

        if !self.lock_contention_changes.is_empty() {
            report.push_str("Lock Contention Changes:\n");
            for change in &self.lock_contention_changes {
                report.push_str(&format!(
                    "  {}: {:.1}% (wait time: {:.1}%)\n",
                    change.resource_name, change.rate_change, change.wait_time_change
                ));
            }
            report.push('\n');
        }

        if !self.findings.is_empty() {
            report.push_str("Findings:\n");
            for finding in &self.findings {
                report.push_str(&format!(
                    "  [{:?}] {}\n",
                    finding.severity, finding.description
                ));
            }
        }

        report
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_calculate_change_percent() {
        assert_eq!(
            PerformanceComparison::calculate_change_percent(100.0, 110.0),
            10.0
        );
        assert_eq!(
            PerformanceComparison::calculate_change_percent(100.0, 90.0),
            -10.0
        );
        assert_eq!(
            PerformanceComparison::calculate_change_percent(0.0, 10.0),
            0.0
        );
    }

    #[test]
    fn test_snapshot_serialization() {
        let snapshot = PerformanceSnapshot::new("test_run".to_string());
        let json = serde_json::to_string(&snapshot).unwrap();
        let deserialized: PerformanceSnapshot = serde_json::from_str(&json).unwrap();
        assert_eq!(snapshot.run_id, deserialized.run_id);
    }
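
    // A sketch of an end-to-end comparison: two freshly created (empty)
    // snapshots carry identical zeroed metrics, so the comparison should
    // produce no findings and report `Similar`.
    #[test]
    fn test_compare_empty_snapshots() {
        let baseline = PerformanceSnapshot::new("baseline".to_string());
        let current = PerformanceSnapshot::new("current".to_string());
        let comparison = PerformanceComparison::compare(baseline, current);
        assert_eq!(comparison.status, ComparisonStatus::Similar);
        assert!(!comparison.has_regressions());
        assert!(comparison.get_improvements().is_empty());
    }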
}