mockforge_core/
performance.rs

1//! Performance monitoring and optimization utilities
2//!
3//! This module provides infrastructure for monitoring and optimizing
4//! performance in MockForge applications.
5
6use std::collections::HashMap;
7use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
8use std::sync::Arc;
9use std::time::{Duration, Instant};
10use tokio::sync::RwLock;
11
/// Performance metrics collector
///
/// Thread-safe container for request timings, counters, and gauges.
/// Simple counters/gauges use relaxed atomics; the duration histogram and
/// the custom-counter map sit behind async `RwLock`s because they are
/// compound structures mutated under a lock.
#[derive(Debug)]
pub struct PerformanceMetrics {
    /// Request processing duration histogram
    /// (bounded to the most recent 1000 samples by `record_request_duration`)
    request_durations: Arc<RwLock<Vec<Duration>>>,
    /// Total number of requests processed
    request_count: AtomicU64,
    /// Number of active concurrent requests
    active_requests: AtomicUsize,
    /// Cache hit/miss statistics
    cache_hits: AtomicU64,
    cache_misses: AtomicU64,
    /// Memory usage tracking (last value reported via `update_memory_usage`, in bytes)
    memory_usage_bytes: AtomicU64,
    /// Error rates
    error_count: AtomicU64,
    /// Custom metric counters, keyed by caller-supplied name
    custom_counters: Arc<RwLock<HashMap<String, AtomicU64>>>,
}
31
32impl Default for PerformanceMetrics {
33    fn default() -> Self {
34        Self::new()
35    }
36}
37
38impl PerformanceMetrics {
39    /// Create a new performance metrics collector
40    pub fn new() -> Self {
41        Self {
42            request_durations: Arc::new(RwLock::new(Vec::new())),
43            request_count: AtomicU64::new(0),
44            active_requests: AtomicUsize::new(0),
45            cache_hits: AtomicU64::new(0),
46            cache_misses: AtomicU64::new(0),
47            memory_usage_bytes: AtomicU64::new(0),
48            error_count: AtomicU64::new(0),
49            custom_counters: Arc::new(RwLock::new(HashMap::new())),
50        }
51    }
52
53    /// Record a request processing duration
54    pub async fn record_request_duration(&self, duration: Duration) {
55        self.request_count.fetch_add(1, Ordering::Relaxed);
56
57        let mut durations = self.request_durations.write().await;
58        durations.push(duration);
59
60        // Keep only the last 1000 durations to prevent unbounded growth
61        if durations.len() > 1000 {
62            let drain_count = durations.len() - 1000;
63            durations.drain(0..drain_count);
64        }
65    }
66
67    /// Increment active request count
68    pub fn increment_active_requests(&self) -> usize {
69        self.active_requests.fetch_add(1, Ordering::Relaxed)
70    }
71
72    /// Decrement active request count
73    pub fn decrement_active_requests(&self) -> usize {
74        self.active_requests.fetch_sub(1, Ordering::Relaxed)
75    }
76
77    /// Record a cache hit
78    pub fn record_cache_hit(&self) {
79        self.cache_hits.fetch_add(1, Ordering::Relaxed);
80    }
81
82    /// Record a cache miss
83    pub fn record_cache_miss(&self) {
84        self.cache_misses.fetch_add(1, Ordering::Relaxed);
85    }
86
87    /// Record an error
88    pub fn record_error(&self) {
89        self.error_count.fetch_add(1, Ordering::Relaxed);
90    }
91
92    /// Update memory usage
93    pub fn update_memory_usage(&self, bytes: u64) {
94        self.memory_usage_bytes.store(bytes, Ordering::Relaxed);
95    }
96
97    /// Increment a custom counter
98    pub async fn increment_custom_counter(&self, name: &str) {
99        let mut counters = self.custom_counters.write().await;
100        let counter = counters.entry(name.to_string()).or_insert_with(|| AtomicU64::new(0));
101        counter.fetch_add(1, Ordering::Relaxed);
102    }
103
104    /// Get performance summary
105    pub async fn get_summary(&self) -> PerformanceSummary {
106        let durations = self.request_durations.read().await;
107        let total_requests = self.request_count.load(Ordering::Relaxed);
108        let active_requests = self.active_requests.load(Ordering::Relaxed);
109        let cache_hits = self.cache_hits.load(Ordering::Relaxed);
110        let cache_misses = self.cache_misses.load(Ordering::Relaxed);
111        let memory_usage = self.memory_usage_bytes.load(Ordering::Relaxed);
112        let error_count = self.error_count.load(Ordering::Relaxed);
113
114        // Calculate percentiles
115        let mut sorted_durations: Vec<Duration> = durations.clone();
116        sorted_durations.sort();
117
118        let (p50, p95, p99) = if !sorted_durations.is_empty() {
119            let p50_idx = sorted_durations.len() / 2;
120            let p95_idx = (sorted_durations.len() * 95) / 100;
121            let p99_idx = (sorted_durations.len() * 99) / 100;
122
123            (
124                sorted_durations.get(p50_idx).copied(),
125                sorted_durations.get(p95_idx).copied(),
126                sorted_durations.get(p99_idx).copied(),
127            )
128        } else {
129            (None, None, None)
130        };
131
132        let avg_duration = if !sorted_durations.is_empty() {
133            Some(Duration::from_nanos(
134                sorted_durations.iter().map(|d| d.as_nanos() as u64).sum::<u64>()
135                    / sorted_durations.len() as u64,
136            ))
137        } else {
138            None
139        };
140
141        let cache_hit_rate = if cache_hits + cache_misses > 0 {
142            (cache_hits as f64) / ((cache_hits + cache_misses) as f64)
143        } else {
144            0.0
145        };
146
147        let error_rate = if total_requests > 0 {
148            (error_count as f64) / (total_requests as f64)
149        } else {
150            0.0
151        };
152
153        PerformanceSummary {
154            total_requests,
155            active_requests,
156            avg_duration,
157            p50_duration: p50,
158            p95_duration: p95,
159            p99_duration: p99,
160            cache_hit_rate,
161            cache_hits,
162            cache_misses,
163            memory_usage_bytes: memory_usage,
164            error_count,
165            error_rate,
166        }
167    }
168
169    /// Reset all metrics
170    pub async fn reset(&self) {
171        self.request_durations.write().await.clear();
172        self.request_count.store(0, Ordering::Relaxed);
173        self.active_requests.store(0, Ordering::Relaxed);
174        self.cache_hits.store(0, Ordering::Relaxed);
175        self.cache_misses.store(0, Ordering::Relaxed);
176        self.memory_usage_bytes.store(0, Ordering::Relaxed);
177        self.error_count.store(0, Ordering::Relaxed);
178        self.custom_counters.write().await.clear();
179    }
180}
181
/// Performance summary snapshot
///
/// Point-in-time view of all metrics, produced by
/// `PerformanceMetrics::get_summary`.
#[derive(Debug, Clone)]
pub struct PerformanceSummary {
    /// Total requests recorded since creation (or the last reset)
    pub total_requests: u64,
    /// Requests currently in flight at snapshot time
    pub active_requests: usize,
    /// Mean duration over the retained samples; `None` when no samples exist
    pub avg_duration: Option<Duration>,
    /// 50th-percentile duration; `None` when no samples exist
    pub p50_duration: Option<Duration>,
    /// 95th-percentile duration; `None` when no samples exist
    pub p95_duration: Option<Duration>,
    /// 99th-percentile duration; `None` when no samples exist
    pub p99_duration: Option<Duration>,
    /// hits / (hits + misses); 0.0 when there has been no cache activity
    pub cache_hit_rate: f64,
    pub cache_hits: u64,
    pub cache_misses: u64,
    /// Last value reported via `update_memory_usage`
    pub memory_usage_bytes: u64,
    pub error_count: u64,
    /// errors / total_requests; 0.0 when no requests were recorded
    pub error_rate: f64,
}
198
/// Performance monitoring guard for automatic duration tracking
///
/// Construction increments the active-request count; dropping the guard
/// decrements it and records the elapsed duration (RAII-style tracking —
/// see the `Drop` impl below).
pub struct PerformanceGuard {
    /// When the guarded operation began
    start_time: Instant,
    /// Metrics sink this guard reports into
    metrics: Arc<PerformanceMetrics>,
    /// Optional operation name; when set, drop also bumps the
    /// "<name>_count" custom counter
    name: Option<String>,
}
205
206impl PerformanceGuard {
207    /// Create a new performance guard
208    pub fn new(metrics: Arc<PerformanceMetrics>) -> Self {
209        metrics.increment_active_requests();
210        Self {
211            start_time: Instant::now(),
212            metrics,
213            name: None,
214        }
215    }
216
217    /// Create a named performance guard
218    pub fn named(metrics: Arc<PerformanceMetrics>, name: String) -> Self {
219        metrics.increment_active_requests();
220        Self {
221            start_time: Instant::now(),
222            metrics,
223            name: Some(name),
224        }
225    }
226
227    /// Get the elapsed duration
228    pub fn elapsed(&self) -> Duration {
229        self.start_time.elapsed()
230    }
231}
232
233impl Drop for PerformanceGuard {
234    fn drop(&mut self) {
235        let duration = self.start_time.elapsed();
236        self.metrics.decrement_active_requests();
237
238        // Record duration asynchronously
239        let metrics = self.metrics.clone();
240        let name = self.name.clone();
241        tokio::spawn(async move {
242            metrics.record_request_duration(duration).await;
243            if let Some(name) = name {
244                metrics.increment_custom_counter(&format!("{}_count", name)).await;
245            }
246        });
247    }
248}
249
/// High-level performance monitoring wrapper
///
/// Cloning is cheap: clones share the same underlying `PerformanceMetrics`
/// via the `Arc` (though each clone carries its own `enabled` flag).
#[derive(Debug, Clone)]
pub struct PerformanceMonitor {
    /// Shared metrics store all recording methods write into
    metrics: Arc<PerformanceMetrics>,
    /// When false, every recording method is a no-op
    enabled: bool,
}
256
257impl Default for PerformanceMonitor {
258    fn default() -> Self {
259        Self::new()
260    }
261}
262
263impl PerformanceMonitor {
264    /// Create a new performance monitor
265    pub fn new() -> Self {
266        Self {
267            metrics: Arc::new(PerformanceMetrics::new()),
268            enabled: true,
269        }
270    }
271
272    /// Create a disabled performance monitor (no-op)
273    pub fn disabled() -> Self {
274        Self {
275            metrics: Arc::new(PerformanceMetrics::new()),
276            enabled: false,
277        }
278    }
279
280    /// Enable or disable monitoring
281    pub fn set_enabled(&mut self, enabled: bool) {
282        self.enabled = enabled;
283    }
284
285    /// Check if monitoring is enabled
286    pub fn is_enabled(&self) -> bool {
287        self.enabled
288    }
289
290    /// Start tracking an operation
291    pub fn start_tracking(&self) -> Option<PerformanceGuard> {
292        if self.enabled {
293            Some(PerformanceGuard::new(self.metrics.clone()))
294        } else {
295            None
296        }
297    }
298
299    /// Start tracking a named operation
300    pub fn start_tracking_named(&self, name: &str) -> Option<PerformanceGuard> {
301        if self.enabled {
302            Some(PerformanceGuard::named(self.metrics.clone(), name.to_string()))
303        } else {
304            None
305        }
306    }
307
308    /// Record a cache hit
309    pub fn record_cache_hit(&self) {
310        if self.enabled {
311            self.metrics.record_cache_hit();
312        }
313    }
314
315    /// Record a cache miss
316    pub fn record_cache_miss(&self) {
317        if self.enabled {
318            self.metrics.record_cache_miss();
319        }
320    }
321
322    /// Record an error
323    pub fn record_error(&self) {
324        if self.enabled {
325            self.metrics.record_error();
326        }
327    }
328
329    /// Update memory usage
330    pub fn update_memory_usage(&self, bytes: u64) {
331        if self.enabled {
332            self.metrics.update_memory_usage(bytes);
333        }
334    }
335
336    /// Get performance summary
337    pub async fn get_summary(&self) -> PerformanceSummary {
338        self.metrics.get_summary().await
339    }
340
341    /// Reset all metrics
342    pub async fn reset(&self) {
343        self.metrics.reset().await;
344    }
345}
346
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::sleep;

    /// Counters, gauges, and derived rates all surface in the summary.
    #[tokio::test]
    async fn test_performance_metrics() {
        let collector = PerformanceMetrics::new();

        // Feed in a small, known workload.
        collector.record_request_duration(Duration::from_millis(100)).await;
        collector.record_request_duration(Duration::from_millis(200)).await;
        collector.record_cache_hit();
        collector.record_cache_miss();
        collector.record_error();
        collector.update_memory_usage(1024);

        let snapshot = collector.get_summary().await;

        assert_eq!(snapshot.total_requests, 2);
        assert_eq!(snapshot.cache_hits, 1);
        assert_eq!(snapshot.cache_misses, 1);
        assert_eq!(snapshot.error_count, 1);
        assert_eq!(snapshot.memory_usage_bytes, 1024);
        // One hit and one miss -> exactly 50% hit rate.
        assert!((snapshot.cache_hit_rate - 0.5).abs() < f64::EPSILON);
    }

    /// Dropping a guard records the request and releases the active slot.
    #[tokio::test]
    async fn test_performance_guard() {
        let monitor = PerformanceMonitor::new();

        {
            let _guard = monitor.start_tracking();
            sleep(Duration::from_millis(10)).await;
        } // guard dropped here

        // The drop records asynchronously; give the spawned task a moment.
        sleep(Duration::from_millis(50)).await;

        let snapshot = monitor.get_summary().await;
        assert_eq!(snapshot.total_requests, 1);
        assert_eq!(snapshot.active_requests, 0);
    }

    /// A disabled monitor ignores every recording call.
    #[tokio::test]
    async fn test_disabled_monitor() {
        let monitor = PerformanceMonitor::disabled();

        assert!(!monitor.is_enabled());
        assert!(monitor.start_tracking().is_none());

        // These must be no-ops.
        monitor.record_cache_hit();
        monitor.record_error();

        let snapshot = monitor.get_summary().await;
        assert_eq!(snapshot.total_requests, 0);
        assert_eq!(snapshot.cache_hits, 0);
        assert_eq!(snapshot.error_count, 0);
    }
}