mockforge_core/performance.rs

//! Performance monitoring and optimization utilities
//!
//! This module provides infrastructure for monitoring and optimizing
//! performance in MockForge applications.
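//!
//! # Example
//!
//! A minimal usage sketch (illustrative only; assumes this module is exported
//! as `mockforge_core::performance`):
//!
//! ```no_run
//! use mockforge_core::performance::PerformanceMonitor;
//!
//! # async fn example() {
//! let monitor = PerformanceMonitor::new();
//! {
//!     // The guard records its elapsed time when it is dropped.
//!     let _guard = monitor.start_tracking_named("handle_request");
//!     // ... do work ...
//! }
//! let summary = monitor.get_summary().await;
//! println!("p95 latency: {:?}", summary.p95_duration);
//! # }
//! ```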

use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;

/// Performance metrics collector
#[derive(Debug)]
pub struct PerformanceMetrics {
    /// Recent request processing durations (bounded sliding window)
    request_durations: Arc<RwLock<Vec<Duration>>>,
    /// Total number of requests processed
    request_count: AtomicU64,
    /// Number of active concurrent requests
    active_requests: AtomicUsize,
    /// Number of cache hits
    cache_hits: AtomicU64,
    /// Number of cache misses
    cache_misses: AtomicU64,
    /// Current memory usage in bytes
    memory_usage_bytes: AtomicU64,
    /// Total number of errors recorded
    error_count: AtomicU64,
    /// Custom metric counters
    custom_counters: Arc<RwLock<HashMap<String, AtomicU64>>>,
}

impl Default for PerformanceMetrics {
    fn default() -> Self {
        Self::new()
    }
}

impl PerformanceMetrics {
    /// Create a new performance metrics collector
    pub fn new() -> Self {
        Self {
            request_durations: Arc::new(RwLock::new(Vec::new())),
            request_count: AtomicU64::new(0),
            active_requests: AtomicUsize::new(0),
            cache_hits: AtomicU64::new(0),
            cache_misses: AtomicU64::new(0),
            memory_usage_bytes: AtomicU64::new(0),
            error_count: AtomicU64::new(0),
            custom_counters: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Record a request processing duration
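    ///
    /// Only the most recent 1000 samples are retained, so the averages and
    /// percentiles reported by `get_summary` describe a sliding window
    /// rather than the full request history.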
    pub async fn record_request_duration(&self, duration: Duration) {
        self.request_count.fetch_add(1, Ordering::Relaxed);

        let mut durations = self.request_durations.write().await;
        durations.push(duration);

        // Keep only the last 1000 durations to prevent unbounded growth
        if durations.len() > 1000 {
            let drain_count = durations.len() - 1000;
            durations.drain(0..drain_count);
        }
    }

    /// Increment the active request count, returning the previous value
    pub fn increment_active_requests(&self) -> usize {
        self.active_requests.fetch_add(1, Ordering::Relaxed)
    }

    /// Decrement the active request count, returning the previous value
    pub fn decrement_active_requests(&self) -> usize {
        self.active_requests.fetch_sub(1, Ordering::Relaxed)
    }

    /// Record a cache hit
    pub fn record_cache_hit(&self) {
        self.cache_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Record a cache miss
    pub fn record_cache_miss(&self) {
        self.cache_misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Record an error
    pub fn record_error(&self) {
        self.error_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Update memory usage
    pub fn update_memory_usage(&self, bytes: u64) {
        self.memory_usage_bytes.store(bytes, Ordering::Relaxed);
    }

    /// Increment a custom counter, creating it on first use.
    ///
    /// Custom counters are tracked internally but are not currently included
    /// in [`PerformanceSummary`]; they only grow until [`reset`](Self::reset).
    pub async fn increment_custom_counter(&self, name: &str) {
        let mut counters = self.custom_counters.write().await;
        let counter = counters
            .entry(name.to_string())
            .or_insert_with(|| AtomicU64::new(0));
        counter.fetch_add(1, Ordering::Relaxed);
    }

    /// Get a snapshot of the current metrics as a [`PerformanceSummary`]
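    ///
    /// Percentiles are computed with the nearest-rank method over the
    /// retained window of recent durations (at most 1000 samples). For
    /// example, with 200 sorted samples the p95 value is the element at
    /// index `200 * 95 / 100 = 190`.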
    pub async fn get_summary(&self) -> PerformanceSummary {
        let durations = self.request_durations.read().await;
        let total_requests = self.request_count.load(Ordering::Relaxed);
        let active_requests = self.active_requests.load(Ordering::Relaxed);
        let cache_hits = self.cache_hits.load(Ordering::Relaxed);
        let cache_misses = self.cache_misses.load(Ordering::Relaxed);
        let memory_usage = self.memory_usage_bytes.load(Ordering::Relaxed);
        let error_count = self.error_count.load(Ordering::Relaxed);

        // Calculate percentiles (nearest rank) from a sorted copy of the window
        let mut sorted_durations: Vec<Duration> = durations.clone();
        sorted_durations.sort();

        let (p50, p95, p99) = if !sorted_durations.is_empty() {
            let p50_idx = sorted_durations.len() / 2;
            let p95_idx = (sorted_durations.len() * 95) / 100;
            let p99_idx = (sorted_durations.len() * 99) / 100;

            (
                sorted_durations.get(p50_idx).copied(),
                sorted_durations.get(p95_idx).copied(),
                sorted_durations.get(p99_idx).copied(),
            )
        } else {
            (None, None, None)
        };

        let avg_duration = if !sorted_durations.is_empty() {
            Some(Duration::from_nanos(
                sorted_durations.iter().map(|d| d.as_nanos() as u64).sum::<u64>()
                    / sorted_durations.len() as u64,
            ))
        } else {
            None
        };

        let cache_hit_rate = if cache_hits + cache_misses > 0 {
            (cache_hits as f64) / ((cache_hits + cache_misses) as f64)
        } else {
            0.0
        };

        let error_rate = if total_requests > 0 {
            (error_count as f64) / (total_requests as f64)
        } else {
            0.0
        };

        PerformanceSummary {
            total_requests,
            active_requests,
            avg_duration,
            p50_duration: p50,
            p95_duration: p95,
            p99_duration: p99,
            cache_hit_rate,
            cache_hits,
            cache_misses,
            memory_usage_bytes: memory_usage,
            error_count,
            error_rate,
        }
    }

    /// Reset all metrics
    pub async fn reset(&self) {
        self.request_durations.write().await.clear();
        self.request_count.store(0, Ordering::Relaxed);
        self.active_requests.store(0, Ordering::Relaxed);
        self.cache_hits.store(0, Ordering::Relaxed);
        self.cache_misses.store(0, Ordering::Relaxed);
        self.memory_usage_bytes.store(0, Ordering::Relaxed);
        self.error_count.store(0, Ordering::Relaxed);
        self.custom_counters.write().await.clear();
    }
}

/// Performance summary snapshot with aggregated metrics
#[derive(Debug, Clone)]
pub struct PerformanceSummary {
    /// Total number of requests processed
    pub total_requests: u64,
    /// Current number of active concurrent requests
    pub active_requests: usize,
    /// Average request processing duration
    pub avg_duration: Option<Duration>,
    /// 50th percentile (median) request duration
    pub p50_duration: Option<Duration>,
    /// 95th percentile request duration
    pub p95_duration: Option<Duration>,
    /// 99th percentile request duration
    pub p99_duration: Option<Duration>,
    /// Cache hit rate as a ratio (0.0 to 1.0)
    pub cache_hit_rate: f64,
    /// Total number of cache hits
    pub cache_hits: u64,
    /// Total number of cache misses
    pub cache_misses: u64,
    /// Current memory usage in bytes
    pub memory_usage_bytes: u64,
    /// Total number of errors encountered
    pub error_count: u64,
    /// Error rate as a ratio (0.0 to 1.0)
    pub error_rate: f64,
}

/// Performance monitoring guard for automatic duration tracking
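///
/// The elapsed time is recorded when the guard is dropped. A minimal,
/// illustrative sketch (the module path is an assumption):
///
/// ```no_run
/// # use std::sync::Arc;
/// # use mockforge_core::performance::{PerformanceGuard, PerformanceMetrics};
/// # async fn example() {
/// let metrics = Arc::new(PerformanceMetrics::new());
/// {
///     let _guard = PerformanceGuard::new(Arc::clone(&metrics));
///     // ... timed work ...
/// } // guard dropped here; the duration is recorded on a background task
/// # }
/// ```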
pub struct PerformanceGuard {
    start_time: Instant,
    metrics: Arc<PerformanceMetrics>,
    name: Option<String>,
}

impl PerformanceGuard {
    /// Create a new performance guard
    pub fn new(metrics: Arc<PerformanceMetrics>) -> Self {
        metrics.increment_active_requests();
        Self {
            start_time: Instant::now(),
            metrics,
            name: None,
        }
    }

    /// Create a named performance guard
    pub fn named(metrics: Arc<PerformanceMetrics>, name: String) -> Self {
        metrics.increment_active_requests();
        Self {
            start_time: Instant::now(),
            metrics,
            name: Some(name),
        }
    }

    /// Get the elapsed duration
    pub fn elapsed(&self) -> Duration {
        self.start_time.elapsed()
    }
}

impl Drop for PerformanceGuard {
    fn drop(&mut self) {
        let duration = self.start_time.elapsed();
        self.metrics.decrement_active_requests();

        // Record the duration asynchronously. `tokio::spawn` panics when no
        // runtime is active, so check for one first and silently drop the
        // sample if the guard is dropped outside a Tokio context.
        if let Ok(handle) = tokio::runtime::Handle::try_current() {
            let metrics = self.metrics.clone();
            let name = self.name.clone();
            handle.spawn(async move {
                metrics.record_request_duration(duration).await;
                if let Some(name) = name {
                    metrics.increment_custom_counter(&format!("{}_count", name)).await;
                }
            });
        }
    }
}

/// High-level performance monitoring wrapper
#[derive(Debug, Clone)]
pub struct PerformanceMonitor {
    metrics: Arc<PerformanceMetrics>,
    enabled: bool,
}

impl Default for PerformanceMonitor {
    fn default() -> Self {
        Self::new()
    }
}

impl PerformanceMonitor {
    /// Create a new performance monitor
    pub fn new() -> Self {
        Self {
            metrics: Arc::new(PerformanceMetrics::new()),
            enabled: true,
        }
    }

    /// Create a disabled performance monitor (no-op)
    pub fn disabled() -> Self {
        Self {
            metrics: Arc::new(PerformanceMetrics::new()),
            enabled: false,
        }
    }

    /// Enable or disable monitoring.
    ///
    /// The flag is per-instance: clones share the underlying metrics but
    /// each carry their own `enabled` bool.
    pub fn set_enabled(&mut self, enabled: bool) {
        self.enabled = enabled;
    }

    /// Check if monitoring is enabled
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Start tracking an operation; returns `None` when monitoring is disabled
    pub fn start_tracking(&self) -> Option<PerformanceGuard> {
        if self.enabled {
            Some(PerformanceGuard::new(self.metrics.clone()))
        } else {
            None
        }
    }

    /// Start tracking a named operation; returns `None` when disabled
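    ///
    /// Besides recording the duration, dropping a named guard increments a
    /// custom counter called `"{name}_count"`. A small illustrative sketch
    /// (the operation name is hypothetical):
    ///
    /// ```no_run
    /// # use mockforge_core::performance::PerformanceMonitor;
    /// let monitor = PerformanceMonitor::new();
    /// let _guard = monitor.start_tracking_named("template_render");
    /// // on drop: duration recorded and "template_render_count" incremented
    /// ```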
    pub fn start_tracking_named(&self, name: &str) -> Option<PerformanceGuard> {
        if self.enabled {
            Some(PerformanceGuard::named(self.metrics.clone(), name.to_string()))
        } else {
            None
        }
    }

    /// Record a cache hit
    pub fn record_cache_hit(&self) {
        if self.enabled {
            self.metrics.record_cache_hit();
        }
    }

    /// Record a cache miss
    pub fn record_cache_miss(&self) {
        if self.enabled {
            self.metrics.record_cache_miss();
        }
    }

    /// Record an error
    pub fn record_error(&self) {
        if self.enabled {
            self.metrics.record_error();
        }
    }

    /// Update memory usage
    pub fn update_memory_usage(&self, bytes: u64) {
        if self.enabled {
            self.metrics.update_memory_usage(bytes);
        }
    }

    /// Get performance summary
    pub async fn get_summary(&self) -> PerformanceSummary {
        self.metrics.get_summary().await
    }

    /// Reset all metrics
    pub async fn reset(&self) {
        self.metrics.reset().await;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::sleep;

    #[tokio::test]
    async fn test_performance_metrics() {
        let metrics = PerformanceMetrics::new();

        // Record some sample data
        metrics.record_request_duration(Duration::from_millis(100)).await;
        metrics.record_request_duration(Duration::from_millis(200)).await;
        metrics.record_cache_hit();
        metrics.record_cache_miss();
        metrics.record_error();
        metrics.update_memory_usage(1024);

        let summary = metrics.get_summary().await;

        assert_eq!(summary.total_requests, 2);
        assert_eq!(summary.cache_hits, 1);
        assert_eq!(summary.cache_misses, 1);
        assert_eq!(summary.error_count, 1);
        assert_eq!(summary.memory_usage_bytes, 1024);
        assert!((summary.cache_hit_rate - 0.5).abs() < f64::EPSILON);
    }

    #[tokio::test]
    async fn test_performance_guard() {
        let monitor = PerformanceMonitor::new();

        {
            let _guard = monitor.start_tracking();
            sleep(Duration::from_millis(10)).await;
        }

        // Give the task spawned on drop time to record the duration
        sleep(Duration::from_millis(50)).await;

        let summary = monitor.get_summary().await;
        assert_eq!(summary.total_requests, 1);
        assert_eq!(summary.active_requests, 0);
    }

    #[tokio::test]
    async fn test_disabled_monitor() {
        let monitor = PerformanceMonitor::disabled();

        assert!(!monitor.is_enabled());
        assert!(monitor.start_tracking().is_none());

        monitor.record_cache_hit();
        monitor.record_error();

        let summary = monitor.get_summary().await;
        assert_eq!(summary.total_requests, 0);
        assert_eq!(summary.cache_hits, 0);
        assert_eq!(summary.error_count, 0);
    }
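
    // An added sketch (not in the original tests): exercises `reset` and the
    // custom-counter path, which the tests above do not cover. The counter
    // name "demo" is arbitrary.
    #[tokio::test]
    async fn test_reset_clears_metrics() {
        let metrics = PerformanceMetrics::new();

        metrics.record_request_duration(Duration::from_millis(5)).await;
        metrics.record_cache_hit();
        metrics.increment_custom_counter("demo").await;

        metrics.reset().await;

        let summary = metrics.get_summary().await;
        assert_eq!(summary.total_requests, 0);
        assert_eq!(summary.cache_hits, 0);
        assert!(summary.avg_duration.is_none());
        assert!(summary.p50_duration.is_none());
    }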
}