1use std::collections::HashMap;
7use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
8use std::sync::Arc;
9use std::time::{Duration, Instant};
10use tokio::sync::RwLock;
11
/// Thread-safe collector of request, cache, memory, and error metrics.
///
/// Counters are lock-free atomics updated with `Ordering::Relaxed`;
/// the duration samples and the custom-counter map sit behind async
/// `RwLock`s and require `.await` to access.
#[derive(Debug)]
pub struct PerformanceMetrics {
    /// Rolling window of recent request durations; `record_request_duration`
    /// trims it to the newest 1000 samples.
    request_durations: Arc<RwLock<Vec<Duration>>>,
    /// Total number of requests ever recorded (not windowed).
    request_count: AtomicU64,
    /// Number of requests currently in flight (incremented/decremented by guards).
    active_requests: AtomicUsize,
    /// Total cache hits recorded.
    cache_hits: AtomicU64,
    /// Total cache misses recorded.
    cache_misses: AtomicU64,
    /// Latest reported memory usage in bytes (a gauge: overwritten, not accumulated).
    memory_usage_bytes: AtomicU64,
    /// Total errors recorded.
    error_count: AtomicU64,
    /// Dynamically created counters keyed by caller-supplied name.
    custom_counters: Arc<RwLock<HashMap<String, AtomicU64>>>,
}
31
32impl Default for PerformanceMetrics {
33 fn default() -> Self {
34 Self::new()
35 }
36}
37
38impl PerformanceMetrics {
39 pub fn new() -> Self {
41 Self {
42 request_durations: Arc::new(RwLock::new(Vec::new())),
43 request_count: AtomicU64::new(0),
44 active_requests: AtomicUsize::new(0),
45 cache_hits: AtomicU64::new(0),
46 cache_misses: AtomicU64::new(0),
47 memory_usage_bytes: AtomicU64::new(0),
48 error_count: AtomicU64::new(0),
49 custom_counters: Arc::new(RwLock::new(HashMap::new())),
50 }
51 }
52
53 pub async fn record_request_duration(&self, duration: Duration) {
55 self.request_count.fetch_add(1, Ordering::Relaxed);
56
57 let mut durations = self.request_durations.write().await;
58 durations.push(duration);
59
60 if durations.len() > 1000 {
62 let drain_count = durations.len() - 1000;
63 durations.drain(0..drain_count);
64 }
65 }
66
67 pub fn increment_active_requests(&self) -> usize {
69 self.active_requests.fetch_add(1, Ordering::Relaxed)
70 }
71
72 pub fn decrement_active_requests(&self) -> usize {
74 self.active_requests.fetch_sub(1, Ordering::Relaxed)
75 }
76
77 pub fn record_cache_hit(&self) {
79 self.cache_hits.fetch_add(1, Ordering::Relaxed);
80 }
81
82 pub fn record_cache_miss(&self) {
84 self.cache_misses.fetch_add(1, Ordering::Relaxed);
85 }
86
87 pub fn record_error(&self) {
89 self.error_count.fetch_add(1, Ordering::Relaxed);
90 }
91
92 pub fn update_memory_usage(&self, bytes: u64) {
94 self.memory_usage_bytes.store(bytes, Ordering::Relaxed);
95 }
96
97 pub async fn increment_custom_counter(&self, name: &str) {
99 let mut counters = self.custom_counters.write().await;
100 let counter = counters.entry(name.to_string()).or_insert_with(|| AtomicU64::new(0));
101 counter.fetch_add(1, Ordering::Relaxed);
102 }
103
104 pub async fn get_summary(&self) -> PerformanceSummary {
106 let durations = self.request_durations.read().await;
107 let total_requests = self.request_count.load(Ordering::Relaxed);
108 let active_requests = self.active_requests.load(Ordering::Relaxed);
109 let cache_hits = self.cache_hits.load(Ordering::Relaxed);
110 let cache_misses = self.cache_misses.load(Ordering::Relaxed);
111 let memory_usage = self.memory_usage_bytes.load(Ordering::Relaxed);
112 let error_count = self.error_count.load(Ordering::Relaxed);
113
114 let mut sorted_durations: Vec<Duration> = durations.clone();
116 sorted_durations.sort();
117
118 let (p50, p95, p99) = if !sorted_durations.is_empty() {
119 let p50_idx = sorted_durations.len() / 2;
120 let p95_idx = (sorted_durations.len() * 95) / 100;
121 let p99_idx = (sorted_durations.len() * 99) / 100;
122
123 (
124 sorted_durations.get(p50_idx).copied(),
125 sorted_durations.get(p95_idx).copied(),
126 sorted_durations.get(p99_idx).copied(),
127 )
128 } else {
129 (None, None, None)
130 };
131
132 let avg_duration = if !sorted_durations.is_empty() {
133 Some(Duration::from_nanos(
134 sorted_durations.iter().map(|d| d.as_nanos() as u64).sum::<u64>()
135 / sorted_durations.len() as u64,
136 ))
137 } else {
138 None
139 };
140
141 let cache_hit_rate = if cache_hits + cache_misses > 0 {
142 (cache_hits as f64) / ((cache_hits + cache_misses) as f64)
143 } else {
144 0.0
145 };
146
147 let error_rate = if total_requests > 0 {
148 (error_count as f64) / (total_requests as f64)
149 } else {
150 0.0
151 };
152
153 PerformanceSummary {
154 total_requests,
155 active_requests,
156 avg_duration,
157 p50_duration: p50,
158 p95_duration: p95,
159 p99_duration: p99,
160 cache_hit_rate,
161 cache_hits,
162 cache_misses,
163 memory_usage_bytes: memory_usage,
164 error_count,
165 error_rate,
166 }
167 }
168
169 pub async fn reset(&self) {
171 self.request_durations.write().await.clear();
172 self.request_count.store(0, Ordering::Relaxed);
173 self.active_requests.store(0, Ordering::Relaxed);
174 self.cache_hits.store(0, Ordering::Relaxed);
175 self.cache_misses.store(0, Ordering::Relaxed);
176 self.memory_usage_bytes.store(0, Ordering::Relaxed);
177 self.error_count.store(0, Ordering::Relaxed);
178 self.custom_counters.write().await.clear();
179 }
180}
181
/// Point-in-time snapshot produced by `PerformanceMetrics::get_summary`.
#[derive(Debug, Clone)]
pub struct PerformanceSummary {
    /// Total requests recorded since creation (or last reset).
    pub total_requests: u64,
    /// Requests currently in flight at snapshot time.
    pub active_requests: usize,
    /// Mean duration over the retained sample window; `None` if no samples.
    pub avg_duration: Option<Duration>,
    /// 50th-percentile duration over the window; `None` if no samples.
    pub p50_duration: Option<Duration>,
    /// 95th-percentile duration over the window; `None` if no samples.
    pub p95_duration: Option<Duration>,
    /// 99th-percentile duration over the window; `None` if no samples.
    pub p99_duration: Option<Duration>,
    /// hits / (hits + misses), in 0.0..=1.0; 0.0 when no cache accesses.
    pub cache_hit_rate: f64,
    /// Total cache hits.
    pub cache_hits: u64,
    /// Total cache misses.
    pub cache_misses: u64,
    /// Last reported memory usage, in bytes.
    pub memory_usage_bytes: u64,
    /// Total errors recorded.
    pub error_count: u64,
    /// errors / total_requests; 0.0 when no requests recorded.
    pub error_rate: f64,
}
198
/// RAII request tracker: increments the active-request gauge on creation
/// and records the elapsed duration into the metrics when dropped.
pub struct PerformanceGuard {
    /// Instant at which tracking began.
    start_time: Instant,
    /// Metrics sink the duration is reported to on drop.
    metrics: Arc<PerformanceMetrics>,
    /// Optional label; when present, a `{name}_count` custom counter is
    /// incremented on drop.
    name: Option<String>,
}
205
206impl PerformanceGuard {
207 pub fn new(metrics: Arc<PerformanceMetrics>) -> Self {
209 metrics.increment_active_requests();
210 Self {
211 start_time: Instant::now(),
212 metrics,
213 name: None,
214 }
215 }
216
217 pub fn named(metrics: Arc<PerformanceMetrics>, name: String) -> Self {
219 metrics.increment_active_requests();
220 Self {
221 start_time: Instant::now(),
222 metrics,
223 name: Some(name),
224 }
225 }
226
227 pub fn elapsed(&self) -> Duration {
229 self.start_time.elapsed()
230 }
231}
232
233impl Drop for PerformanceGuard {
234 fn drop(&mut self) {
235 let duration = self.start_time.elapsed();
236 self.metrics.decrement_active_requests();
237
238 let metrics = self.metrics.clone();
240 let name = self.name.clone();
241 tokio::spawn(async move {
242 metrics.record_request_duration(duration).await;
243 if let Some(name) = name {
244 metrics.increment_custom_counter(&format!("{}_count", name)).await;
245 }
246 });
247 }
248}
249
/// Cheaply cloneable facade over shared [`PerformanceMetrics`] with an
/// enable/disable switch. Clones share the same underlying metrics but
/// carry their own copy of the `enabled` flag.
#[derive(Debug, Clone)]
pub struct PerformanceMonitor {
    /// Shared metrics store; all clones report into the same instance.
    metrics: Arc<PerformanceMetrics>,
    /// When false, recording methods become no-ops (summary/reset still work).
    enabled: bool,
}
256
257impl Default for PerformanceMonitor {
258 fn default() -> Self {
259 Self::new()
260 }
261}
262
263impl PerformanceMonitor {
264 pub fn new() -> Self {
266 Self {
267 metrics: Arc::new(PerformanceMetrics::new()),
268 enabled: true,
269 }
270 }
271
272 pub fn disabled() -> Self {
274 Self {
275 metrics: Arc::new(PerformanceMetrics::new()),
276 enabled: false,
277 }
278 }
279
280 pub fn set_enabled(&mut self, enabled: bool) {
282 self.enabled = enabled;
283 }
284
285 pub fn is_enabled(&self) -> bool {
287 self.enabled
288 }
289
290 pub fn start_tracking(&self) -> Option<PerformanceGuard> {
292 if self.enabled {
293 Some(PerformanceGuard::new(self.metrics.clone()))
294 } else {
295 None
296 }
297 }
298
299 pub fn start_tracking_named(&self, name: &str) -> Option<PerformanceGuard> {
301 if self.enabled {
302 Some(PerformanceGuard::named(self.metrics.clone(), name.to_string()))
303 } else {
304 None
305 }
306 }
307
308 pub fn record_cache_hit(&self) {
310 if self.enabled {
311 self.metrics.record_cache_hit();
312 }
313 }
314
315 pub fn record_cache_miss(&self) {
317 if self.enabled {
318 self.metrics.record_cache_miss();
319 }
320 }
321
322 pub fn record_error(&self) {
324 if self.enabled {
325 self.metrics.record_error();
326 }
327 }
328
329 pub fn update_memory_usage(&self, bytes: u64) {
331 if self.enabled {
332 self.metrics.update_memory_usage(bytes);
333 }
334 }
335
336 pub async fn get_summary(&self) -> PerformanceSummary {
338 self.metrics.get_summary().await
339 }
340
341 pub async fn reset(&self) {
343 self.metrics.reset().await;
344 }
345}
346
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::sleep;

    /// Direct-metrics smoke test: counters, gauge, rates, and summary agree.
    #[tokio::test]
    async fn test_performance_metrics() {
        let metrics = PerformanceMetrics::new();

        metrics.record_request_duration(Duration::from_millis(100)).await;
        metrics.record_request_duration(Duration::from_millis(200)).await;
        metrics.record_cache_hit();
        metrics.record_cache_miss();
        metrics.record_error();
        metrics.update_memory_usage(1024);

        let summary = metrics.get_summary().await;

        assert_eq!(summary.total_requests, 2);
        assert_eq!(summary.cache_hits, 1);
        assert_eq!(summary.cache_misses, 1);
        assert_eq!(summary.error_count, 1);
        assert_eq!(summary.memory_usage_bytes, 1024);
        // 1 hit out of 2 accesses.
        assert!((summary.cache_hit_rate - 0.5).abs() < f64::EPSILON);
    }

    /// Guard drop should decrement the active gauge and record one duration.
    #[tokio::test]
    async fn test_performance_guard() {
        let monitor = PerformanceMonitor::new();

        {
            let _guard = monitor.start_tracking();
            sleep(Duration::from_millis(10)).await;
        }
        // Dropping the guard spawns a task to record the duration; this sleep
        // gives it time to run. NOTE(review): timing-based synchronization —
        // could flake on a heavily loaded machine.
        sleep(Duration::from_millis(50)).await;

        let summary = monitor.get_summary().await;
        assert_eq!(summary.total_requests, 1);
        assert_eq!(summary.active_requests, 0);
    }

    /// A disabled monitor must ignore every recording call.
    #[tokio::test]
    async fn test_disabled_monitor() {
        let monitor = PerformanceMonitor::disabled();

        assert!(!monitor.is_enabled());
        assert!(monitor.start_tracking().is_none());

        monitor.record_cache_hit();
        monitor.record_error();

        let summary = monitor.get_summary().await;
        assert_eq!(summary.total_requests, 0);
        assert_eq!(summary.cache_hits, 0);
        assert_eq!(summary.error_count, 0);
    }
}