//! Latency recording and analysis for mockforge_performance.

use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::Arc;
use tokio::sync::RwLock;
10
/// A single recorded latency measurement for one request/operation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LatencySample {
    /// Wall-clock time (UTC) at which the sample was recorded.
    pub timestamp: DateTime<Utc>,
    /// Observed latency in milliseconds.
    pub latency_ms: u64,
    /// Endpoint path the sample belongs to, when known (e.g. "/api/users").
    pub endpoint: Option<String>,
    /// Request method, when known (e.g. "GET").
    pub method: Option<String>,
    /// Response status code, when known; codes >= 400 are counted as errors
    /// by the analyzer.
    pub status_code: Option<u16>,
    /// Error description if the request failed.
    pub error: Option<String>,
}
27
/// Thread-safe, bounded buffer of recent latency samples.
///
/// Samples are evicted oldest-first once they exceed `max_age_seconds`
/// or once the buffer grows past `max_samples`. Cloning the recorder
/// clones the `Arc`, so all clones share the same buffer.
#[derive(Debug, Clone)]
pub struct LatencyRecorder {
    /// Shared FIFO of samples; oldest at the front.
    samples: Arc<RwLock<VecDeque<LatencySample>>>,
    /// Maximum number of samples retained.
    max_samples: usize,
    /// Maximum age of a retained sample, in seconds.
    max_age_seconds: u64,
}
40
41impl LatencyRecorder {
42 pub fn new(max_samples: usize, max_age_seconds: u64) -> Self {
44 Self {
45 samples: Arc::new(RwLock::new(VecDeque::new())),
46 max_samples,
47 max_age_seconds,
48 }
49 }
50
51 pub async fn record(
53 &self,
54 latency_ms: u64,
55 endpoint: Option<String>,
56 method: Option<String>,
57 status_code: Option<u16>,
58 error: Option<String>,
59 ) {
60 let sample = LatencySample {
61 timestamp: Utc::now(),
62 latency_ms,
63 endpoint,
64 method,
65 status_code,
66 error,
67 };
68
69 let mut samples = self.samples.write().await;
70 samples.push_back(sample);
71
72 self.cleanup_old_samples(&mut samples).await;
74 }
75
76 pub async fn get_samples(&self) -> Vec<LatencySample> {
78 let mut samples = self.samples.write().await;
79 self.cleanup_old_samples(&mut samples).await;
80 samples.iter().cloned().collect()
81 }
82
83 pub async fn get_samples_for_endpoint(&self, endpoint: &str) -> Vec<LatencySample> {
85 let samples = self.get_samples().await;
86 samples
87 .into_iter()
88 .filter(|s| s.endpoint.as_ref().map(|e| e == endpoint).unwrap_or(false))
89 .collect()
90 }
91
92 pub async fn get_samples_in_range(
94 &self,
95 start: DateTime<Utc>,
96 end: DateTime<Utc>,
97 ) -> Vec<LatencySample> {
98 let samples = self.get_samples().await;
99 samples
100 .into_iter()
101 .filter(|s| s.timestamp >= start && s.timestamp <= end)
102 .collect()
103 }
104
105 async fn cleanup_old_samples(&self, samples: &mut VecDeque<LatencySample>) {
107 let now = Utc::now();
108 let cutoff = now
109 .checked_sub_signed(chrono::Duration::seconds(self.max_age_seconds as i64))
110 .unwrap_or(now);
111
112 while samples.front().map(|s| s.timestamp < cutoff).unwrap_or(false) {
114 samples.pop_front();
115 }
116
117 while samples.len() > self.max_samples {
119 samples.pop_front();
120 }
121 }
122
123 pub async fn clear(&self) {
125 let mut samples = self.samples.write().await;
126 samples.clear();
127 }
128
129 pub async fn sample_count(&self) -> usize {
131 let samples = self.samples.read().await;
132 samples.len()
133 }
134}
135
/// Computes aggregate statistics over the samples held by a
/// [`LatencyRecorder`].
#[derive(Debug, Clone)]
pub struct LatencyAnalyzer {
    /// Source of the samples to analyze.
    recorder: Arc<LatencyRecorder>,
}
143
144impl LatencyAnalyzer {
145 pub fn new(recorder: Arc<LatencyRecorder>) -> Self {
147 Self { recorder }
148 }
149
150 pub async fn calculate_stats(&self) -> LatencyStats {
152 let samples = self.recorder.get_samples().await;
153 self.calculate_stats_from_samples(&samples)
154 }
155
156 pub async fn calculate_stats_for_endpoint(&self, endpoint: &str) -> LatencyStats {
158 let samples = self.recorder.get_samples_for_endpoint(endpoint).await;
159 self.calculate_stats_from_samples(&samples)
160 }
161
162 fn calculate_stats_from_samples(&self, samples: &[LatencySample]) -> LatencyStats {
164 if samples.is_empty() {
165 return LatencyStats::default();
166 }
167
168 let mut latencies: Vec<u64> = samples.iter().map(|s| s.latency_ms).collect();
169 latencies.sort();
170
171 let count = latencies.len();
172 let sum: u64 = latencies.iter().sum();
173 let avg = sum as f64 / count as f64;
174
175 let min = latencies[0];
176 let max = latencies[count - 1];
177 let median = if count % 2 == 0 {
178 (latencies[count / 2 - 1] + latencies[count / 2]) as f64 / 2.0
179 } else {
180 latencies[count / 2] as f64
181 };
182
183 let p95 = if count > 0 {
184 latencies[(count as f64 * 0.95) as usize]
185 } else {
186 0
187 };
188
189 let p99 = if count > 0 {
190 latencies[(count as f64 * 0.99) as usize]
191 } else {
192 0
193 };
194
195 let error_count = samples
197 .iter()
198 .filter(|s| s.error.is_some() || s.status_code.map(|c| c >= 400).unwrap_or(false))
199 .count();
200 let error_rate = error_count as f64 / count as f64;
201
202 LatencyStats {
203 count,
204 min,
205 max,
206 avg,
207 median,
208 p95,
209 p99,
210 error_rate,
211 }
212 }
213}
214
/// Aggregate latency statistics computed from a set of samples.
///
/// `Default` yields all-zero stats, which is what the analyzer returns
/// for an empty sample set.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LatencyStats {
    /// Number of samples aggregated.
    pub count: usize,
    /// Minimum latency in milliseconds.
    pub min: u64,
    /// Maximum latency in milliseconds.
    pub max: u64,
    /// Mean latency in milliseconds.
    pub avg: f64,
    /// Median latency in milliseconds.
    pub median: f64,
    /// 95th-percentile latency in milliseconds.
    pub p95: u64,
    /// 99th-percentile latency in milliseconds.
    pub p99: u64,
    /// Fraction of samples (0.0..=1.0) that had an error message or a
    /// status code >= 400.
    pub error_rate: f64,
}
235
#[cfg(test)]
mod tests {
    use super::*;

    /// Recording two samples yields two samples back.
    #[tokio::test]
    async fn test_latency_recorder() {
        let recorder = LatencyRecorder::new(1000, 300);

        for latency in [100u64, 150] {
            recorder
                .record(
                    latency,
                    Some("/api/users".to_string()),
                    Some("GET".to_string()),
                    Some(200),
                    None,
                )
                .await;
        }

        assert_eq!(recorder.get_samples().await.len(), 2);
    }

    /// Stats over a known fixture report the expected count/min/max.
    #[tokio::test]
    async fn test_latency_analyzer() {
        let recorder = Arc::new(LatencyRecorder::new(1000, 300));
        let analyzer = LatencyAnalyzer::new(Arc::clone(&recorder));

        let fixture = [100u64, 150, 200, 250, 300];
        for &latency in &fixture {
            recorder.record(latency, None, None, None, None).await;
        }

        let stats = analyzer.calculate_stats().await;
        assert_eq!(stats.count, fixture.len());
        assert_eq!(stats.min, 100);
        assert_eq!(stats.max, 300);
    }
}