use crate::{
    core::HandlerFn,
    types::{HttpMethod, Request, Response, StatusCode},
};
use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
    time::{Duration, Instant},
};
use tokio::time::sleep;

#[cfg(feature = "benchmarks")]
use criterion::{Criterion, black_box, criterion_group, criterion_main};

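/// Aggregated latency and resource statistics for one benchmark run.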
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    pub request_count: u64,
    pub total_duration: Duration,
    pub min_latency: Duration,
    pub max_latency: Duration,
    pub avg_latency: Duration,
    pub p50_latency: Duration,
    pub p95_latency: Duration,
    pub p99_latency: Duration,
    pub throughput_rps: f64,
    pub memory_usage_mb: f64,
    pub cpu_usage_percent: f64,
}

impl Default for PerformanceMetrics {
    fn default() -> Self {
        Self {
            request_count: 0,
            total_duration: Duration::from_secs(0),
            // Sentinel: any recorded latency is smaller than this.
            min_latency: Duration::from_secs(u64::MAX),
            max_latency: Duration::from_secs(0),
            avg_latency: Duration::from_secs(0),
            p50_latency: Duration::from_secs(0),
            p95_latency: Duration::from_secs(0),
            p99_latency: Duration::from_secs(0),
            throughput_rps: 0.0,
            memory_usage_mb: 0.0,
            cpu_usage_percent: 0.0,
        }
    }
}

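/// Tunable parameters for a benchmark run.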
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    pub duration: Duration,
    pub concurrent_requests: usize,
    pub target_rps: Option<f64>,
    pub warmup_duration: Duration,
    pub request_size_bytes: usize,
    pub response_size_bytes: usize,
}

impl Default for BenchmarkConfig {
    fn default() -> Self {
        Self {
            duration: Duration::from_secs(10),
            concurrent_requests: 100,
            target_rps: None,
            warmup_duration: Duration::from_secs(2),
            request_size_bytes: 1024,
            response_size_bytes: 1024,
        }
    }
}

impl BenchmarkConfig {
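    /// Preset targeting sub-millisecond latencies: tiny payloads, 1k workers, 100k RPS target.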
    pub fn ultra_low_latency() -> Self {
        Self {
            duration: Duration::from_secs(30),
            concurrent_requests: 1000,
            target_rps: Some(100_000.0),
            warmup_duration: Duration::from_secs(5),
            request_size_bytes: 64,
            response_size_bytes: 64,
        }
    }

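    /// Preset for sustained throughput: larger payloads, 2k workers, 50k RPS target.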
    pub fn high_throughput() -> Self {
        Self {
            duration: Duration::from_secs(60),
            concurrent_requests: 2000,
            target_rps: Some(50_000.0),
            warmup_duration: Duration::from_secs(10),
            request_size_bytes: 8192,
            response_size_bytes: 8192,
        }
    }

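    /// Preset for stress testing: five minutes of unthrottled load from 5k workers.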
    pub fn stress_test() -> Self {
        Self {
            duration: Duration::from_secs(300),
            concurrent_requests: 5000,
            target_rps: None,
            warmup_duration: Duration::from_secs(30),
            request_size_bytes: 16384,
            response_size_bytes: 16384,
        }
    }
}

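/// Thread-safe accumulator for per-request latencies.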
#[derive(Debug, Clone)]
pub struct LatencyCollector {
    latencies: Arc<Mutex<Vec<Duration>>>,
    start_time: Instant,
}

impl LatencyCollector {
    pub fn new() -> Self {
        Self {
            latencies: Arc::new(Mutex::new(Vec::new())),
            start_time: Instant::now(),
        }
    }

    pub fn record_latency(&self, latency: Duration) {
        let mut latencies = self.latencies.lock().unwrap();
        latencies.push(latency);
    }

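    /// Sorts the recorded latencies and derives min/max/average plus p50/p95/p99 percentiles.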
    pub fn calculate_metrics(&self) -> PerformanceMetrics {
        let latencies = self.latencies.lock().unwrap();
        let mut sorted_latencies = latencies.clone();
        sorted_latencies.sort_unstable();

        let request_count = sorted_latencies.len() as u64;
        let total_duration = self.start_time.elapsed();

        if request_count == 0 {
            return PerformanceMetrics::default();
        }

        let min_latency = sorted_latencies[0];
        let max_latency = sorted_latencies[sorted_latencies.len() - 1];

        let avg_latency = Duration::from_nanos(
            (sorted_latencies.iter().map(|d| d.as_nanos()).sum::<u128>() / request_count as u128)
                as u64,
        );

        // Nearest-rank percentiles; `len * p / 100` is always < len, so indexing is safe.
        let p50_latency = sorted_latencies[sorted_latencies.len() * 50 / 100];
        let p95_latency = sorted_latencies[sorted_latencies.len() * 95 / 100];
        let p99_latency = sorted_latencies[sorted_latencies.len() * 99 / 100];

        let throughput_rps = request_count as f64 / total_duration.as_secs_f64();

        PerformanceMetrics {
            request_count,
            total_duration,
            min_latency,
            max_latency,
            avg_latency,
            p50_latency,
            p95_latency,
            p99_latency,
            throughput_rps,
            memory_usage_mb: get_memory_usage_mb(),
            cpu_usage_percent: get_cpu_usage_percent(),
        }
    }
}

impl Default for LatencyCollector {
    fn default() -> Self {
        Self::new()
    }
}

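/// Drives benchmark suites against a single adapter's request handler.
///
/// A minimal usage sketch (the no-op handler below is illustrative, not part of this module):
///
/// ```ignore
/// let handler: HandlerFn =
///     Arc::new(|_req| Box::pin(async { Ok(Response::new(StatusCode::OK)) }));
/// let bench = AdapterBenchmark::new("my_adapter", BenchmarkConfig::default());
/// let results = bench.run_benchmark_suite(handler).await;
/// println!("{}", results.generate_report());
/// ```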
pub struct AdapterBenchmark {
    pub adapter_name: String,
    pub config: BenchmarkConfig,
}

impl AdapterBenchmark {
    pub fn new(adapter_name: &str, config: BenchmarkConfig) -> Self {
        Self {
            adapter_name: adapter_name.to_string(),
            config,
        }
    }

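    /// Runs the ultra-low-latency, high-throughput, and standard (configured) benchmarks in sequence.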
    pub async fn run_benchmark_suite(&self, handler: HandlerFn) -> BenchmarkResults {
        let mut results = BenchmarkResults::new(&self.adapter_name);

        println!(
            "Running ultra-low latency benchmark for {}...",
            self.adapter_name
        );
        let ultra_low_config = BenchmarkConfig::ultra_low_latency();
        let ultra_low_metrics = self
            .run_single_benchmark(&ultra_low_config, handler.clone())
            .await;
        results.add_result("ultra_low_latency", ultra_low_metrics);

        println!(
            "Running high throughput benchmark for {}...",
            self.adapter_name
        );
        let high_throughput_config = BenchmarkConfig::high_throughput();
        let high_throughput_metrics = self
            .run_single_benchmark(&high_throughput_config, handler.clone())
            .await;
        results.add_result("high_throughput", high_throughput_metrics);

        println!("Running standard benchmark for {}...", self.adapter_name);
        let standard_metrics = self
            .run_single_benchmark(&self.config, handler.clone())
            .await;
        results.add_result("standard", standard_metrics);

        results
    }

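    /// Warms up, then drives concurrent load for `config.duration` and returns the aggregated metrics.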
    pub async fn run_single_benchmark(
        &self,
        config: &BenchmarkConfig,
        handler: HandlerFn,
    ) -> PerformanceMetrics {
        let collector = LatencyCollector::new();

        println!("Warming up for {:?}...", config.warmup_duration);
        self.run_warmup(config, handler.clone()).await;

        println!("Running benchmark for {:?}...", config.duration);
        let tasks = self
            .spawn_benchmark_tasks(config, handler, collector.clone())
            .await;

        // Allow a 10-second grace period beyond the configured duration before giving up.
        let timeout = tokio::time::timeout(
            config.duration + Duration::from_secs(10),
            futures::future::join_all(tasks),
        );

        match timeout.await {
            Ok(_) => println!("Benchmark completed successfully"),
            Err(_) => println!("Benchmark timed out"),
        }

        collector.calculate_metrics()
    }

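    /// Fires a burst of requests (10% of the configured concurrency, at least 10) to warm caches and pools.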
    async fn run_warmup(&self, config: &BenchmarkConfig, handler: HandlerFn) {
        let warmup_requests = (config.concurrent_requests / 10).max(10);
        let mut tasks = Vec::new();

        for _ in 0..warmup_requests {
            let handler_clone = handler.clone();
            tasks.push(tokio::spawn(async move {
                let request = create_benchmark_request();
                let _ = handler_clone(request).await;
            }));
        }

        let _ =
            tokio::time::timeout(config.warmup_duration, futures::future::join_all(tasks)).await;
    }

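    /// Spawns one task per concurrent request; each loops until the deadline, recording per-request
    /// latency and optionally sleeping between requests to pace the aggregate rate at `target_rps`.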
    async fn spawn_benchmark_tasks(
        &self,
        config: &BenchmarkConfig,
        handler: HandlerFn,
        collector: LatencyCollector,
    ) -> Vec<tokio::task::JoinHandle<()>> {
        let mut tasks = Vec::new();
        let end_time = Instant::now() + config.duration;
        let concurrent_requests = config.concurrent_requests;
        let target_rps = config.target_rps;

        for _ in 0..concurrent_requests {
            let handler_clone = handler.clone();
            let collector_clone = collector.clone();
            let end_time_clone = end_time;

            tasks.push(tokio::spawn(async move {
                while Instant::now() < end_time_clone {
                    let request_start = Instant::now();
                    let request = create_benchmark_request();

                    match handler_clone(request).await {
                        Ok(_) => {
                            let latency = request_start.elapsed();
                            collector_clone.record_latency(latency);
                        }
                        Err(_) => {
                            // Failed requests are excluded from the latency statistics.
                        }
                    }

                    if let Some(target_rps) = target_rps {
                        // Each of the N workers sleeps N / target_rps seconds per iteration,
                        // so the aggregate rate approaches target_rps.
                        let target_interval =
                            Duration::from_secs_f64(concurrent_requests as f64 / target_rps);
                        sleep(target_interval).await;
                    }
                }
            }));
        }

        tasks
    }
}

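/// Named benchmark runs for one adapter, keyed by benchmark name, with a Unix-epoch timestamp.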
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkResults {
    pub adapter_name: String,
    pub results: HashMap<String, PerformanceMetrics>,
    pub timestamp: u64,
}

impl BenchmarkResults {
    pub fn new(adapter_name: &str) -> Self {
        Self {
            adapter_name: adapter_name.to_string(),
            results: HashMap::new(),
            timestamp: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs(),
        }
    }

    pub fn add_result(&mut self, benchmark_name: &str, metrics: PerformanceMetrics) {
        self.results.insert(benchmark_name.to_string(), metrics);
    }

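    /// Checks the "ultra_low_latency" run against the target thresholds:
    /// p99 < 1 ms, p95 < 500 µs, average < 200 µs, and throughput above 50,000 RPS.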
    pub fn meets_ultra_low_latency_requirements(&self) -> bool {
        if let Some(metrics) = self.results.get("ultra_low_latency") {
            metrics.p99_latency < Duration::from_millis(1)
                && metrics.p95_latency < Duration::from_micros(500)
                && metrics.avg_latency < Duration::from_micros(200)
                && metrics.throughput_rps > 50_000.0
        } else {
            false
        }
    }

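    /// Renders a Markdown report with one section per benchmark (in `HashMap` iteration order,
    /// i.e. unordered) followed by a pass/fail verdict for the ultra-low-latency requirements.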
    pub fn generate_report(&self) -> String {
        let mut report = format!("# Benchmark Report for {}\n\n", self.adapter_name);

        for (benchmark_name, metrics) in &self.results {
            report.push_str(&format!("## {} Benchmark\n\n", benchmark_name));
            report.push_str(&format!("- **Requests**: {}\n", metrics.request_count));
            report.push_str(&format!("- **Duration**: {:?}\n", metrics.total_duration));
            report.push_str(&format!(
                "- **Throughput**: {:.2} RPS\n",
                metrics.throughput_rps
            ));
            report.push_str(&format!(
                "- **Average Latency**: {:?}\n",
                metrics.avg_latency
            ));
            report.push_str(&format!("- **P50 Latency**: {:?}\n", metrics.p50_latency));
            report.push_str(&format!("- **P95 Latency**: {:?}\n", metrics.p95_latency));
            report.push_str(&format!("- **P99 Latency**: {:?}\n", metrics.p99_latency));
            report.push_str(&format!("- **Min Latency**: {:?}\n", metrics.min_latency));
            report.push_str(&format!("- **Max Latency**: {:?}\n", metrics.max_latency));
            report.push_str(&format!(
                "- **Memory Usage**: {:.2} MB\n",
                metrics.memory_usage_mb
            ));
            report.push_str(&format!(
                "- **CPU Usage**: {:.2}%\n\n",
                metrics.cpu_usage_percent
            ));
        }

        if self.meets_ultra_low_latency_requirements() {
            report.push_str("✅ **PASSED**: Meets ultra-low latency requirements\n\n");
        } else {
            report.push_str("❌ **FAILED**: Does not meet ultra-low latency requirements\n\n");
        }

        report
    }

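    /// Serializes the results to pretty-printed JSON at `path`; `load_from_file` reverses this.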
    pub fn save_to_file(&self, path: &str) -> Result<(), std::io::Error> {
        let json = serde_json::to_string_pretty(self)?;
        std::fs::write(path, json)?;
        Ok(())
    }

    pub fn load_from_file(path: &str) -> Result<Self, Box<dyn std::error::Error>> {
        let json = std::fs::read_to_string(path)?;
        let results = serde_json::from_str(&json)?;
        Ok(results)
    }
}

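/// Builds the fixed GET request issued by every benchmark task.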
fn create_benchmark_request() -> Request {
    Request {
        method: HttpMethod::GET,
        uri: http::Uri::from_static("http://localhost:8080/benchmark"),
        version: http::Version::HTTP_11,
        headers: {
            let mut headers = crate::types::Headers::new();
            headers.insert("user-agent".to_string(), "benchmark-client/1.0".to_string());
            headers.insert("accept".to_string(), "application/json".to_string());
            headers
        },
        body: crate::types::Body::from_string("benchmark request"),
        extensions: std::collections::HashMap::new(),
        path_params: std::collections::HashMap::new(),
        cookies: std::collections::HashMap::new(),
        form_data: None,
        multipart: None,
    }
}

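/// Reports process memory usage in MB. Currently a fixed placeholder value.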
fn get_memory_usage_mb() -> f64 {
    // Stub: wire up a real probe (e.g. reading /proc/self/status on Linux) when available.
    128.0
}

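/// Reports process CPU usage as a percentage. Currently a fixed placeholder value.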
fn get_cpu_usage_percent() -> f64 {
    // Stub: replace with a real CPU sampler when available.
    15.0
}

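/// Criterion micro-benchmarks, compiled only when the `benchmarks` feature is enabled.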
#[cfg(feature = "benchmarks")]
pub mod criterion_benchmarks {
    use super::*;
    use criterion::{Criterion, black_box};

    pub fn benchmark_handler_performance(c: &mut Criterion) {
        let rt = tokio::runtime::Runtime::new().unwrap();

        let handler: HandlerFn =
            Arc::new(|_req| Box::pin(async { Ok(Response::new(StatusCode::OK)) }));

        c.bench_function("handler_latency", |b| {
            b.iter(|| {
                rt.block_on(async {
                    let request = black_box(create_benchmark_request());
                    black_box(handler(request).await)
                })
            })
        });
    }

    pub fn benchmark_middleware_overhead(c: &mut Criterion) {
        let rt = tokio::runtime::Runtime::new().unwrap();

        c.bench_function("middleware_processing", |b| {
            b.iter(|| {
                rt.block_on(async {
                    let mut request = black_box(create_benchmark_request());
                    // Placeholder for middleware processing; black_box keeps the
                    // request from being optimized away.
                    black_box(&mut request);
                })
            })
        });
    }

    criterion_group!(
        benches,
        benchmark_handler_performance,
        benchmark_middleware_overhead
    );
}

#[cfg(feature = "benchmarks")]
criterion_main!(criterion_benchmarks::benches);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_latency_collector() {
        let collector = LatencyCollector::new();

        collector.record_latency(Duration::from_micros(100));
        collector.record_latency(Duration::from_micros(200));
        collector.record_latency(Duration::from_micros(150));

        let metrics = collector.calculate_metrics();
        assert_eq!(metrics.request_count, 3);
        assert_eq!(metrics.min_latency, Duration::from_micros(100));
        assert_eq!(metrics.max_latency, Duration::from_micros(200));
    }

    #[test]
    fn test_benchmark_config() {
        let ultra_low = BenchmarkConfig::ultra_low_latency();
        assert_eq!(ultra_low.concurrent_requests, 1000);
        assert_eq!(ultra_low.target_rps, Some(100_000.0));

        let stress = BenchmarkConfig::stress_test();
        assert_eq!(stress.concurrent_requests, 5000);
        assert_eq!(stress.duration, Duration::from_secs(300));
    }

    #[test]
    fn test_benchmark_results() {
        let mut results = BenchmarkResults::new("test_adapter");

        // Metrics chosen to sit just inside the ultra-low-latency thresholds.
        let metrics = PerformanceMetrics {
            request_count: 10000,
            p99_latency: Duration::from_micros(800),
            p95_latency: Duration::from_micros(400),
            avg_latency: Duration::from_micros(150),
            throughput_rps: 60_000.0,
            ..Default::default()
        };

        results.add_result("ultra_low_latency", metrics);
        assert!(results.meets_ultra_low_latency_requirements());

        let report = results.generate_report();
        assert!(report.contains("✅ **PASSED**"));
    }
}