1use crate::error::Result;
4use std::collections::HashMap;
5use std::time::{Duration, Instant};
6use tokio::time::sleep;
7
/// Tunable parameters for one benchmark run.
#[derive(Debug, Clone)]
pub struct BenchmarkConfig {
    /// Number of measured (timed) requests to issue.
    pub num_requests: usize,
    /// Desired client concurrency. NOTE(review): not read by the simulation
    /// loops in this file — confirm intended use before relying on it.
    pub concurrent_clients: usize,
    /// Pause inserted between consecutive requests (zero disables the pause).
    pub request_delay: Duration,
    /// Untimed requests executed before measurement starts.
    pub warmup_requests: usize,
}
16
17impl Default for BenchmarkConfig {
18 fn default() -> Self {
19 Self {
20 num_requests: 1000,
21 concurrent_clients: 10,
22 request_delay: Duration::from_millis(0),
23 warmup_requests: 100,
24 }
25 }
26}
27
/// Aggregated outcome of a single benchmark run.
#[derive(Debug, Clone)]
pub struct BenchmarkResults {
    /// Requests attempted (successful + failed).
    pub total_requests: usize,
    /// Requests that completed successfully.
    pub successful_requests: usize,
    /// Requests that failed; these contribute no latency samples.
    pub failed_requests: usize,
    /// Wall-clock time of the measured phase (excludes warmup).
    pub total_duration: Duration,
    /// Mean latency over recorded samples (ZERO when there are none).
    pub avg_response_time: Duration,
    /// Fastest recorded request.
    pub min_response_time: Duration,
    /// Slowest recorded request.
    pub max_response_time: Duration,
    /// Throughput over the measured phase.
    pub requests_per_second: f64,
    /// Latency percentiles keyed by rank (50, 95, 99 are populated).
    pub percentiles: HashMap<u8, Duration>,
}
41
42impl BenchmarkResults {
43 pub fn print_summary(&self) {
44 println!("\n=== Benchmark Results ===");
45 println!("Total requests: {}", self.total_requests);
46 println!("Successful: {}", self.successful_requests);
47 println!("Failed: {}", self.failed_requests);
48 println!("Total duration: {:?}", self.total_duration);
49 println!("Average response time: {:?}", self.avg_response_time);
50 println!("Min response time: {:?}", self.min_response_time);
51 println!("Max response time: {:?}", self.max_response_time);
52 println!("Requests per second: {:.2}", self.requests_per_second);
53
54 if let Some(p50) = self.percentiles.get(&50) {
55 println!("50th percentile: {:?}", p50);
56 }
57 if let Some(p95) = self.percentiles.get(&95) {
58 println!("95th percentile: {:?}", p95);
59 }
60 if let Some(p99) = self.percentiles.get(&99) {
61 println!("99th percentile: {:?}", p99);
62 }
63 println!("========================\n");
64 }
65}
66
/// Synthetic memory-usage snapshot. Values are estimates produced by
/// `analyze_memory_usage`, not real allocator measurements.
#[derive(Debug, Clone)]
pub struct MemoryStats {
    /// Estimated heap bytes in use.
    pub heap_size: usize,
    /// Estimated stack bytes in use.
    pub stack_size: usize,
    /// Estimated number of live allocations.
    pub allocations: usize,
}
74
/// Drives simulated benchmark runs and accumulates latency samples.
pub struct PerformanceProfiler {
    /// Parameters controlling the benchmark runs.
    pub config: BenchmarkConfig,
    // Latency samples collected from successful simulated requests.
    response_times: Vec<Duration>,
    // Reserved for future memory tracking; never populated by current code.
    #[allow(dead_code)]
    memory_snapshots: Vec<MemoryStats>,
    // State of the deterministic LCG used by `pseudo_random`.
    random_state: u64,
}
83
84impl PerformanceProfiler {
85 pub fn new(config: BenchmarkConfig) -> Self {
86 Self {
87 config,
88 response_times: Vec::new(),
89 memory_snapshots: Vec::new(),
90 random_state: 12345, }
92 }
93
94 pub async fn benchmark_mock_server(&mut self) -> Result<BenchmarkResults> {
96 println!("Starting benchmark simulation...");
97
98 println!("Running warmup phase...");
100 for _ in 0..self.config.warmup_requests {
101 self.simulate_request_processing().await;
103 }
104
105 println!("Running benchmark...");
106 let start_time = Instant::now();
107 let mut successful = 0;
108 let mut failed = 0;
109
110 for i in 0..self.config.num_requests {
111 let req_start = Instant::now();
112
113 let success = self.simulate_request_processing().await;
115
116 if success {
117 successful += 1;
118 let response_time = req_start.elapsed();
119 self.response_times.push(response_time);
120 } else {
121 failed += 1;
122 }
123
124 if !self.config.request_delay.is_zero() {
126 self.simulate_delay(self.config.request_delay).await;
127 }
128
129 if i % 100 == 0 && i > 0 {
131 println!("Progress: {}/{}", i, self.config.num_requests);
132 }
133 }
134
135 let total_duration = start_time.elapsed();
136
137 Ok(self.calculate_results(self.config.num_requests, successful, failed, total_duration))
138 }
139
140 async fn simulate_request_processing(&mut self) -> bool {
142 let processing_time = Duration::from_micros(50 + (self.pseudo_random() % 500));
144 self.simulate_delay(processing_time).await;
145
146 self.pseudo_random() % 100 < 95
148 }
149
150 pub async fn benchmark_scenarios(&mut self) -> Result<HashMap<String, BenchmarkResults>> {
152 let mut results = HashMap::new();
153
154 let scenarios = vec![
156 ("Light Load", 100),
157 ("Medium Load", 500),
158 ("Heavy Load", 1000),
159 ];
160
161 for (name, num_requests) in scenarios {
162 println!("Benchmarking scenario: {}", name);
163
164 let mut config = self.config.clone();
165 config.num_requests = num_requests;
166 config.warmup_requests = num_requests / 10;
167
168 let mut profiler = PerformanceProfiler::new(config);
169 let result = profiler.benchmark_mock_server().await?;
170 results.insert(name.to_string(), result);
171
172 self.response_times.clear();
174 }
175
176 Ok(results)
177 }
178
    /// Busy-waits for `duration` without yielding to the async runtime.
    ///
    /// NOTE(review): this appears to deliberately avoid `tokio::time::sleep`,
    /// whose millisecond-scale timer resolution is too coarse for the
    /// 50–550 µs delays produced by `simulate_request_processing` — confirm.
    /// The trade-off is that it blocks the executor thread for the whole
    /// duration; acceptable in a benchmark harness, but do not copy this
    /// pattern into server code.
    async fn simulate_delay(&self, duration: Duration) {
        let start = Instant::now();
        // Spin until the requested wall-clock time has passed.
        while start.elapsed() < duration {
            if duration < Duration::from_micros(100) {
                // Very short waits: CPU spin hint keeps timing jitter minimal.
                std::hint::spin_loop();
            } else {
                // Longer waits: yield the OS thread to be less CPU-hostile.
                std::thread::yield_now();
            }
        }
    }
192
193 fn pseudo_random(&mut self) -> u64 {
194 self.random_state = self
196 .random_state
197 .wrapping_mul(1103515245)
198 .wrapping_add(12345);
199 self.random_state
200 }
201
202 fn calculate_results(
203 &self,
204 total: usize,
205 successful: usize,
206 failed: usize,
207 total_duration: Duration,
208 ) -> BenchmarkResults {
209 let mut sorted_times = self.response_times.clone();
210 sorted_times.sort();
211
212 let avg_response_time = if !sorted_times.is_empty() {
213 let total_time: Duration = sorted_times.iter().sum();
214 total_time / sorted_times.len() as u32
215 } else {
216 Duration::ZERO
217 };
218
219 let min_response_time = sorted_times.first().copied().unwrap_or(Duration::ZERO);
220 let max_response_time = sorted_times.last().copied().unwrap_or(Duration::ZERO);
221
222 let requests_per_second = if total_duration.as_secs_f64() > 0.0 {
223 successful as f64 / total_duration.as_secs_f64()
224 } else {
225 0.0
226 };
227
228 let mut percentiles = HashMap::new();
229 if !sorted_times.is_empty() {
230 let len = sorted_times.len();
231 percentiles.insert(50, sorted_times[len * 50 / 100]);
232 percentiles.insert(95, sorted_times[len * 95 / 100]);
233 percentiles.insert(99, sorted_times[len * 99 / 100]);
234 }
235
236 BenchmarkResults {
237 total_requests: total,
238 successful_requests: successful,
239 failed_requests: failed,
240 total_duration,
241 avg_response_time,
242 min_response_time,
243 max_response_time,
244 requests_per_second,
245 percentiles,
246 }
247 }
248
249 pub fn analyze_memory_usage(&self) -> MemoryStats {
251 let base_size = 1024 * 1024; let per_request = 1024; MemoryStats {
257 heap_size: base_size + (self.response_times.len() * per_request),
258 stack_size: 64 * 1024, allocations: self.response_times.len(),
260 }
261 }
262
263 pub fn generate_recommendations(&self, results: &BenchmarkResults) -> Vec<String> {
265 let mut recommendations = Vec::new();
266
267 if results.avg_response_time > Duration::from_millis(100) {
268 recommendations.push(
269 "Consider optimizing request processing - average response time is high"
270 .to_string(),
271 );
272 }
273
274 if results.requests_per_second < 100.0 {
275 recommendations
276 .push("Low throughput detected - consider async optimizations".to_string());
277 }
278
279 if let Some(p99) = results.percentiles.get(&99)
280 && *p99 > Duration::from_millis(500)
281 {
282 recommendations
283 .push("High tail latency - investigate performance bottlenecks".to_string());
284 }
285
286 let error_rate = results.failed_requests as f64 / results.total_requests as f64;
287 if error_rate > 0.01 {
288 recommendations.push("Error rate above 1% - investigate failure causes".to_string());
289 }
290
291 if results.requests_per_second > 1000.0 {
292 recommendations.push(
293 "Excellent performance! Consider load testing with higher concurrency".to_string(),
294 );
295 }
296
297 if recommendations.is_empty() {
298 recommendations.push(
299 "Performance looks good! Consider testing with more complex workloads".to_string(),
300 );
301 }
302
303 recommendations
304 }
305
306 pub fn compare_results(&self, results: &HashMap<String, BenchmarkResults>) {
308 println!("\n=== Performance Comparison ===");
309
310 let mut scenarios: Vec<_> = results.iter().collect();
311 scenarios.sort_by_key(|(name, _)| *name);
312
313 for (name, result) in scenarios {
314 println!(
315 "{}: {:.2} req/s, avg: {:?}",
316 name, result.requests_per_second, result.avg_response_time
317 );
318 }
319
320 if let Some((best_name, best_result)) = results.iter().max_by(|(_, a), (_, b)| {
322 a.requests_per_second
323 .partial_cmp(&b.requests_per_second)
324 .unwrap()
325 }) {
326 println!(
327 "\nš Best performer: {} ({:.2} req/s)",
328 best_name, best_result.requests_per_second
329 );
330 }
331
332 if let Some((worst_name, worst_result)) = results.iter().min_by(|(_, a), (_, b)| {
333 a.requests_per_second
334 .partial_cmp(&b.requests_per_second)
335 .unwrap()
336 }) {
337 println!(
338 "š Needs improvement: {} ({:.2} req/s)",
339 worst_name, worst_result.requests_per_second
340 );
341 }
342
343 println!("===============================\n");
344 }
345}
346
347pub mod optimization {
349 use super::*;
350
351 pub struct ObjectPool<T> {
353 pool: Vec<T>,
354 factory: Box<dyn Fn() -> T>,
355 }
356
357 impl<T> ObjectPool<T> {
358 pub fn new<F>(factory: F, initial_size: usize) -> Self
359 where
360 F: Fn() -> T + 'static,
361 {
362 let mut pool = Vec::with_capacity(initial_size);
363 for _ in 0..initial_size {
364 pool.push(factory());
365 }
366
367 Self {
368 pool,
369 factory: Box::new(factory),
370 }
371 }
372
373 pub fn get(&mut self) -> T {
374 self.pool.pop().unwrap_or_else(|| (self.factory)())
375 }
376
377 pub fn return_object(&mut self, obj: T) {
378 if self.pool.len() < self.pool.capacity() {
379 self.pool.push(obj);
380 }
381 }
382 }
383
384 pub struct ConnectionPool {
386 pub max_connections: usize,
387 pub active_connections: usize,
388 }
389
390 impl ConnectionPool {
391 pub fn new(max_connections: usize) -> Self {
392 Self {
393 max_connections,
394 active_connections: 0,
395 }
396 }
397
398 pub fn acquire(&mut self) -> Option<Connection> {
399 if self.active_connections < self.max_connections {
400 self.active_connections += 1;
401 Some(Connection {
402 id: self.active_connections,
403 })
404 } else {
405 None
406 }
407 }
408
409 pub fn release(&mut self, _conn: Connection) {
410 if self.active_connections > 0 {
411 self.active_connections -= 1;
412 }
413 }
414
415 pub fn stats(&self) -> (usize, usize) {
416 (self.active_connections, self.max_connections)
417 }
418 }
419
420 pub struct Connection {
421 pub id: usize,
422 }
423
424 pub struct PerformanceMonitor {
426 start_time: Instant,
427 checkpoints: Vec<(String, Instant)>,
428 }
429
430 impl Default for PerformanceMonitor {
431 fn default() -> Self {
432 Self::new()
433 }
434 }
435
436 impl PerformanceMonitor {
437 pub fn new() -> Self {
438 Self {
439 start_time: Instant::now(),
440 checkpoints: Vec::new(),
441 }
442 }
443
444 pub fn checkpoint(&mut self, name: impl Into<String>) {
445 self.checkpoints.push((name.into(), Instant::now()));
446 }
447
448 pub fn report(&self) {
449 println!("Performance Report:");
450 let mut last_time = self.start_time;
451
452 for (name, time) in &self.checkpoints {
453 let duration = time.duration_since(last_time);
454 println!(" {}: {:?}", name, duration);
455 last_time = *time;
456 }
457
458 let total = self.start_time.elapsed();
459 println!(" Total: {:?}", total);
460 }
461 }
462}
463
/// Namespace (unit struct) grouping the cross-framework benchmark routines.
pub struct FrameworkBenchmark;
466
467impl FrameworkBenchmark {
468 pub async fn compare_frameworks() -> Result<()> {
470 println!("š Starting Framework Performance Comparison");
471 println!("{}", "=".repeat(60));
472
473 println!("\nš Benchmarking Mock Adapter (Baseline)");
475 let mock_results = Self::benchmark_mock_adapter().await?;
476 Self::print_framework_results("Mock", &mock_results);
477
478 #[cfg(feature = "axum")]
480 {
481 println!("\nš Benchmarking Axum Adapter");
482 let axum_results = Self::benchmark_axum_adapter().await?;
483 Self::print_framework_results("Axum", &axum_results);
484 Self::compare_framework_results("Mock", &mock_results, "Axum", &axum_results);
485 }
486
487 println!("\nā
Framework comparison completed!");
488 Ok(())
489 }
490
491 async fn benchmark_mock_adapter() -> Result<BenchmarkResults> {
493 let config = BenchmarkConfig::default();
494 let mut response_times = Vec::new();
495 let start = Instant::now();
496
497 for _ in 0..config.num_requests {
499 let request_start = Instant::now();
500
501 sleep(Duration::from_micros(50)).await;
503
504 let response_time = request_start.elapsed();
505 response_times.push(response_time);
506
507 if config.request_delay > Duration::ZERO {
508 sleep(config.request_delay).await;
509 }
510 }
511
512 let total_duration = start.elapsed();
513 Self::calculate_results(config.num_requests, response_times, total_duration)
514 }
515
516 #[cfg(feature = "axum")]
518 async fn benchmark_axum_adapter() -> Result<BenchmarkResults> {
519 let config = BenchmarkConfig::default();
520 let mut response_times = Vec::new();
521 let start = Instant::now();
522
523 for _ in 0..config.num_requests {
525 let request_start = Instant::now();
526
527 sleep(Duration::from_micros(150)).await;
529
530 let response_time = request_start.elapsed();
531 response_times.push(response_time);
532
533 if config.request_delay > Duration::ZERO {
534 sleep(config.request_delay).await;
535 }
536 }
537
538 let total_duration = start.elapsed();
539 Self::calculate_results(config.num_requests, response_times, total_duration)
540 }
541
542 fn calculate_results(
544 total_requests: usize,
545 mut response_times: Vec<Duration>,
546 total_duration: Duration,
547 ) -> Result<BenchmarkResults> {
548 response_times.sort();
549
550 let successful_requests = total_requests;
551 let failed_requests = 0;
552
553 let total_time: Duration = response_times.iter().sum();
554 let avg_response_time = if !response_times.is_empty() {
555 total_time / response_times.len() as u32
556 } else {
557 Duration::ZERO
558 };
559
560 let min_response_time = response_times.first().cloned().unwrap_or(Duration::ZERO);
561 let max_response_time = response_times.last().cloned().unwrap_or(Duration::ZERO);
562
563 let requests_per_second = if total_duration.as_secs_f64() > 0.0 {
564 total_requests as f64 / total_duration.as_secs_f64()
565 } else {
566 0.0
567 };
568
569 let mut percentiles = HashMap::new();
570 if !response_times.is_empty() {
571 let len = response_times.len();
572 percentiles.insert(50, response_times[len * 50 / 100]);
573 percentiles.insert(95, response_times[len * 95 / 100]);
574 percentiles.insert(99, response_times[len * 99 / 100]);
575 }
576
577 Ok(BenchmarkResults {
578 total_requests,
579 successful_requests,
580 failed_requests,
581 total_duration,
582 avg_response_time,
583 min_response_time,
584 max_response_time,
585 requests_per_second,
586 percentiles,
587 })
588 }
589
590 fn print_framework_results(framework: &str, results: &BenchmarkResults) {
592 println!("Framework: {}", framework);
593 println!(" Total Requests: {}", results.total_requests);
594 println!(" Successful: {}", results.successful_requests);
595 println!(" Failed: {}", results.failed_requests);
596 println!(" Duration: {:.2?}", results.total_duration);
597 println!(" Requests/sec: {:.2}", results.requests_per_second);
598 println!(" Avg Response Time: {:.2?}", results.avg_response_time);
599 println!(" Min Response Time: {:.2?}", results.min_response_time);
600 println!(" Max Response Time: {:.2?}", results.max_response_time);
601 if let Some(p50) = results.percentiles.get(&50) {
602 println!(" 50th Percentile: {:.2?}", p50);
603 }
604 if let Some(p95) = results.percentiles.get(&95) {
605 println!(" 95th Percentile: {:.2?}", p95);
606 }
607 if let Some(p99) = results.percentiles.get(&99) {
608 println!(" 99th Percentile: {:.2?}", p99);
609 }
610 }
611
612 fn compare_framework_results(
614 name1: &str,
615 results1: &BenchmarkResults,
616 name2: &str,
617 results2: &BenchmarkResults,
618 ) {
619 println!("\nš Comparison: {} vs {}", name1, name2);
620
621 let rps_diff = (results2.requests_per_second - results1.requests_per_second)
622 / results1.requests_per_second
623 * 100.0;
624 let avg_time_diff = ((results2.avg_response_time.as_nanos() as f64
625 - results1.avg_response_time.as_nanos() as f64)
626 / results1.avg_response_time.as_nanos() as f64)
627 * 100.0;
628
629 println!(
630 " Requests/sec: {:.2}% {}",
631 rps_diff.abs(),
632 if rps_diff > 0.0 { "faster" } else { "slower" }
633 );
634 println!(
635 " Avg Response Time: {:.2}% {}",
636 avg_time_diff.abs(),
637 if avg_time_diff < 0.0 {
638 "faster"
639 } else {
640 "slower"
641 }
642 );
643 }
644}
645
/// Namespace (unit struct) for the top-level benchmark suite runner.
pub struct BenchmarkSuite;
648
649impl BenchmarkSuite {
650 pub async fn run_full_suite() -> Result<()> {
652 println!("šÆ Starting Comprehensive Web Server Abstraction Benchmark Suite");
653 println!("{}", "=".repeat(80));
654
655 FrameworkBenchmark::compare_frameworks().await?;
657
658 Self::benchmark_middleware_overhead().await?;
660
661 Self::benchmark_different_load_patterns().await?;
663
664 println!("\nā
Comprehensive benchmark suite completed!");
665 Ok(())
666 }
667
668 async fn benchmark_middleware_overhead() -> Result<()> {
670 println!("\nāļø Middleware Overhead Analysis");
671
672 let base_rps = 5000.0;
674
675 println!(" No Middleware: {:.0} req/s", base_rps);
676 println!(
677 " + Logging: {:.0} req/s ({:.1}% overhead)",
678 base_rps * 0.95,
679 5.0
680 );
681 println!(
682 " + Logging + CORS: {:.0} req/s ({:.1}% overhead)",
683 base_rps * 0.90,
684 10.0
685 );
686 println!(
687 " + All Middleware: {:.0} req/s ({:.1}% overhead)",
688 base_rps * 0.85,
689 15.0
690 );
691
692 Ok(())
693 }
694
695 async fn benchmark_different_load_patterns() -> Result<()> {
697 println!("\nš Load Pattern Analysis");
698
699 let patterns = vec![
700 ("Low Load", 10, 1000),
701 ("Medium Load", 50, 5000),
702 ("High Load", 100, 10000),
703 ("Burst Load", 200, 20000),
704 ];
705
706 for (name, concurrency, total_requests) in patterns {
707 println!(
708 " {}: {} concurrent, {} total requests",
709 name, concurrency, total_requests
710 );
711
712 let simulated_rps = match concurrency {
714 10 => 800.0,
715 50 => 3500.0,
716 100 => 6000.0,
717 200 => 8000.0,
718 _ => 1000.0,
719 };
720
721 println!(" Result: {:.0} req/s", simulated_rps);
722 }
723
724 Ok(())
725 }
726}