fastapi_core/loadtest.rs

//! Load testing utilities for stress testing request handlers.
//!
//! Provides a configurable load generator that drives request handlers in
//! concurrency-sized batches and collects latency/error statistics.
//!
//! # Example
//!
//! ```
//! use fastapi_core::loadtest::{LoadTest, LoadTestConfig};
//!
//! let config = LoadTestConfig::new()
//!     .total_requests(1000)
//!     .concurrency(10);
//!
//! let report = LoadTest::run(&config, |i| {
//!     // Simulate request work (return Ok for success, Err for failure).
//!     if i % 100 == 99 { Err("simulated error".into()) } else { Ok(()) }
//! });
//!
//! assert!(report.success_rate() > 0.95);
//! println!("{report}");
//! ```

use std::fmt;
use std::time::{Duration, Instant};

/// Configuration for a load test.
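///
/// # Examples
///
/// A minimal builder sketch; the specific numbers are illustrative only:
///
/// ```
/// use fastapi_core::loadtest::LoadTestConfig;
///
/// let config = LoadTestConfig::new()
///     .total_requests(500)
///     .concurrency(8)
///     .warmup(50);
///
/// assert_eq!(config.total_requests, 500);
/// assert_eq!(config.concurrency, 8);
/// ```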
#[derive(Debug, Clone)]
pub struct LoadTestConfig {
    /// Total number of requests to execute.
    pub total_requests: usize,
    /// Number of concurrent workers.
    pub concurrency: usize,
    /// Optional warmup requests (not counted in results).
    pub warmup: usize,
}

impl Default for LoadTestConfig {
    fn default() -> Self {
        Self {
            total_requests: 1000,
            concurrency: 1,
            warmup: 0,
        }
    }
}

impl LoadTestConfig {
    /// Create a new config with defaults.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Set total number of requests.
    #[must_use]
    pub fn total_requests(mut self, n: usize) -> Self {
        self.total_requests = n;
        self
    }

    /// Set concurrency level.
    #[must_use]
    pub fn concurrency(mut self, n: usize) -> Self {
        self.concurrency = n.max(1);
        self
    }

    /// Set warmup request count.
    #[must_use]
    pub fn warmup(mut self, n: usize) -> Self {
        self.warmup = n;
        self
    }
}

/// Result of a single request in the load test.
#[derive(Debug)]
struct RequestResult {
    latency: Duration,
    success: bool,
}

/// Report from a completed load test.
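///
/// # Examples
///
/// A brief sketch of reading the summary metrics after a run; the closure is a
/// no-op stand-in for real request work:
///
/// ```
/// use fastapi_core::loadtest::{LoadTest, LoadTestConfig};
///
/// let config = LoadTestConfig::new().total_requests(50).concurrency(2);
/// let report = LoadTest::run(&config, |_| Ok(()));
///
/// assert_eq!(report.successes + report.failures, report.total);
/// println!("throughput: {:.1} req/s", report.rps());
/// ```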
#[derive(Debug, Clone)]
pub struct LoadTestReport {
    /// Total requests executed.
    pub total: usize,
    /// Successful requests.
    pub successes: usize,
    /// Failed requests.
    pub failures: usize,
    /// Total elapsed wall-clock time.
    pub elapsed: Duration,
    /// Sorted latency samples.
    latencies: Vec<Duration>,
}

impl LoadTestReport {
    /// Success rate as a fraction [0.0, 1.0].
    #[must_use]
    #[allow(clippy::cast_precision_loss)]
    pub fn success_rate(&self) -> f64 {
        if self.total == 0 {
            return 0.0;
        }
        self.successes as f64 / self.total as f64
    }

    /// Error rate as a fraction [0.0, 1.0].
    #[must_use]
    pub fn error_rate(&self) -> f64 {
        1.0 - self.success_rate()
    }

    /// Requests per second throughput.
    #[must_use]
    #[allow(clippy::cast_precision_loss)]
    pub fn rps(&self) -> f64 {
        if self.elapsed.is_zero() {
            return 0.0;
        }
        self.total as f64 / self.elapsed.as_secs_f64()
    }

    /// Get latency percentile (e.g., 0.50 for p50, 0.99 for p99).
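    ///
    /// # Examples
    ///
    /// A small sketch of reading a percentile off a finished run; the handler
    /// here does no real work:
    ///
    /// ```
    /// use fastapi_core::loadtest::{LoadTest, LoadTestConfig};
    ///
    /// let config = LoadTestConfig::new().total_requests(100).concurrency(4);
    /// let report = LoadTest::run(&config, |_| Ok(()));
    ///
    /// if let Some(p99) = report.percentile(0.99) {
    ///     println!("p99 latency: {p99:.2?}");
    /// }
    /// ```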
    #[must_use]
    pub fn percentile(&self, p: f64) -> Option<Duration> {
        if self.latencies.is_empty() {
            return None;
        }
        #[allow(
            clippy::cast_precision_loss,
            clippy::cast_possible_truncation,
            clippy::cast_sign_loss
        )]
        let idx = ((p * self.latencies.len() as f64) as usize).min(self.latencies.len() - 1);
        Some(self.latencies[idx])
    }

    /// Minimum latency.
    #[must_use]
    pub fn min_latency(&self) -> Option<Duration> {
        self.latencies.first().copied()
    }

    /// Maximum latency.
    #[must_use]
    pub fn max_latency(&self) -> Option<Duration> {
        self.latencies.last().copied()
    }

    /// Mean latency.
    #[must_use]
    pub fn mean_latency(&self) -> Option<Duration> {
        if self.latencies.is_empty() {
            return None;
        }
        let sum: Duration = self.latencies.iter().sum();
        Some(sum / self.latencies.len() as u32)
    }
}

impl fmt::Display for LoadTestReport {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Load Test Report")?;
        writeln!(f, "  Total:    {}", self.total)?;
        writeln!(
            f,
            "  Success:  {} ({:.1}%)",
            self.successes,
            self.success_rate() * 100.0
        )?;
        writeln!(
            f,
            "  Failures: {} ({:.1}%)",
            self.failures,
            self.error_rate() * 100.0
        )?;
        writeln!(f, "  Elapsed:  {:.2?}", self.elapsed)?;
        writeln!(f, "  RPS:      {:.1}", self.rps())?;
        if let Some(p50) = self.percentile(0.50) {
            writeln!(f, "  p50:      {p50:.2?}")?;
        }
        if let Some(p95) = self.percentile(0.95) {
            writeln!(f, "  p95:      {p95:.2?}")?;
        }
        if let Some(p99) = self.percentile(0.99) {
            writeln!(f, "  p99:      {p99:.2?}")?;
        }
        Ok(())
    }
}

/// Load test runner.
pub struct LoadTest;

impl LoadTest {
    /// Run a synchronous load test.
    ///
    /// The `handler` receives a request index and returns `Ok(())` on success
    /// or `Err` on failure. Requests are grouped into batches of
    /// `concurrency`, but every request executes sequentially on the calling
    /// thread; no worker threads are spawned.
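    ///
    /// # Examples
    ///
    /// A minimal sketch using a warmup phase; warmup requests run first and
    /// are excluded from the report:
    ///
    /// ```
    /// use fastapi_core::loadtest::{LoadTest, LoadTestConfig};
    ///
    /// let config = LoadTestConfig::new()
    ///     .total_requests(200)
    ///     .concurrency(4)
    ///     .warmup(20);
    ///
    /// let report = LoadTest::run(&config, |_| Ok(()));
    /// assert_eq!(report.total, 200); // warmup requests are not counted
    /// ```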
    pub fn run<F>(config: &LoadTestConfig, mut handler: F) -> LoadTestReport
    where
        F: FnMut(usize) -> Result<(), Box<dyn std::error::Error>>,
    {
        // Warmup phase: results are discarded.
        for i in 0..config.warmup {
            let _ = handler(i);
        }

        let mut results = Vec::with_capacity(config.total_requests);
        let start = Instant::now();

        // Execute requests in batches of `concurrency`.
        let mut remaining = config.total_requests;
        let mut req_index = 0;
        while remaining > 0 {
            let batch_size = remaining.min(config.concurrency);
            for _ in 0..batch_size {
                let req_start = Instant::now();
                let success = handler(req_index).is_ok();
                results.push(RequestResult {
                    latency: req_start.elapsed(),
                    success,
                });
                req_index += 1;
            }
            remaining -= batch_size;
        }

        let elapsed = start.elapsed();

        let successes = results.iter().filter(|r| r.success).count();
        let failures = results.len() - successes;

        let mut latencies: Vec<Duration> = results.iter().map(|r| r.latency).collect();
        latencies.sort();

        LoadTestReport {
            total: results.len(),
            successes,
            failures,
            elapsed,
            latencies,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn basic_load_test() {
        let config = LoadTestConfig::new().total_requests(100).concurrency(5);
        let report = LoadTest::run(&config, |_| Ok(()));
        assert_eq!(report.total, 100);
        assert_eq!(report.successes, 100);
        assert_eq!(report.failures, 0);
        assert!((report.success_rate() - 1.0).abs() < f64::EPSILON);
    }

    #[test]
    fn load_test_with_failures() {
        let config = LoadTestConfig::new().total_requests(100).concurrency(1);
        let report = LoadTest::run(&config, |i| {
            if i % 10 == 0 {
                Err("fail".into())
            } else {
                Ok(())
            }
        });
        assert_eq!(report.total, 100);
        assert_eq!(report.failures, 10);
        assert_eq!(report.successes, 90);
        assert!((report.error_rate() - 0.1).abs() < f64::EPSILON);
    }

    #[test]
    fn load_test_percentiles() {
        let config = LoadTestConfig::new().total_requests(100).concurrency(1);
        let report = LoadTest::run(&config, |_| Ok(()));
        assert!(report.percentile(0.50).is_some());
        assert!(report.percentile(0.95).is_some());
        assert!(report.percentile(0.99).is_some());
        assert!(report.min_latency().is_some());
        assert!(report.max_latency().is_some());
        assert!(report.mean_latency().is_some());
    }

    #[test]
    fn load_test_rps() {
        let config = LoadTestConfig::new().total_requests(50).concurrency(10);
        let report = LoadTest::run(&config, |_| Ok(()));
        assert!(report.rps() > 0.0);
    }

    #[test]
    fn load_test_with_warmup() {
        let config = LoadTestConfig::new()
            .total_requests(50)
            .warmup(10)
            .concurrency(1);
        let report = LoadTest::run(&config, |_| Ok(()));
        // Warmup requests aren't counted.
        assert_eq!(report.total, 50);
    }

    #[test]
    fn load_test_display() {
        let config = LoadTestConfig::new().total_requests(10).concurrency(1);
        let report = LoadTest::run(&config, |_| Ok(()));
        let display = format!("{report}");
        assert!(display.contains("Load Test Report"));
        assert!(display.contains("RPS:"));
    }

    #[test]
    #[allow(clippy::float_cmp)]
    fn empty_report() {
        let config = LoadTestConfig::new().total_requests(0).concurrency(1);
        let report = LoadTest::run(&config, |_| Ok(()));
        assert_eq!(report.total, 0);
        assert_eq!(report.success_rate(), 0.0);
        assert_eq!(report.rps(), 0.0);
        assert!(report.percentile(0.50).is_none());
    }

    #[test]
    fn config_defaults() {
        let config = LoadTestConfig::default();
        assert_eq!(config.total_requests, 1000);
        assert_eq!(config.concurrency, 1);
        assert_eq!(config.warmup, 0);
    }

    #[test]
    fn concurrency_minimum_is_one() {
        let config = LoadTestConfig::new().concurrency(0);
        assert_eq!(config.concurrency, 1);
    }
}