use std::{
collections::HashMap,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
},
time::{Duration, Instant},
};
/// Aggregated outcome of a completed load test run.
#[derive(Debug, Clone)]
pub struct LoadTestResult {
    /// Number of measured (non-warmup) requests: successes plus errors.
    pub total_requests: usize,
    /// Measured requests that completed successfully.
    pub success_count: usize,
    /// Measured requests that returned an error.
    pub error_count: usize,
    /// `error_count / total_requests`, in `[0, 1]`; 0.0 when nothing ran.
    pub error_rate: f64,
    /// Wall-clock duration of the whole run (includes warmup time).
    pub total_time: Duration,
    /// Measured requests per second, computed over `total_time`.
    pub throughput: f64,
    /// Latency distribution of the measured requests.
    pub latency_stats: LatencyStats,
    /// Occurrence count keyed by error message string.
    pub error_types: HashMap<String, usize>,
}
/// Percentile and moment statistics over a set of request latencies.
#[derive(Debug, Clone)]
pub struct LatencyStats {
    /// Smallest observed latency.
    pub min: Duration,
    /// Largest observed latency.
    pub max: Duration,
    /// Arithmetic mean (truncated to whole nanoseconds).
    pub mean: Duration,
    /// Middle element of the sorted samples (upper middle for even counts).
    pub median: Duration,
    /// 50th percentile (nearest-rank).
    pub p50: Duration,
    /// 90th percentile (nearest-rank).
    pub p90: Duration,
    /// 95th percentile (nearest-rank).
    pub p95: Duration,
    /// 99th percentile (nearest-rank).
    pub p99: Duration,
    /// Sample (n-1) standard deviation; `None` with fewer than two samples.
    pub std_dev: Option<Duration>,
}
impl Default for LatencyStats {
fn default() -> Self {
Self {
min: Duration::ZERO,
max: Duration::ZERO,
mean: Duration::ZERO,
median: Duration::ZERO,
p50: Duration::ZERO,
p90: Duration::ZERO,
p95: Duration::ZERO,
p99: Duration::ZERO,
std_dev: None,
}
}
}
/// Tunable parameters for a load test; build fluently via the setter methods.
#[derive(Debug, Clone)]
pub struct LoadTestConfig {
    /// Number of worker threads issuing requests concurrently.
    pub concurrency: usize,
    /// Target number of measured requests across all workers.
    pub total_requests: usize,
    /// Explicit measured-request count per worker; 0 means derive it from
    /// `total_requests / concurrency` (rounded up).
    pub requests_per_worker: usize,
    /// Requests issued before measurement starts; their results are discarded.
    pub warmup_requests: usize,
    /// Optional pause inserted after each request (warmup included).
    pub request_delay: Option<Duration>,
    /// Per-request timeout. NOTE(review): stored but never enforced anywhere
    /// in this file — confirm intended use.
    pub timeout: Option<Duration>,
    /// Verbose logging flag. NOTE(review): not read anywhere in this file.
    pub verbose: bool,
}
impl Default for LoadTestConfig {
fn default() -> Self {
Self {
concurrency: 10,
total_requests: 1000,
requests_per_worker: 0,
warmup_requests: 100,
request_delay: None,
timeout: None,
verbose: false,
}
}
}
impl LoadTestConfig {
    /// Creates a configuration with the default settings (see [`Default`]).
    pub fn new() -> Self {
        Default::default()
    }

    /// Sets the number of concurrent worker threads.
    pub fn concurrency(self, concurrency: usize) -> Self {
        Self { concurrency, ..self }
    }

    /// Sets the target total number of measured requests.
    pub fn total_requests(self, total: usize) -> Self {
        Self { total_requests: total, ..self }
    }

    /// Sets an explicit per-worker request count (overrides the derived one).
    pub fn requests_per_worker(self, count: usize) -> Self {
        Self { requests_per_worker: count, ..self }
    }

    /// Sets the number of unmeasured warmup requests.
    pub fn warmup_requests(self, count: usize) -> Self {
        Self { warmup_requests: count, ..self }
    }

    /// Sets a pacing delay applied after each request.
    pub fn request_delay(self, delay: Duration) -> Self {
        Self { request_delay: Some(delay), ..self }
    }

    /// Sets the per-request timeout.
    pub fn timeout(self, timeout: Duration) -> Self {
        Self { timeout: Some(timeout), ..self }
    }

    /// Enables or disables verbose output.
    pub fn verbose(self, verbose: bool) -> Self {
        Self { verbose, ..self }
    }
}
/// Outcome of a single request: success flag, elapsed time, and an optional
/// error message.
#[derive(Debug, Clone)]
pub struct RequestResult {
    /// Whether the request succeeded.
    pub success: bool,
    /// Time the request took.
    pub latency: Duration,
    /// Error message when `success` is false; `None` otherwise.
    pub error: Option<String>,
}
impl RequestResult {
pub fn success(latency: Duration) -> Self {
Self { success: true, latency, error: None }
}
pub fn error(latency: Duration, error: String) -> Self {
Self { success: false, latency, error: Some(error) }
}
}
/// Computes summary statistics over a set of request latencies.
///
/// Returns `LatencyStats::default()` (all zeros, no std-dev) for an empty
/// slice. Percentiles use the nearest-rank method over a sorted copy of the
/// input; `std_dev` is the Bessel-corrected sample standard deviation and is
/// `None` when fewer than two samples are available.
fn calculate_latency_stats(latencies: &[Duration]) -> LatencyStats {
    if latencies.is_empty() {
        return LatencyStats::default();
    }
    let mut sorted = latencies.to_vec();
    // Stability is irrelevant for plain Durations; sort_unstable is faster
    // and allocation-free.
    sorted.sort_unstable();
    let count = sorted.len();

    // Nearest-rank percentile, clamped to the last element. The original
    // clamped p90/p95/p99 but not p50; clamping uniformly removes the
    // accidental dependence on the multiplier staying below 1.0.
    let percentile = |p: f64| -> Duration {
        let idx = ((count as f64 * p) as usize).min(count - 1);
        sorted[idx]
    };

    let total_ns: u128 = sorted.iter().map(|d| d.as_nanos()).sum();
    let mean_ns = total_ns / count as u128;

    let std_dev = if count >= 2 {
        // Sample (n-1) variance, computed in nanoseconds as f64.
        let mean_f64 = mean_ns as f64;
        let variance = sorted
            .iter()
            .map(|d| (d.as_nanos() as f64 - mean_f64).powi(2))
            .sum::<f64>()
            / (count - 1) as f64;
        Some(Duration::from_nanos(variance.sqrt() as u64))
    } else {
        None
    };

    LatencyStats {
        min: sorted[0],
        max: sorted[count - 1],
        // Truncating cast is safe in practice: mean of u64-nanosecond
        // durations fits in u64.
        mean: Duration::from_nanos(mean_ns as u64),
        // Upper-middle element, kept as-is for backward compatibility even
        // though the textbook even-count median averages the two middles.
        median: sorted[count / 2],
        p50: percentile(0.5),
        p90: percentile(0.9),
        p95: percentile(0.95),
        p99: percentile(0.99),
        std_dev,
    }
}
/// Thread-based (blocking) load-test driver parameterized by `LoadTestConfig`.
pub struct SyncLoadTester {
    // Configuration snapshot used by `run`.
    config: LoadTestConfig,
}
impl SyncLoadTester {
    /// Creates a tester driven by the given configuration.
    pub fn new(config: LoadTestConfig) -> Self {
        Self { config }
    }

    /// Runs the load test and returns the aggregated results.
    ///
    /// Spawns `config.concurrency` OS threads; each thread repeatedly calls
    /// `request_fn`, treating its first `warmup_requests / concurrency`
    /// calls as warmup (latency and outcome discarded) and recording the
    /// remaining `requests_per_worker` calls. When
    /// `config.requests_per_worker` is 0, the per-worker count is derived
    /// from `total_requests` by ceiling division, so the measured request
    /// count can slightly exceed `total_requests` when it is not a multiple
    /// of `concurrency`.
    ///
    /// NOTE(review): `total_time` is measured from before the warmup phase,
    /// so `throughput` includes warmup wall time in its denominator while
    /// excluding warmup requests from its numerator — throughput is
    /// understated whenever warmup is enabled. Confirm this is intended.
    ///
    /// NOTE(review): `config.timeout` is never consulted here — individual
    /// requests are not timed out.
    pub fn run<F>(&self, request_fn: F) -> LoadTestResult
    where
        F: Fn() -> RequestResult + Send + Sync + 'static,
    {
        // Wall clock for the whole run (warmup included; see note above).
        let start_time = Instant::now();
        // Shared counters and sinks written concurrently by all workers.
        let total_success = Arc::new(AtomicUsize::new(0));
        let total_error = Arc::new(AtomicUsize::new(0));
        let latencies = Arc::new(parking_lot::Mutex::new(Vec::new()));
        let error_types = Arc::new(parking_lot::Mutex::new(HashMap::new()));
        let concurrency = self.config.concurrency;
        let requests_per_worker = if self.config.requests_per_worker > 0 {
            self.config.requests_per_worker
        }
        else {
            // Ceiling division so the workers cover all of total_requests.
            (self.config.total_requests + concurrency - 1) / concurrency
        };
        let request_fn = Arc::new(request_fn);
        let config = self.config.clone();
        let mut handles = Vec::with_capacity(concurrency);
        for _ in 0..concurrency {
            // Per-thread clones of the shared state.
            let request_fn = Arc::clone(&request_fn);
            let total_success = Arc::clone(&total_success);
            let total_error = Arc::clone(&total_error);
            let latencies = Arc::clone(&latencies);
            let error_types = Arc::clone(&error_types);
            let config = config.clone();
            let handle = std::thread::spawn(move || {
                // Warmup iterations come first; only the post-warmup
                // iterations are recorded.
                for i in 0..(requests_per_worker + config.warmup_requests / concurrency) {
                    let is_warmup = i < config.warmup_requests / concurrency;
                    let request_start = Instant::now();
                    let result = request_fn();
                    let latency = request_start.elapsed();
                    if !is_warmup {
                        latencies.lock().push(latency);
                        if result.success {
                            total_success.fetch_add(1, Ordering::Relaxed);
                        }
                        else {
                            total_error.fetch_add(1, Ordering::Relaxed);
                            // Tally error messages for the final report.
                            if let Some(error) = result.error {
                                let mut errors = error_types.lock();
                                *errors.entry(error).or_insert(0) += 1;
                            }
                        }
                    }
                    // Optional pacing delay after every request, warmup
                    // included.
                    if let Some(delay) = config.request_delay {
                        std::thread::sleep(delay);
                    }
                }
            });
            handles.push(handle);
        }
        // Wait for every worker; a panicked worker is ignored (its partial
        // counts remain in the shared state).
        for handle in handles {
            let _ = handle.join();
        }
        let total_time = start_time.elapsed();
        // Only measured (non-warmup) requests contribute to the counts.
        let total_requests = total_success.load(Ordering::Relaxed) + total_error.load(Ordering::Relaxed);
        let success_count = total_success.load(Ordering::Relaxed);
        let error_count = total_error.load(Ordering::Relaxed);
        let error_rate = if total_requests > 0 { error_count as f64 / total_requests as f64 } else { 0.0 };
        let throughput = if total_time.as_secs_f64() > 0.0 { total_requests as f64 / total_time.as_secs_f64() } else { 0.0 };
        let latency_stats = calculate_latency_stats(&latencies.lock());
        let error_types = error_types.lock().clone();
        LoadTestResult {
            total_requests,
            success_count,
            error_count,
            error_rate,
            total_time,
            throughput,
            latency_stats,
            error_types,
        }
    }
}
/// Human-readable multi-line report.
///
/// Implemented as `Display` rather than an inherent `to_string` (which
/// Clippy's `inherent_to_string` lint flags): callers can keep using
/// `result.to_string()` via the blanket `ToString` impl, and the value now
/// also works directly in `format!` / `println!`. The output text is
/// unchanged.
impl std::fmt::Display for LoadTestResult {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The trailing `\` in the literal strips the newline and the next
        // line's leading whitespace, so the report lines start flush left.
        write!(
            f,
            "Load Test Result:\n\
            Total Requests: {}\n\
            Success: {}\n\
            Errors: {}\n\
            Error Rate: {:.2}%\n\
            Total Time: {:.2}s\n\
            Throughput: {:.2} req/s\n\
            Latency:\n\
            Min: {:?}\n\
            Max: {:?}\n\
            Mean: {:?}\n\
            Median: {:?}\n\
            P50: {:?}\n\
            P90: {:?}\n\
            P95: {:?}\n\
            P99: {:?}\n\
            Std Dev: {:?}\n\
            Error Types: {:?}",
            self.total_requests,
            self.success_count,
            self.error_count,
            self.error_rate * 100.0,
            self.total_time.as_secs_f64(),
            self.throughput,
            self.latency_stats.min,
            self.latency_stats.max,
            self.latency_stats.mean,
            self.latency_stats.median,
            self.latency_stats.p50,
            self.latency_stats.p90,
            self.latency_stats.p95,
            self.latency_stats.p99,
            self.latency_stats.std_dev,
            self.error_types,
        )
    }
}