#![allow(clippy::all)]
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::sleep;
use trustformers::error::TrustformersError;
/// Crate-local alias so every fallible function here returns `TrustformersError`.
type Result<T> = std::result::Result<T, TrustformersError>;
/// Tunable parameters for one stress-test run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StressTestConfig {
    /// Number of concurrent worker tasks driving requests.
    pub concurrency: usize,
    /// Shared request budget; used as the stop condition only when `duration`
    /// is `None` (workers check the shared counter before each send).
    pub total_requests: u64,
    /// Wall-clock cap for the run; when set it takes precedence over
    /// `total_requests` as the stop condition.
    pub duration: Option<Duration>,
    /// Optional pause each worker inserts after every request.
    pub request_delay: Option<Duration>,
    /// Memory ceiling in MB. NOTE(review): not enforced anywhere in the
    /// visible runner code — confirm intended use.
    pub memory_limit_mb: Option<u64>,
    /// CPU ceiling in percent. NOTE(review): not enforced anywhere in the
    /// visible runner code — confirm intended use.
    pub cpu_limit_percent: Option<f32>,
    /// Per-request timeout. NOTE(review): not applied by the visible request
    /// helpers — confirm intended use.
    pub request_timeout: Duration,
    /// Ramp-up window. NOTE(review): not consulted by the visible scenarios.
    pub ramp_up_duration: Option<Duration>,
    /// Ramp-down window. NOTE(review): not consulted by the visible scenarios.
    pub ramp_down_duration: Option<Duration>,
    /// NOTE(review): `get_results` always runs leak detection regardless of
    /// this flag — confirm whether it should gate the check.
    pub enable_memory_leak_detection: bool,
    /// NOTE(review): `get_results` always runs degradation detection
    /// regardless of this flag — confirm whether it should gate the check.
    pub enable_performance_degradation_detection: bool,
    /// Acceptable error rate in percent. NOTE(review): `get_results`
    /// hardcodes 5.0 instead of reading this field.
    pub acceptable_error_rate: f32,
    /// Which workload shape to drive.
    pub scenario: StressTestScenario,
}
impl Default for StressTestConfig {
    /// Baseline configuration: 10 workers, 1000 requests, 100ms pacing,
    /// 5% acceptable error rate, text-generation scenario, both detectors on.
    /// (Reformatted to one field per line; previously two fields shared a
    /// line, which rustfmt would reject.)
    fn default() -> Self {
        Self {
            concurrency: 10,
            total_requests: 1000,
            duration: None,
            request_delay: Some(Duration::from_millis(100)),
            memory_limit_mb: Some(4096),
            cpu_limit_percent: Some(80.0),
            request_timeout: Duration::from_secs(30),
            ramp_up_duration: Some(Duration::from_secs(30)),
            ramp_down_duration: Some(Duration::from_secs(10)),
            enable_memory_leak_detection: true,
            enable_performance_degradation_detection: true,
            acceptable_error_rate: 5.0,
            scenario: StressTestScenario::TextGeneration,
        }
    }
}
/// Workload shapes the runner knows how to drive.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StressTestScenario {
    /// Cycles canned prompts through the simulated text-generation endpoint.
    TextGeneration,
    /// Cycles canned inputs through the simulated classification endpoint.
    TextClassification,
    /// Alternates generation and classification requests per worker.
    Mixed,
    /// Sends very large (10 KB) prompts to pressure memory.
    MemoryPressure,
    /// Classification workload with the inter-request delay stripped out.
    HighThroughput,
    /// Currently delegates to the text-generation scenario.
    LongRunning,
    /// Alternates 2s bursts of concurrent requests with 1s rest periods.
    BurstTraffic,
    /// Ramps concurrency from 1 up to twice the configured level.
    ResourceExhaustion,
}
/// Aggregated outcome of a run, produced by `StressTestMetrics::get_results`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StressTestResults {
    /// Total requests attempted.
    pub total_requests: u64,
    /// Requests whose simulated call returned `Ok`.
    pub successful_requests: u64,
    /// Requests whose simulated call returned `Err`.
    pub failed_requests: u64,
    /// failed / total as a percentage (0 when no requests were sent).
    pub error_rate: f32,
    /// Elapsed time since the metrics collector was created.
    pub duration: Duration,
    /// total_requests divided by `duration`, in requests per second.
    pub throughput: f64,
    /// Mean of all recorded latencies (zero when none were recorded).
    pub average_latency: Duration,
    /// Upper-middle element of the sorted latency samples.
    pub median_latency: Duration,
    /// 95th-percentile latency (nearest rank on the sorted samples).
    pub p95_latency: Duration,
    /// 99th-percentile latency (nearest rank on the sorted samples).
    pub p99_latency: Duration,
    /// Largest recorded latency.
    pub max_latency: Duration,
    /// Memory summary built from the background sampler.
    pub memory_stats: MemoryStats,
    /// CPU summary built from the background sampler.
    pub cpu_stats: CpuStats,
    /// True when late-run mean latency exceeded early-run mean by 1.5x.
    pub performance_degradation_detected: bool,
    /// True when the fitted memory-growth slope exceeded 1 MB per sample.
    pub memory_leak_detected: bool,
    /// False when any failure reason below was recorded.
    pub test_passed: bool,
    /// Human-readable explanations for a failed run.
    pub failure_reasons: Vec<String>,
}
/// Memory-usage summary derived from samples collected during a run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
    /// First sample observed (MB).
    pub initial_memory_mb: u64,
    /// Highest sample observed (MB).
    pub peak_memory_mb: u64,
    /// Last sample observed (MB).
    pub final_memory_mb: u64,
    /// `final - initial` in MB; negative when usage shrank (hence signed).
    pub memory_growth_mb: i64,
    /// Raw time-stamped samples the summary was computed from.
    pub memory_samples: Vec<MemorySample>,
}
/// CPU-usage summary derived from samples collected during a run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuStats {
    /// Arithmetic mean of all samples, in percent.
    pub average_cpu_percent: f32,
    /// Highest sample observed, in percent.
    pub peak_cpu_percent: f32,
    /// Raw time-stamped samples the summary was computed from.
    pub cpu_samples: Vec<CpuSample>,
}
/// One memory reading, stamped with time since the metrics collector started.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemorySample {
    /// Offset from `StressTestMetrics::start_time`.
    pub timestamp: Duration,
    /// Observed memory usage in MB.
    pub memory_mb: u64,
}
/// One CPU reading, stamped with time since the metrics collector started.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuSample {
    /// Offset from `StressTestMetrics::start_time`.
    pub timestamp: Duration,
    /// Observed CPU utilization in percent.
    pub cpu_percent: f32,
}
/// Thread-safe in-flight metrics shared (via `Arc`) across worker tasks.
///
/// Counters are lock-free atomics so the request hot path stays cheap; sample
/// vectors sit behind tokio mutexes so async tasks can append to them.
#[derive(Debug)]
pub struct StressTestMetrics {
    /// Count of all requests attempted.
    pub total_requests: AtomicU64,
    /// Count of requests that returned `Ok`.
    pub successful_requests: AtomicU64,
    /// Count of requests that returned `Err`.
    pub failed_requests: AtomicU64,
    /// Per-request latencies; sorted in place by `get_results`.
    pub latency_samples: Arc<tokio::sync::Mutex<Vec<Duration>>>,
    /// Periodic memory readings from the background monitor.
    pub memory_samples: Arc<tokio::sync::Mutex<Vec<MemorySample>>>,
    /// Periodic CPU readings from the background monitor.
    pub cpu_samples: Arc<tokio::sync::Mutex<Vec<CpuSample>>>,
    /// Captured at construction; all elapsed-time figures measure from here.
    pub start_time: Instant,
}
impl Default for StressTestMetrics {
    /// Equivalent to [`StressTestMetrics::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl StressTestMetrics {
    /// Creates an empty collector; `start_time` is captured now, so all
    /// elapsed-time figures measure from construction.
    pub fn new() -> Self {
        Self {
            total_requests: AtomicU64::new(0),
            successful_requests: AtomicU64::new(0),
            failed_requests: AtomicU64::new(0),
            latency_samples: Arc::new(tokio::sync::Mutex::new(Vec::new())),
            memory_samples: Arc::new(tokio::sync::Mutex::new(Vec::new())),
            cpu_samples: Arc::new(tokio::sync::Mutex::new(Vec::new())),
            start_time: Instant::now(),
        }
    }

    /// Records one request outcome and its latency.
    ///
    /// Counters update immediately. The latency sample is pushed
    /// synchronously when the mutex is uncontended (the common case), so a
    /// `get_results` called right afterwards sees it; the original code
    /// always deferred the push to a spawned task, which raced with result
    /// collection and panicked when called outside a tokio runtime. Only
    /// under contention do we fall back to a detached task (which still
    /// requires a runtime, as before).
    pub fn record_request(&self, success: bool, latency: Duration) {
        self.total_requests.fetch_add(1, Ordering::Relaxed);
        if success {
            self.successful_requests.fetch_add(1, Ordering::Relaxed);
        } else {
            self.failed_requests.fetch_add(1, Ordering::Relaxed);
        }
        match self.latency_samples.try_lock() {
            Ok(mut samples) => samples.push(latency),
            Err(_) => {
                // Lock is held elsewhere: defer the push so the caller's hot
                // path never blocks on the metrics mutex.
                let latency_samples = self.latency_samples.clone();
                tokio::spawn(async move {
                    latency_samples.lock().await.push(latency);
                });
            },
        }
    }

    /// Appends a memory sample stamped with time elapsed since construction.
    pub async fn record_memory_usage(&self, memory_mb: u64) {
        self.memory_samples.lock().await.push(MemorySample {
            timestamp: self.start_time.elapsed(),
            memory_mb,
        });
    }

    /// Appends a CPU sample stamped with time elapsed since construction.
    pub async fn record_cpu_usage(&self, cpu_percent: f32) {
        self.cpu_samples.lock().await.push(CpuSample {
            timestamp: self.start_time.elapsed(),
            cpu_percent,
        });
    }

    /// Aggregates everything recorded so far into a `StressTestResults`.
    ///
    /// Latency percentiles use the nearest-rank method on sorted samples.
    /// Pass/fail checks a 5% error-rate ceiling and a 1 req/s throughput
    /// floor. NOTE(review): the 5% ceiling duplicates
    /// `StressTestConfig::acceptable_error_rate` rather than reading it —
    /// the config is not reachable from here; confirm before relying on a
    /// non-default threshold.
    pub async fn get_results(&self) -> StressTestResults {
        let total_requests = self.total_requests.load(Ordering::Relaxed);
        let successful_requests = self.successful_requests.load(Ordering::Relaxed);
        let failed_requests = self.failed_requests.load(Ordering::Relaxed);
        let error_rate = if total_requests > 0 {
            (failed_requests as f32 / total_requests as f32) * 100.0
        } else {
            0.0
        };
        let duration = self.start_time.elapsed();
        let throughput = total_requests as f64 / duration.as_secs_f64();

        let mut latency_samples = self.latency_samples.lock().await;
        // Unstable sort: faster, allocation-free, and ties are indistinguishable.
        latency_samples.sort_unstable();
        let average_latency = if latency_samples.is_empty() {
            Duration::ZERO
        } else {
            let sum: Duration = latency_samples.iter().sum();
            sum / latency_samples.len() as u32
        };
        // Nearest-rank percentile over the (already sorted) samples.
        let percentile = |q: f64| -> Duration {
            if latency_samples.is_empty() {
                Duration::ZERO
            } else {
                let idx = (latency_samples.len() as f64 * q) as usize;
                latency_samples[idx.min(latency_samples.len() - 1)]
            }
        };
        let median_latency = if latency_samples.is_empty() {
            Duration::ZERO
        } else {
            // Upper-middle element for even-length sets, same as before.
            latency_samples[latency_samples.len() / 2]
        };
        let p95_latency = percentile(0.95);
        let p99_latency = percentile(0.99);
        // Samples are sorted, so the maximum is simply the last element.
        let max_latency = latency_samples.last().copied().unwrap_or(Duration::ZERO);

        let memory_samples = self.memory_samples.lock().await;
        let memory_stats = match (memory_samples.first(), memory_samples.last()) {
            (Some(first), Some(last)) => {
                let peak = memory_samples.iter().map(|s| s.memory_mb).max().unwrap_or(0);
                MemoryStats {
                    initial_memory_mb: first.memory_mb,
                    peak_memory_mb: peak,
                    final_memory_mb: last.memory_mb,
                    memory_growth_mb: last.memory_mb as i64 - first.memory_mb as i64,
                    memory_samples: memory_samples.clone(),
                }
            },
            // No samples yet (e.g. monitor never ran): report all zeros.
            _ => MemoryStats {
                initial_memory_mb: 0,
                peak_memory_mb: 0,
                final_memory_mb: 0,
                memory_growth_mb: 0,
                memory_samples: Vec::new(),
            },
        };

        let cpu_samples = self.cpu_samples.lock().await;
        let cpu_stats = if cpu_samples.is_empty() {
            CpuStats {
                average_cpu_percent: 0.0,
                peak_cpu_percent: 0.0,
                cpu_samples: Vec::new(),
            }
        } else {
            let sum: f32 = cpu_samples.iter().map(|s| s.cpu_percent).sum();
            let peak = cpu_samples.iter().map(|s| s.cpu_percent).fold(0.0f32, f32::max);
            CpuStats {
                average_cpu_percent: sum / cpu_samples.len() as f32,
                peak_cpu_percent: peak,
                cpu_samples: cpu_samples.clone(),
            }
        };

        let performance_degradation_detected =
            Self::detect_performance_degradation(&latency_samples);
        let memory_leak_detected = Self::detect_memory_leak(&memory_stats);

        let mut failure_reasons = Vec::new();
        let mut test_passed = true;
        if error_rate > 5.0 {
            failure_reasons.push(format!("High error rate: {:.2}%", error_rate));
            test_passed = false;
        }
        if performance_degradation_detected {
            failure_reasons.push("Performance degradation detected".to_string());
            test_passed = false;
        }
        if memory_leak_detected {
            failure_reasons.push("Memory leak detected".to_string());
            test_passed = false;
        }
        if throughput < 1.0 {
            failure_reasons.push(format!("Poor throughput: {:.2} req/s", throughput));
            test_passed = false;
        }

        StressTestResults {
            total_requests,
            successful_requests,
            failed_requests,
            error_rate,
            duration,
            throughput,
            average_latency,
            median_latency,
            p95_latency,
            p99_latency,
            max_latency,
            memory_stats,
            cpu_stats,
            performance_degradation_detected,
            memory_leak_detected,
            test_passed,
            failure_reasons,
        }
    }

    /// Flags degradation when the mean latency of the last third of samples
    /// exceeds 1.5x the mean of the first third. Needs >= 20 samples to avoid
    /// noise on tiny runs. (Sync now: the original was `async` with no await.)
    fn detect_performance_degradation(latencies: &[Duration]) -> bool {
        if latencies.len() < 20 {
            return false;
        }
        let third = latencies.len() / 3;
        let mean_ms = |window: &[Duration]| -> f64 {
            window.iter().map(|d| d.as_secs_f64() * 1000.0).sum::<f64>() / window.len() as f64
        };
        let early_avg = mean_ms(&latencies[..third]);
        let late_avg = mean_ms(&latencies[latencies.len() - third..]);
        const DEGRADATION_FACTOR: f64 = 1.5;
        late_avg > early_avg * DEGRADATION_FACTOR
    }

    /// Fits an ordinary least-squares line to memory (MB) against sample
    /// index and flags a leak when the slope exceeds 1 MB per sample. Needs
    /// >= 10 samples (which also keeps the denominator non-zero). (Sync now:
    /// the original was `async` with no await.)
    fn detect_memory_leak(memory_stats: &MemoryStats) -> bool {
        let samples = &memory_stats.memory_samples;
        if samples.len() < 10 {
            return false;
        }
        let n = samples.len() as f64;
        let x_sum: f64 = (0..samples.len()).map(|i| i as f64).sum();
        let y_sum: f64 = samples.iter().map(|s| s.memory_mb as f64).sum();
        let xy_sum: f64 =
            samples.iter().enumerate().map(|(i, s)| i as f64 * s.memory_mb as f64).sum();
        let x2_sum: f64 = (0..samples.len()).map(|i| (i as f64).powi(2)).sum();
        let slope = (n * xy_sum - x_sum * y_sum) / (n * x2_sum - x_sum * x_sum);
        const LEAK_THRESHOLD_MB_PER_SAMPLE: f64 = 1.0;
        slope > LEAK_THRESHOLD_MB_PER_SAMPLE
    }
}
/// Drives a configured stress scenario and owns the shared metrics.
pub struct StressTestRunner {
    /// Parameters for this run.
    config: StressTestConfig,
    /// Shared with every spawned worker and monitor task.
    metrics: Arc<StressTestMetrics>,
}
impl StressTestRunner {
    /// Builds a runner for `config` with a fresh metrics collector.
    pub fn new(config: StressTestConfig) -> Self {
        Self {
            config,
            metrics: Arc::new(StressTestMetrics::new()),
        }
    }

    /// Entry point: starts background resource monitors, dispatches to the
    /// configured scenario, then collects the aggregated results.
    pub async fn run(&self) -> Result<StressTestResults> {
        println!("Starting stress test with config: {:?}", self.config);
        self.start_resource_monitoring().await;
        match self.config.scenario {
            StressTestScenario::TextGeneration => self.run_text_generation_stress().await?,
            StressTestScenario::TextClassification => self.run_text_classification_stress().await?,
            StressTestScenario::Mixed => self.run_mixed_workload_stress().await?,
            StressTestScenario::MemoryPressure => self.run_memory_pressure_stress().await?,
            StressTestScenario::HighThroughput => self.run_high_throughput_stress().await?,
            StressTestScenario::LongRunning => self.run_long_running_stress().await?,
            StressTestScenario::BurstTraffic => self.run_burst_traffic_stress().await?,
            StressTestScenario::ResourceExhaustion => self.run_resource_exhaustion_stress().await?,
        }
        let results = self.metrics.get_results().await;
        println!("Stress test completed. Results: {:?}", results);
        Ok(results)
    }

    /// Spawns two detached tasks that sample memory and CPU every 500ms.
    ///
    /// Sampling stops after `config.duration` (60s when unset) even if the
    /// workload is still running. NOTE(review): the tasks are never joined,
    /// so they may still be sampling briefly after results are collected.
    async fn start_resource_monitoring(&self) {
        let metrics = self.metrics.clone();
        let max_duration = self.config.duration.unwrap_or(Duration::from_secs(60));
        tokio::spawn(async move {
            let start_time = Instant::now();
            while start_time.elapsed() < max_duration {
                let memory_mb = get_memory_usage_mb().await;
                metrics.record_memory_usage(memory_mb).await;
                sleep(Duration::from_millis(500)).await;
            }
        });
        let metrics = self.metrics.clone();
        tokio::spawn(async move {
            let start_time = Instant::now();
            while start_time.elapsed() < max_duration {
                let cpu_percent = get_cpu_usage_percent().await;
                metrics.record_cpu_usage(cpu_percent).await;
                sleep(Duration::from_millis(500)).await;
            }
        });
    }

    /// Text-generation workload: each worker cycles through canned prompts
    /// until the duration elapses or the shared request budget is spent.
    /// NOTE(review): workers check the shared counter before each send, so
    /// the total may overshoot `total_requests` by up to `concurrency`.
    async fn run_text_generation_stress(&self) -> Result<()> {
        println!("Running text generation stress test...");
        let test_inputs = vec![
            "The future of AI is",
            "Once upon a time",
            "In a world where",
            "The key to success",
            "Explain the concept of",
        ];
        let mut tasks = Vec::new();
        for _i in 0..self.config.concurrency {
            let metrics = self.metrics.clone();
            let config = self.config.clone();
            let inputs = test_inputs.clone();
            let task = tokio::spawn(async move {
                let _thread_id = _i;
                let mut requests_sent = 0;
                loop {
                    // Duration (when set) takes precedence over the budget.
                    if let Some(duration) = config.duration {
                        if metrics.start_time.elapsed() > duration {
                            break;
                        }
                    } else if metrics.total_requests.load(Ordering::Relaxed)
                        >= config.total_requests
                    {
                        break;
                    }
                    let input = &inputs[requests_sent % inputs.len()];
                    let start = Instant::now();
                    let result = Self::make_text_generation_request(input).await;
                    let latency = start.elapsed();
                    metrics.record_request(result.is_ok(), latency);
                    requests_sent += 1;
                    if let Some(delay) = config.request_delay {
                        sleep(delay).await;
                    }
                }
            });
            tasks.push(task);
        }
        for task in tasks {
            // NOTE(review): panics if a worker task panicked instead of
            // surfacing an Err through this function's Result.
            task.await.expect("async operation failed");
        }
        Ok(())
    }

    /// Classification workload: identical loop shape to the generation
    /// scenario but against the classification endpoint.
    async fn run_text_classification_stress(&self) -> Result<()> {
        println!("Running text classification stress test...");
        let test_inputs = vec![
            "This is a great product!",
            "I hate this service.",
            "The weather is nice today.",
            "This movie was boring.",
            "I love programming in Rust.",
        ];
        let mut tasks = Vec::new();
        for _i in 0..self.config.concurrency {
            let metrics = self.metrics.clone();
            let config = self.config.clone();
            let inputs = test_inputs.clone();
            let task = tokio::spawn(async move {
                let mut requests_sent = 0;
                loop {
                    if let Some(duration) = config.duration {
                        if metrics.start_time.elapsed() > duration {
                            break;
                        }
                    } else if metrics.total_requests.load(Ordering::Relaxed)
                        >= config.total_requests
                    {
                        break;
                    }
                    let input = &inputs[requests_sent % inputs.len()];
                    let start = Instant::now();
                    let result = Self::make_text_classification_request(input).await;
                    let latency = start.elapsed();
                    metrics.record_request(result.is_ok(), latency);
                    requests_sent += 1;
                    if let Some(delay) = config.request_delay {
                        sleep(delay).await;
                    }
                }
            });
            tasks.push(task);
        }
        for task in tasks {
            task.await.expect("async operation failed");
        }
        Ok(())
    }

    /// Mixed workload: each worker alternates generation (even iterations)
    /// and classification (odd iterations).
    async fn run_mixed_workload_stress(&self) -> Result<()> {
        println!("Running mixed workload stress test...");
        let mut tasks = Vec::new();
        for _i in 0..self.config.concurrency {
            let metrics = self.metrics.clone();
            let config = self.config.clone();
            let task = tokio::spawn(async move {
                let mut requests_sent = 0;
                loop {
                    if let Some(duration) = config.duration {
                        if metrics.start_time.elapsed() > duration {
                            break;
                        }
                    } else if metrics.total_requests.load(Ordering::Relaxed)
                        >= config.total_requests
                    {
                        break;
                    }
                    let start = Instant::now();
                    let result = if requests_sent % 2 == 0 {
                        Self::make_text_generation_request("Hello world").await
                    } else {
                        Self::make_text_classification_request("This is a test").await
                    };
                    let latency = start.elapsed();
                    metrics.record_request(result.is_ok(), latency);
                    requests_sent += 1;
                    if let Some(delay) = config.request_delay {
                        sleep(delay).await;
                    }
                }
            });
            tasks.push(task);
        }
        for task in tasks {
            task.await.expect("async operation failed");
        }
        Ok(())
    }

    /// Memory-pressure workload: sends 10 KB prompts so the simulated
    /// generation latency (scaled by input length) and allocations grow.
    async fn run_memory_pressure_stress(&self) -> Result<()> {
        println!("Running memory pressure stress test...");
        let mut tasks = Vec::new();
        for _i in 0..self.config.concurrency {
            let metrics = self.metrics.clone();
            let config = self.config.clone();
            let task = tokio::spawn(async move {
                let mut _requests_sent = 0;
                loop {
                    if let Some(duration) = config.duration {
                        if metrics.start_time.elapsed() > duration {
                            break;
                        }
                    } else if metrics.total_requests.load(Ordering::Relaxed)
                        >= config.total_requests
                    {
                        break;
                    }
                    // Fresh 10 KB allocation per request, by design.
                    let large_input = "A".repeat(10000);
                    let start = Instant::now();
                    let result = Self::make_text_generation_request(&large_input).await;
                    let latency = start.elapsed();
                    metrics.record_request(result.is_ok(), latency);
                    _requests_sent += 1;
                    if let Some(delay) = config.request_delay {
                        sleep(delay).await;
                    }
                }
            });
            tasks.push(task);
        }
        for task in tasks {
            task.await.expect("async operation failed");
        }
        Ok(())
    }

    /// High-throughput workload: classification requests with the configured
    /// inter-request delay forced off.
    async fn run_high_throughput_stress(&self) -> Result<()> {
        println!("Running high throughput stress test...");
        let mut config = self.config.clone();
        config.request_delay = None;
        let mut tasks = Vec::new();
        for _i in 0..config.concurrency {
            let metrics = self.metrics.clone();
            let config = config.clone();
            let task = tokio::spawn(async move {
                let mut _requests_sent = 0;
                loop {
                    if let Some(duration) = config.duration {
                        if metrics.start_time.elapsed() > duration {
                            break;
                        }
                    } else if metrics.total_requests.load(Ordering::Relaxed)
                        >= config.total_requests
                    {
                        break;
                    }
                    let start = Instant::now();
                    let result = Self::make_text_classification_request("test").await;
                    let latency = start.elapsed();
                    metrics.record_request(result.is_ok(), latency);
                    _requests_sent += 1;
                }
            });
            tasks.push(task);
        }
        for task in tasks {
            task.await.expect("async operation failed");
        }
        Ok(())
    }

    /// Long-running workload: currently just the text-generation scenario
    /// (callers are expected to supply a long `duration`).
    async fn run_long_running_stress(&self) -> Result<()> {
        println!("Running long running stress test...");
        self.run_text_generation_stress().await
    }

    /// Burst workload: alternates ~2s windows of full-concurrency request
    /// waves with ~1s quiet periods until the total duration elapses.
    async fn run_burst_traffic_stress(&self) -> Result<()> {
        println!("Running burst traffic stress test...");
        let burst_duration = Duration::from_secs(2);
        let rest_duration = Duration::from_secs(1);
        let total_duration = self.config.duration.unwrap_or(Duration::from_secs(10));
        let start_time = Instant::now();
        let mut in_burst = true;
        let mut last_switch = start_time;
        while start_time.elapsed() < total_duration {
            if in_burst {
                // One wave: `concurrency` one-shot requests, joined before
                // the next wave starts.
                let mut tasks = Vec::new();
                for _i in 0..self.config.concurrency {
                    let metrics = self.metrics.clone();
                    let task = tokio::spawn(async move {
                        let start = Instant::now();
                        let result = Self::make_text_classification_request("burst test").await;
                        let latency = start.elapsed();
                        metrics.record_request(result.is_ok(), latency);
                    });
                    tasks.push(task);
                }
                for task in tasks {
                    task.await.expect("async operation failed");
                }
                if last_switch.elapsed() > burst_duration {
                    in_burst = false;
                    last_switch = Instant::now();
                }
            } else {
                sleep(Duration::from_millis(50)).await;
                if last_switch.elapsed() > rest_duration {
                    in_burst = true;
                    last_switch = Instant::now();
                }
            }
        }
        Ok(())
    }

    /// Exhaustion workload: ramps concurrency from 1 to 2x the configured
    /// level, issuing two generation requests per task at each step.
    async fn run_resource_exhaustion_stress(&self) -> Result<()> {
        println!("Running resource exhaustion stress test...");
        let max_concurrency = self.config.concurrency * 2; let mut current_concurrency = 1;
        while current_concurrency <= max_concurrency {
            println!("Testing with concurrency: {}", current_concurrency);
            let mut tasks = Vec::new();
            for _i in 0..current_concurrency {
                let metrics = self.metrics.clone();
                let task = tokio::spawn(async move {
                    for _ in 0..2 {
                        let start = Instant::now();
                        let result = Self::make_text_generation_request("resource test").await;
                        let latency = start.elapsed();
                        metrics.record_request(result.is_ok(), latency);
                    }
                });
                tasks.push(task);
            }
            for task in tasks {
                task.await.expect("async operation failed");
            }
            current_concurrency += 1;
            // Brief pause between ramp steps.
            sleep(Duration::from_millis(100)).await; }
        Ok(())
    }

    /// Simulated generation endpoint: latency scales with input length
    /// (50ms + 2ms/byte); inputs containing "fail" return a pipeline error.
    async fn make_text_generation_request(input: &str) -> Result<String> {
        sleep(Duration::from_millis(50 + (input.len() as u64 * 2))).await;
        if input.contains("fail") {
            return Err(TrustformersError::Pipeline {
                message: "Simulated failure".to_string(),
                pipeline_type: "text-generation".to_string(),
                suggestion: Some("Try again".to_string()),
                recovery_actions: vec![],
            });
        }
        Ok(format!("Generated response for: {}", input))
    }

    /// Simulated classification endpoint: latency scales with input length
    /// (20ms + 1ms/byte); inputs containing "error" return a pipeline error.
    async fn make_text_classification_request(input: &str) -> Result<String> {
        sleep(Duration::from_millis(20 + (input.len() as u64))).await;
        if input.contains("error") {
            return Err(TrustformersError::Pipeline {
                message: "Simulated error".to_string(),
                pipeline_type: "text-classification".to_string(),
                suggestion: Some("Try again".to_string()),
                recovery_actions: vec![],
            });
        }
        Ok(format!("Classification result for: {}", input))
    }
}
/// Returns the process's current resident set size in MB.
///
/// Reads `VmRSS` from `/proc/self/status` on Linux; on platforms without
/// procfs (or when parsing fails) it falls back to the previous fixed
/// placeholder of 1024 MB so downstream stats remain non-zero.
async fn get_memory_usage_mb() -> u64 {
    read_rss_mb().unwrap_or(1024)
}

/// Best-effort RSS probe via procfs; `None` when unavailable.
fn read_rss_mb() -> Option<u64> {
    let status = std::fs::read_to_string("/proc/self/status").ok()?;
    // VmRSS is reported in kB; convert to MB.
    parse_vm_rss_kb(&status).map(|kb| kb / 1024)
}

/// Extracts the `VmRSS:` value (in kB) from a `/proc/<pid>/status` dump.
fn parse_vm_rss_kb(status: &str) -> Option<u64> {
    status
        .lines()
        .find(|line| line.starts_with("VmRSS:"))?
        .split_whitespace()
        .nth(1)?
        .parse()
        .ok()
}
/// Placeholder CPU probe: always reports 25% utilization.
///
/// NOTE(review): a real implementation needs two time-spaced /proc/stat (or
/// platform-equivalent) samples; until then the CpuStats produced from this
/// are synthetic — confirm before relying on them.
async fn get_cpu_usage_percent() -> f32 {
    25.0
}
impl StressTestConfig {
    /// Smoke-level preset: 2 workers, capped at 5s.
    ///
    /// NOTE(review): because workers prefer `duration` when it is `Some`,
    /// `total_requests: 10` is not the stop condition here (nor in the other
    /// presets) — confirm whether that is intended.
    pub fn light() -> Self {
        Self {
            concurrency: 2,
            total_requests: 10,
            duration: Some(Duration::from_secs(5)),
            request_delay: Some(Duration::from_millis(100)),
            ..Default::default()
        }
    }

    /// Moderate preset: 4 workers, capped at 10s, 50ms pacing.
    pub fn medium() -> Self {
        Self {
            concurrency: 4,
            total_requests: 20,
            duration: Some(Duration::from_secs(10)),
            request_delay: Some(Duration::from_millis(50)),
            ..Default::default()
        }
    }

    /// Sustained-load preset: 50 workers, capped at 30 minutes.
    pub fn heavy() -> Self {
        Self {
            concurrency: 50,
            total_requests: 10000,
            duration: Some(Duration::from_secs(1800)),
            request_delay: Some(Duration::from_millis(50)),
            ..Default::default()
        }
    }

    /// Maximum-load preset: 100 workers, no pacing delay, raised memory/CPU
    /// ceilings, capped at 1 hour. (Reformatted to one field per line; two
    /// fields previously shared a line.)
    pub fn extreme() -> Self {
        Self {
            concurrency: 100,
            total_requests: 100000,
            duration: Some(Duration::from_secs(3600)),
            request_delay: None,
            memory_limit_mb: Some(8192),
            cpu_limit_percent: Some(95.0),
            ..Default::default()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // End-to-end smoke run of the default (text generation) scenario.
    #[tokio::test]
    async fn test_light_stress_text_generation() {
        let config = StressTestConfig::light();
        let runner = StressTestRunner::new(config);
        let results = runner.run().await.expect("async operation failed");
        assert!(results.error_rate < 10.0);
        assert!(results.total_requests > 0);
        assert!(results.successful_requests > 0);
    }

    // Classification scenario should stay fast and error-free (no input
    // contains the "error" trigger word).
    #[tokio::test]
    async fn test_medium_stress_text_classification() {
        let mut config = StressTestConfig::medium();
        config.scenario = StressTestScenario::TextClassification;
        let runner = StressTestRunner::new(config);
        let results = runner.run().await.expect("async operation failed");
        assert!(results.error_rate < 5.0);
        assert!(results.throughput > 0.0);
        assert!(results.average_latency < Duration::from_secs(1));
    }

    // Alternating generation/classification workload completes and records.
    #[tokio::test]
    async fn test_mixed_workload_stress() {
        let mut config = StressTestConfig::light();
        config.scenario = StressTestScenario::Mixed;
        let runner = StressTestRunner::new(config);
        let results = runner.run().await.expect("async operation failed");
        assert!(results.total_requests > 0);
        assert!(results.successful_requests > 0);
    }

    // Large-input scenario; peak memory must be non-zero because the
    // background monitor records at least one sample.
    #[tokio::test]
    async fn test_memory_pressure_stress() {
        let mut config = StressTestConfig::light();
        config.scenario = StressTestScenario::MemoryPressure;
        config.memory_limit_mb = Some(512);
        let runner = StressTestRunner::new(config);
        let results = runner.run().await.expect("async operation failed");
        assert!(results.total_requests > 0);
        assert!(results.memory_stats.peak_memory_mb > 0);
    }

    // No-delay scenario still produces a positive throughput figure.
    #[tokio::test]
    async fn test_high_throughput_stress() {
        let mut config = StressTestConfig::light();
        config.scenario = StressTestScenario::HighThroughput;
        config.request_delay = None;
        let runner = StressTestRunner::new(config);
        let results = runner.run().await.expect("async operation failed");
        assert!(results.throughput > 0.0);
        assert!(results.total_requests > 0);
    }

    // Burst scenario respects its configured duration (with slack for the
    // final in-flight wave to drain).
    #[tokio::test]
    async fn test_burst_traffic_stress() {
        let mut config = StressTestConfig::light();
        config.scenario = StressTestScenario::BurstTraffic;
        config.duration = Some(Duration::from_secs(5));
        let runner = StressTestRunner::new(config);
        let results = runner.run().await.expect("async operation failed");
        assert!(results.total_requests > 0);
        assert!(results.duration <= Duration::from_secs(10));
    }

    // Direct unit test of the collector's counters and derived error rate.
    #[tokio::test]
    async fn test_stress_test_metrics() {
        let metrics = StressTestMetrics::new();
        metrics.record_request(true, Duration::from_millis(100));
        metrics.record_request(false, Duration::from_millis(200));
        metrics.record_memory_usage(1024).await;
        metrics.record_cpu_usage(50.0).await;
        let results = metrics.get_results().await;
        assert_eq!(results.total_requests, 2);
        assert_eq!(results.successful_requests, 1);
        assert_eq!(results.failed_requests, 1);
        assert_eq!(results.error_rate, 50.0);
        assert_eq!(results.memory_stats.memory_samples.len(), 1);
        assert_eq!(results.cpu_stats.cpu_samples.len(), 1);
    }

    // Pins the headline numbers of each preset constructor.
    #[tokio::test]
    async fn test_stress_test_config_presets() {
        let light = StressTestConfig::light();
        assert_eq!(light.concurrency, 2);
        assert_eq!(light.total_requests, 10);
        let medium = StressTestConfig::medium();
        assert_eq!(medium.concurrency, 4);
        assert_eq!(medium.total_requests, 20);
        let heavy = StressTestConfig::heavy();
        assert_eq!(heavy.concurrency, 50);
        assert_eq!(heavy.total_requests, 10000);
        let extreme = StressTestConfig::extreme();
        assert_eq!(extreme.concurrency, 100);
        assert_eq!(extreme.total_requests, 100000);
    }
}