use crate::error::{CoreError, CoreResult, ErrorContext};
use crate::testing::{TestConfig, TestResult};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

/// Configuration for memory, CPU, and concurrency stress tests.
#[derive(Debug, Clone)]
pub struct StressTestConfig {
    /// Maximum amount of memory to allocate during memory stress tests, in bytes.
    pub max_memory: usize,
    /// Number of worker threads for concurrent stress tests.
    pub thread_count: usize,
    /// Maximum wall-clock duration of a single stress test.
    pub duration: Duration,
    /// Size of each allocation step in progressive memory tests, in bytes.
    pub memory_step: usize,
    /// Number of iterations per CPU-intensive work unit.
    pub cpu_intensity: usize,
    /// Whether to check for memory leaks after memory tests.
    pub detect_leaks: bool,
    /// Minimum operations per second required to pass the performance check.
    pub performance_threshold: f64,
}

impl Default for StressTestConfig {
    fn default() -> Self {
        Self {
            max_memory: 1024 * 1024 * 1024, // 1 GiB
            thread_count: std::thread::available_parallelism()
                .map(|n| n.get())
                .unwrap_or(4),
            duration: Duration::from_secs(60),
            memory_step: 1024 * 1024, // 1 MiB
            cpu_intensity: 1_000_000,
            detect_leaks: true,
            performance_threshold: 1000.0,
        }
    }
}

impl StressTestConfig {
    /// Creates a configuration with default values.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the maximum memory to allocate, in bytes.
    pub fn with_max_memory(mut self, memory: usize) -> Self {
        self.max_memory = memory;
        self
    }

    /// Sets the number of worker threads.
    pub fn with_thread_count(mut self, count: usize) -> Self {
        self.thread_count = count;
        self
    }

    /// Sets the maximum test duration.
    pub fn with_duration(mut self, duration: Duration) -> Self {
        self.duration = duration;
        self
    }

    /// Sets the allocation step size, in bytes.
    pub fn with_memory_step(mut self, step: usize) -> Self {
        self.memory_step = step;
        self
    }

    /// Sets the number of iterations per CPU work unit.
    pub fn with_cpu_intensity(mut self, intensity: usize) -> Self {
        self.cpu_intensity = intensity;
        self
    }

    /// Enables or disables leak detection.
    pub fn with_leak_detection(mut self, detect: bool) -> Self {
        self.detect_leaks = detect;
        self
    }

    /// Sets the minimum operations per second required to pass.
    pub fn with_performance_threshold(mut self, threshold: f64) -> Self {
        self.performance_threshold = threshold;
        self
    }
}

/// Result of a single stress test run.
#[derive(Debug, Clone)]
pub struct StressTestResult {
    /// Name of the stress test that produced this result.
    pub test_name: String,
    /// Peak memory usage observed during the test, in bytes.
    pub peak_memory: usize,
    /// Measured throughput in operations per second.
    pub ops_per_second: f64,
    /// Total number of operations completed.
    pub total_operations: usize,
    /// Actual duration of the test run.
    pub duration: Duration,
    /// Whether a potential memory leak was detected.
    pub memory_leaks_detected: bool,
    /// Whether the throughput met the configured performance threshold.
    pub performance_threshold_met: bool,
    /// Error message if the test failed.
    pub error: Option<String>,
    /// Additional named metrics collected during the test.
    pub metrics: std::collections::HashMap<String, f64>,
}

impl StressTestResult {
    /// Creates an empty result for the named test.
    pub fn new(test_name: String) -> Self {
        Self {
            test_name,
            peak_memory: 0,
            ops_per_second: 0.0,
            total_operations: 0,
            duration: Duration::from_secs(0),
            memory_leaks_detected: false,
            performance_threshold_met: false,
            error: None,
            metrics: std::collections::HashMap::new(),
        }
    }

    /// Sets the peak memory usage, in bytes.
    pub fn with_peak_memory(mut self, memory: usize) -> Self {
        self.peak_memory = memory;
        self
    }

    /// Sets the measured throughput in operations per second.
    pub fn with_ops_per_second(mut self, ops: f64) -> Self {
        self.ops_per_second = ops;
        self
    }

    /// Sets the total number of operations completed.
    pub fn with_total_operations(mut self, ops: usize) -> Self {
        self.total_operations = ops;
        self
    }

    /// Sets the actual test duration.
    pub fn with_duration(mut self, duration: Duration) -> Self {
        self.duration = duration;
        self
    }

    /// Records whether a memory leak was detected.
    pub fn with_memory_leaks(mut self, detected: bool) -> Self {
        self.memory_leaks_detected = detected;
        self
    }

    /// Records whether the performance threshold was met.
    pub fn with_performance_threshold(mut self, met: bool) -> Self {
        self.performance_threshold_met = met;
        self
    }

    /// Records an error message for a failed test.
    pub fn with_error(mut self, error: String) -> Self {
        self.error = Some(error);
        self
    }

    /// Adds a named metric to the result.
    pub fn with_metric(mut self, name: String, value: f64) -> Self {
        self.metrics.insert(name, value);
        self
    }
}

/// Stress tester for memory allocation patterns.
pub struct MemoryStressTester {
    config: StressTestConfig,
}

impl MemoryStressTester {
    /// Creates a new memory stress tester with the given configuration.
    pub fn new(config: StressTestConfig) -> Self {
        Self { config }
    }

    /// Allocates memory in fixed steps until the configured limit, the
    /// configured duration, or an allocation failure is reached.
    pub fn test_progressive_allocation(&self) -> CoreResult<StressTestResult> {
        let start_time = Instant::now();
        let mut result = StressTestResult::new("progressive_allocation".to_string());
        let mut allocations = Vec::new();
        let mut current_memory = 0;

        let initial_memory = self.get_memory_usage()?;

        while current_memory < self.config.max_memory {
            let chunk_size = self
                .config
                .memory_step
                .min(self.config.max_memory - current_memory);

            match self.allocate_chunk(chunk_size) {
                Ok(chunk) => {
                    allocations.push(chunk);
                    current_memory += chunk_size;

                    if let Ok(memory) = self.get_memory_usage() {
                        result.peak_memory =
                            result.peak_memory.max(memory.saturating_sub(initial_memory));
                    }
                }
                Err(e) => {
                    result = result.with_error(format!(
                        "Allocation failed at {} bytes: {:?}",
                        current_memory, e
                    ));
                    break;
                }
            }

            if start_time.elapsed() > self.config.duration {
                break;
            }
        }

        // Release all allocations before checking for leaks.
        drop(allocations);

        if self.config.detect_leaks {
            let final_memory = self.get_memory_usage()?;
            // Allow up to 1 MiB of residual growth before flagging a leak.
            let leak_detected = final_memory > initial_memory + 1024 * 1024;
            result = result.with_memory_leaks(leak_detected);
        }

        result = result
            .with_duration(start_time.elapsed())
            .with_total_operations(current_memory / self.config.memory_step);

        Ok(result)
    }

    /// Repeatedly allocates small chunks and frees every other one to
    /// exercise fragmented allocation patterns.
    pub fn test_fragmented_allocation(&self) -> CoreResult<StressTestResult> {
        let start_time = Instant::now();
        let mut result = StressTestResult::new("fragmented_allocation".to_string());
        let mut allocations = Vec::new();
        let mut operations = 0;

        while start_time.elapsed() < self.config.duration {
            // Allocate a batch of small chunks.
            for _ in 0..10 {
                if let Ok(chunk) = self.allocate_chunk(1024) {
                    allocations.push(chunk);
                    operations += 1;
                }
            }

            // Free every other chunk to create fragmentation.
            let mut i = 0;
            allocations.retain(|_| {
                i += 1;
                i % 2 == 0
            });

            if let Ok(memory) = self.get_memory_usage() {
                result.peak_memory = result.peak_memory.max(memory);
            }
        }

        drop(allocations);

        let ops_per_second = operations as f64 / start_time.elapsed().as_secs_f64();
        let threshold_met = ops_per_second >= self.config.performance_threshold;

        result = result
            .with_duration(start_time.elapsed())
            .with_total_operations(operations)
            .with_ops_per_second(ops_per_second)
            .with_performance_threshold(threshold_met);

        Ok(result)
    }

    fn allocate_chunk(&self, size: usize) -> CoreResult<Vec<u8>> {
        let mut chunk = Vec::new();
        chunk.try_reserve(size).map_err(|e| {
            CoreError::MemoryError(ErrorContext::new(format!(
                "Failed to allocate {} bytes: {}",
                size, e
            )))
        })?;

        // Fill the buffer so the pages are actually committed.
        chunk.resize(size, 42);
        Ok(chunk)
    }

    /// Returns the current resident set size in bytes, or 0 when it cannot
    /// be determined on this platform.
    fn get_memory_usage(&self) -> CoreResult<usize> {
        #[cfg(target_os = "linux")]
        {
            use std::fs;
            let status = fs::read_to_string("/proc/self/status").map_err(|e| {
                CoreError::IoError(ErrorContext::new(format!(
                    "Failed to read memory status: {}",
                    e
                )))
            })?;

            for line in status.lines() {
                if line.starts_with("VmRSS:") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 2 {
                        let kb: usize = parts[1].parse().map_err(|e| {
                            CoreError::ValidationError(crate::error::ErrorContext::new(format!(
                                "Failed to parse memory: {}",
                                e
                            )))
                        })?;
                        return Ok(kb * 1024);
                    }
                }
            }
        }

        // Fallback when memory usage cannot be determined.
        Ok(0)
    }
}

/// Stress tester for CPU-intensive workloads.
pub struct CpuStressTester {
    config: StressTestConfig,
}

impl CpuStressTester {
    /// Creates a new CPU stress tester with the given configuration.
    pub fn new(config: StressTestConfig) -> Self {
        Self { config }
    }

    /// Runs the CPU-intensive computation on a single thread until the
    /// configured duration elapses.
    pub fn test_cpu_intensive_workload(&self) -> CoreResult<StressTestResult> {
        let start_time = Instant::now();
        let mut result = StressTestResult::new("cpu_intensive_workload".to_string());
        let mut total_operations = 0;

        while start_time.elapsed() < self.config.duration {
            let operations = self.cpu_intensive_computation(self.config.cpu_intensity)?;
            total_operations += operations;
        }

        let ops_per_second = total_operations as f64 / start_time.elapsed().as_secs_f64();
        let threshold_met = ops_per_second >= self.config.performance_threshold;

        result = result
            .with_duration(start_time.elapsed())
            .with_total_operations(total_operations)
            .with_ops_per_second(ops_per_second)
            .with_performance_threshold(threshold_met);

        Ok(result)
    }

    /// Runs the CPU-intensive computation on all configured threads and
    /// aggregates the per-thread operation counts.
    pub fn test_concurrent_cpu_workload(&self) -> CoreResult<StressTestResult> {
        let start_time = Instant::now();
        let mut result = StressTestResult::new("concurrent_cpu_workload".to_string());

        let config = Arc::new(self.config.clone());
        let results = Arc::new(std::sync::Mutex::new(Vec::new()));

        // Spawn one worker per configured thread.
        let mut handles = Vec::new();
        for thread_id in 0..self.config.thread_count {
            let config = Arc::clone(&config);
            let results = Arc::clone(&results);

            let handle = thread::spawn(move || {
                let mut operations = 0;
                while start_time.elapsed() < config.duration {
                    if let Ok(ops) =
                        Self::cpu_intensive_computation_static(config.cpu_intensity / 10)
                    {
                        operations += ops;
                    }
                }

                if let Ok(mut results) = results.lock() {
                    results.push((thread_id, operations));
                }
            });

            handles.push(handle);
        }

        // Wait for all workers to finish.
        for handle in handles {
            handle.join().map_err(|_| {
                CoreError::ComputationError(crate::error::ErrorContext::new("Thread join failed"))
            })?;
        }

        let results_guard = results.lock().map_err(|_| {
            CoreError::ComputationError(crate::error::ErrorContext::new("Failed to lock results"))
        })?;

        let total_operations: usize = results_guard.iter().map(|(_, ops)| ops).sum();
        let ops_per_second = total_operations as f64 / start_time.elapsed().as_secs_f64();
        let threshold_met = ops_per_second >= self.config.performance_threshold;

        result = result
            .with_duration(start_time.elapsed())
            .with_total_operations(total_operations)
            .with_ops_per_second(ops_per_second)
            .with_performance_threshold(threshold_met)
            .with_metric("threads_used".to_string(), self.config.thread_count as f64);

        Ok(result)
    }

    fn cpu_intensive_computation(&self, iterations: usize) -> CoreResult<usize> {
        Self::cpu_intensive_computation_static(iterations)
    }

    fn cpu_intensive_computation_static(iterations: usize) -> CoreResult<usize> {
        let mut sum = 0u64;
        let mut operations = 0;

        for i in 0..iterations {
            sum = sum.wrapping_add((i as u64).wrapping_mul(17).wrapping_add(23));
            sum = sum.wrapping_mul(31);
            operations += 1;
        }

        // Consume `sum` so the computation is not optimized away.
        if sum == 0 {
            return Err(CoreError::ComputationError(
                crate::error::ErrorContext::new("Unexpected computation result"),
            ));
        }

        Ok(operations)
    }
}

/// Stress tester for thread contention and lock-free operations.
pub struct ConcurrencyStressTester {
    config: StressTestConfig,
}

impl ConcurrencyStressTester {
    /// Creates a new concurrency stress tester with the given configuration.
    pub fn new(config: StressTestConfig) -> Self {
        Self { config }
    }

    /// Measures throughput when all threads contend on a single
    /// mutex-guarded counter.
    pub fn test_shared_resource_contention(&self) -> CoreResult<StressTestResult> {
        let start_time = Instant::now();
        let mut result = StressTestResult::new("shared_resource_contention".to_string());

        let shared_counter = Arc::new(std::sync::Mutex::new(0u64));
        let config = Arc::new(self.config.clone());
        let results = Arc::new(std::sync::Mutex::new(Vec::new()));

        // Spawn one worker per configured thread.
        let mut handles = Vec::new();
        for thread_id in 0..self.config.thread_count {
            let counter = Arc::clone(&shared_counter);
            let config = Arc::clone(&config);
            let results = Arc::clone(&results);

            let handle = thread::spawn(move || {
                let mut operations = 0;
                while start_time.elapsed() < config.duration {
                    if let Ok(mut counter) = counter.lock() {
                        *counter += 1;
                        operations += 1;

                        // Do a little extra work while holding the lock.
                        for _ in 0..100 {
                            *counter = counter.wrapping_add(1).wrapping_sub(1);
                        }
                    }

                    // Give other threads a chance to acquire the lock.
                    thread::yield_now();
                }

                if let Ok(mut results) = results.lock() {
                    results.push((thread_id, operations));
                }
            });

            handles.push(handle);
        }

        // Wait for all workers to finish.
        for handle in handles {
            handle.join().map_err(|_| {
                CoreError::ComputationError(crate::error::ErrorContext::new("Thread join failed"))
            })?;
        }

        let results_guard = results.lock().map_err(|_| {
            CoreError::ComputationError(crate::error::ErrorContext::new("Failed to lock results"))
        })?;

        let total_operations: usize = results_guard.iter().map(|(_, ops)| ops).sum();
        let ops_per_second = total_operations as f64 / start_time.elapsed().as_secs_f64();
        let threshold_met = ops_per_second >= self.config.performance_threshold;

        let final_counter = *shared_counter.lock().map_err(|_| {
            CoreError::ComputationError(crate::error::ErrorContext::new("Failed to lock counter"))
        })?;

        result = result
            .with_duration(start_time.elapsed())
            .with_total_operations(total_operations)
            .with_ops_per_second(ops_per_second)
            .with_performance_threshold(threshold_met)
            .with_metric("final_counter_value".to_string(), final_counter as f64)
            .with_metric("threads_used".to_string(), self.config.thread_count as f64);

        Ok(result)
    }

    /// Measures throughput when all threads operate on a shared atomic
    /// counter without locking.
    pub fn test_lock_free_performance(&self) -> CoreResult<StressTestResult> {
        let start_time = Instant::now();
        let mut result = StressTestResult::new("lock_free_performance".to_string());

        let atomic_counter = Arc::new(std::sync::atomic::AtomicU64::new(0));
        let config = Arc::new(self.config.clone());
        let results = Arc::new(std::sync::Mutex::new(Vec::new()));

        // Spawn one worker per configured thread.
        let mut handles = Vec::new();
        for thread_id in 0..self.config.thread_count {
            let counter = Arc::clone(&atomic_counter);
            let config = Arc::clone(&config);
            let results = Arc::clone(&results);

            let handle = thread::spawn(move || {
                let mut operations = 0;
                while start_time.elapsed() < config.duration {
                    counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                    operations += 1;

                    // Mix in a read and a matched sub/add pair to vary the traffic.
                    let _current = counter.load(std::sync::atomic::Ordering::Relaxed);
                    counter.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
                    counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                    operations += 2;
                }

                if let Ok(mut results) = results.lock() {
                    results.push((thread_id, operations));
                }
            });

            handles.push(handle);
        }

        // Wait for all workers to finish.
        for handle in handles {
            handle.join().map_err(|_| {
                CoreError::ComputationError(crate::error::ErrorContext::new("Thread join failed"))
            })?;
        }

        let results_guard = results.lock().map_err(|_| {
            CoreError::ComputationError(crate::error::ErrorContext::new("Failed to lock results"))
        })?;

        let total_operations: usize = results_guard.iter().map(|(_, ops)| ops).sum();
        let ops_per_second = total_operations as f64 / start_time.elapsed().as_secs_f64();
        let threshold_met = ops_per_second >= self.config.performance_threshold;

        let final_counter = atomic_counter.load(std::sync::atomic::Ordering::Relaxed);

        result = result
            .with_duration(start_time.elapsed())
            .with_total_operations(total_operations)
            .with_ops_per_second(ops_per_second)
            .with_performance_threshold(threshold_met)
            .with_metric("final_atomic_value".to_string(), final_counter as f64)
            .with_metric("threads_used".to_string(), self.config.thread_count as f64);

        Ok(result)
    }
}

/// Helpers for building stress test suites.
pub struct StressTestUtils;

impl StressTestUtils {
    /// Creates a test suite that wires the stress testers above into the
    /// crate's testing framework, using a shortened configuration so the
    /// suite stays quick to run.
    pub fn create_stress_test_suite(name: &str, config: TestConfig) -> crate::testing::TestSuite {
        let mut suite = crate::testing::TestSuite::new(name, config);
        let stress_config = StressTestConfig::default()
            .with_duration(Duration::from_secs(10)) // Keep individual tests short.
            .with_max_memory(64 * 1024 * 1024); // Cap allocations at 64 MiB.

        let stress_config_clone1 = stress_config.clone();
        suite.add_test("memory_progressive_allocation", move |_runner| {
            let tester = MemoryStressTester::new(stress_config_clone1.clone());
            let result = tester.test_progressive_allocation()?;

            if let Some(error) = result.error {
                return Ok(TestResult::failure(
                    result.duration,
                    result.total_operations,
                    error,
                ));
            }

            Ok(
                TestResult::success(std::time::Duration::from_secs(1), result.total_operations)
                    .with_memory_usage(result.peak_memory),
            )
        });

        let stress_config_clone2 = stress_config.clone();
        suite.add_test("memory_fragmented_allocation", move |_runner| {
            let tester = MemoryStressTester::new(stress_config_clone2.clone());
            let result = tester.test_fragmented_allocation()?;

            if let Some(error) = result.error {
                return Ok(TestResult::failure(
                    result.duration,
                    result.total_operations,
                    error,
                ));
            }

            Ok(
                TestResult::success(std::time::Duration::from_secs(1), result.total_operations)
                    .with_memory_usage(result.peak_memory),
            )
        });

        let stress_config_clone3 = stress_config.clone();
        suite.add_test("cpu_intensive_workload", move |_runner| {
            let tester = CpuStressTester::new(stress_config_clone3.clone());
            let result = tester.test_cpu_intensive_workload()?;

            if let Some(error) = result.error {
                return Ok(TestResult::failure(
                    result.duration,
                    result.total_operations,
                    error,
                ));
            }

            Ok(TestResult::success(
                std::time::Duration::from_secs(1),
                result.total_operations,
            ))
        });

        let stress_config_clone4 = stress_config.clone();
        suite.add_test("concurrent_cpu_workload", move |_runner| {
            let tester = CpuStressTester::new(stress_config_clone4.clone());
            let result = tester.test_concurrent_cpu_workload()?;

            if let Some(error) = result.error {
                return Ok(TestResult::failure(
                    result.duration,
                    result.total_operations,
                    error,
                ));
            }

            Ok(TestResult::success(
                std::time::Duration::from_secs(1),
                result.total_operations,
            ))
        });

        let stress_config_clone5 = stress_config.clone();
        suite.add_test("shared_resource_contention", move |_runner| {
            let tester = ConcurrencyStressTester::new(stress_config_clone5.clone());
            let result = tester.test_shared_resource_contention()?;

            if let Some(error) = result.error {
                return Ok(TestResult::failure(
                    result.duration,
                    result.total_operations,
                    error,
                ));
            }

            Ok(TestResult::success(
                std::time::Duration::from_secs(1),
                result.total_operations,
            ))
        });

        let stress_config_clone6 = stress_config.clone();
        suite.add_test("lock_free_performance", move |_runner| {
            let tester = ConcurrencyStressTester::new(stress_config_clone6.clone());
            let result = tester.test_lock_free_performance()?;

            if let Some(error) = result.error {
                return Ok(TestResult::failure(
                    result.duration,
                    result.total_operations,
                    error,
                ));
            }

            Ok(TestResult::success(
                std::time::Duration::from_secs(1),
                result.total_operations,
            ))
        });

        suite
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_stress_config() {
        let config = StressTestConfig::new()
            .with_max_memory(512 * 1024 * 1024)
            .with_thread_count(8)
            .with_duration(Duration::from_secs(30))
            .with_cpu_intensity(500000);

        assert_eq!(config.max_memory, 512 * 1024 * 1024);
        assert_eq!(config.thread_count, 8);
        assert_eq!(config.duration, Duration::from_secs(30));
        assert_eq!(config.cpu_intensity, 500000);
    }
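
    // A minimal sketch exercising the StressTestResult builder defined above;
    // the specific values are arbitrary and only check that the builders
    // store what they are given.
    #[test]
    fn test_stress_result_builder() {
        let result = StressTestResult::new("builder_check".to_string())
            .with_peak_memory(4096)
            .with_ops_per_second(250.0)
            .with_total_operations(25)
            .with_duration(Duration::from_millis(10))
            .with_metric("sample_metric".to_string(), 1.5);

        assert_eq!(result.test_name, "builder_check");
        assert_eq!(result.peak_memory, 4096);
        assert_eq!(result.total_operations, 25);
        assert_eq!(result.metrics.get("sample_metric"), Some(&1.5));
    }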

    #[test]
    fn test_memory_stress_tester() {
        let config = StressTestConfig::default()
            .with_max_memory(1024 * 1024) // 1 MiB keeps the test fast.
            .with_duration(Duration::from_millis(100));

        let tester = MemoryStressTester::new(config);

        let result = tester.test_progressive_allocation();
        assert!(result.is_ok());
    }
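
    // A similar sketch covering the fragmented-allocation path; the small
    // memory cap and short duration are arbitrary choices to keep it fast.
    #[test]
    fn test_fragmented_allocation_runs() {
        let config = StressTestConfig::default()
            .with_max_memory(1024 * 1024)
            .with_duration(Duration::from_millis(100));

        let tester = MemoryStressTester::new(config);

        let result = tester.test_fragmented_allocation();
        assert!(result.is_ok());
    }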

    #[test]
    fn test_cpu_stress_tester() {
        let config = StressTestConfig::default()
            .with_cpu_intensity(1000)
            .with_duration(Duration::from_millis(100));

        let tester = CpuStressTester::new(config);

        let result = tester.test_cpu_intensive_workload();
        assert!(result.is_ok());

        let result = result.expect("Operation failed");
        assert!(result.total_operations > 0);
    }
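
    // A sketch for the multi-threaded CPU path; two threads, low intensity,
    // and a short duration are assumptions chosen to keep the test light.
    #[test]
    fn test_concurrent_cpu_stress_tester() {
        let config = StressTestConfig::default()
            .with_thread_count(2)
            .with_cpu_intensity(1000)
            .with_duration(Duration::from_millis(100));

        let tester = CpuStressTester::new(config);

        let result = tester.test_concurrent_cpu_workload();
        assert!(result.is_ok());

        let result = result.expect("Operation failed");
        assert!(result.total_operations > 0);
    }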

    #[test]
    fn test_concurrency_stress_tester() {
        let config = StressTestConfig::default()
            .with_thread_count(2)
            .with_duration(Duration::from_millis(100));

        let tester = ConcurrencyStressTester::new(config);

        let result = tester.test_shared_resource_contention();
        assert!(result.is_ok());

        let result = result.expect("Operation failed");
        assert!(result.total_operations > 0);
    }
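
    // A sketch for the lock-free path; it only checks that the test completes
    // and reports some work within a short, assumed duration.
    #[test]
    fn test_lock_free_stress_tester() {
        let config = StressTestConfig::default()
            .with_thread_count(2)
            .with_duration(Duration::from_millis(100));

        let tester = ConcurrencyStressTester::new(config);

        let result = tester.test_lock_free_performance();
        assert!(result.is_ok());

        let result = result.expect("Operation failed");
        assert!(result.total_operations > 0);
    }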
}