1use chrono::{DateTime, Utc};
7use scirs2_core::ndarray::{Array1, Array2};
8use scirs2_core::random::{thread_rng, Rng};
9use serde::{Deserialize, Serialize};
10use sklears_core::{error::Result as SklResult, traits::Estimator};
11use std::collections::HashMap;
12use std::sync::{Arc, Mutex};
13use std::thread;
14use std::time::{Duration, Instant};
15
/// Coordinates stress-test execution: holds the configuration, a resource
/// monitor, the queued scenarios, and the results collected so far.
pub struct StressTester {
    /// Thresholds and toggles that control execution and issue detection.
    pub config: StressTestConfig,
    /// Background sampler for memory/CPU/thread readings during a run.
    pub resource_monitor: ResourceMonitor,
    /// Scenarios queued for execution via `run_all_tests`.
    pub scenarios: Vec<StressTestScenario>,
    /// One result per scenario that has been executed.
    pub results: Vec<StressTestResult>,
}
27
/// Tunable limits and thresholds for a stress-test run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StressTestConfig {
    /// Hard cap on run duration.
    /// NOTE(review): not yet consulted anywhere in this file — confirm intent.
    pub max_duration: Duration,
    /// Memory budget in MB; exceeding it is reported as `ResourceExhaustion`.
    pub memory_limit_mb: u64,
    /// CPU-usage fraction considered the acceptable ceiling.
    /// NOTE(review): not yet consulted anywhere in this file.
    pub cpu_threshold: f64,
    /// Upper bound on worker threads.
    /// NOTE(review): not yet consulted anywhere in this file.
    pub max_threads: usize,
    /// Multipliers applied to the baseline dataset size in volume tests.
    pub data_scale_factors: Vec<f64>,
    /// Complexity settings exercised by CPU-intensive tests.
    pub complexity_levels: Vec<usize>,
    /// Error rate (errors / throughput) above which a spike is flagged.
    pub error_tolerance: f64,
    /// Degradation factor above which a performance issue is flagged.
    pub performance_threshold: f64,
    /// Whether `detect_issues` checks for suspected memory leaks.
    pub detect_memory_leaks: bool,
    /// Whether deadlock detection is requested.
    /// NOTE(review): not yet consulted anywhere in this file.
    pub detect_deadlocks: bool,
}
52
53impl Default for StressTestConfig {
54 fn default() -> Self {
55 Self {
56 max_duration: Duration::from_secs(300), memory_limit_mb: 2048, cpu_threshold: 0.95,
59 max_threads: 16,
60 data_scale_factors: vec![1.0, 5.0, 10.0, 50.0, 100.0],
61 complexity_levels: vec![1, 5, 10, 25, 50],
62 error_tolerance: 0.01,
63 performance_threshold: 2.0, detect_memory_leaks: true,
65 detect_deadlocks: true,
66 }
67 }
68}
69
/// A stress-test scenario; each variant dispatches to a dedicated test method
/// in `StressTester::run_scenario`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StressTestScenario {
    /// Large synthetic dataset whose size is scaled by `scale_factor`.
    HighVolumeData {
        scale_factor: f64,
        batch_size: usize,
    },
    /// Multiple pipelines executed from multiple threads at once.
    ConcurrentExecution {
        num_threads: usize,
        num_pipelines: usize,
    },
    /// Allocates roughly `target_memory_mb` of memory during the run.
    MemoryPressure {
        target_memory_mb: u64,
        allocation_pattern: MemoryPattern,
    },
    /// Heavy computation at the given complexity level.
    CpuIntensive {
        complexity_level: usize,
        computation_type: ComputationType,
    },
    /// Repeated operations over an extended wall-clock duration.
    LongRunning {
        duration: Duration,
        operation_interval: Duration,
    },
    /// Execution under constrained memory/CPU budgets.
    ResourceStarvation {
        memory_limit_mb: u64,
        cpu_limit_percent: f64,
    },
    /// Degenerate and extreme inputs (empty data, NaN, infinities, ...).
    EdgeCaseHandling { edge_cases: Vec<EdgeCase> },
}
106
/// Requested allocation style for a `MemoryPressure` scenario.
/// NOTE(review): the pattern is recorded in results, but `test_memory_pressure`
/// currently allocates the same way regardless of variant — confirm whether
/// differentiated behavior is planned.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryPattern {
    /// Steady incremental growth (name-based gloss).
    Gradual,
    /// Bursty allocate/release cycles (name-based gloss).
    Spiky,
    /// Many scattered small allocations (name-based gloss).
    Fragmented,
    /// A sustained large footprint (name-based gloss).
    Sustained,
}
119
/// Requested workload kind for a `CpuIntensive` scenario.
/// NOTE(review): recorded in results, but `test_cpu_intensive` currently runs
/// the same trigonometric loop regardless of variant.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComputationType {
    /// Matrix-operation workload (name-based gloss).
    MatrixOps,
    /// Iterative numeric workload (name-based gloss).
    Iterative,
    /// Recursive workload (name-based gloss).
    Recursive,
    /// Parallel workload (name-based gloss).
    Parallel,
}
132
/// Degenerate inputs exercised by the `EdgeCaseHandling` scenario; see
/// `StressTester::test_edge_cases` for how each is constructed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum EdgeCase {
    /// Zero-row dataset.
    EmptyData,
    /// Exactly one sample.
    SingleSample,
    /// Wide data with `dimensions` feature columns.
    HighDimensional { dimensions: usize },
    /// Every value identical (zero variance).
    IdenticalValues,
    /// Data containing an extreme outlier value.
    ExtremeOutliers { outlier_magnitude: f64 },
    /// Data with NaN entries standing in for missing values.
    MissingValues { missing_ratio: f64 },
    /// Strongly correlated feature columns (name-based gloss; the current
    /// test only builds a zero matrix).
    HighlyCorrelated { correlation: f64 },
    /// Numerical extremes: infinities and f64::MIN / f64::MAX.
    NumericalEdges,
}
153
/// Collects timestamped resource samples from a background thread.
///
/// All buffers sit behind `Arc<Mutex<..>>` so the sampler thread and callers
/// can share them; `monitoring_active` is the sampler's shutdown flag.
#[derive(Debug, Clone)]
pub struct ResourceMonitor {
    /// (timestamp, memory in MB) samples.
    pub memory_usage: Arc<Mutex<Vec<(DateTime<Utc>, u64)>>>,
    /// (timestamp, CPU fraction) samples.
    pub cpu_usage: Arc<Mutex<Vec<(DateTime<Utc>, f64)>>>,
    /// (timestamp, thread count) samples.
    pub thread_count: Arc<Mutex<Vec<(DateTime<Utc>, usize)>>>,
    /// `true` while the sampler thread should keep running.
    pub monitoring_active: Arc<Mutex<bool>>,
}
166
impl ResourceMonitor {
    /// Creates a monitor with empty sample buffers and sampling disabled.
    #[must_use]
    pub fn new() -> Self {
        Self {
            memory_usage: Arc::new(Mutex::new(Vec::new())),
            cpu_usage: Arc::new(Mutex::new(Vec::new())),
            thread_count: Arc::new(Mutex::new(Vec::new())),
            monitoring_active: Arc::new(Mutex::new(false)),
        }
    }

    /// Spawns a sampler thread that appends memory, CPU, and thread-count
    /// readings every `interval` until `stop_monitoring` clears the flag.
    ///
    /// Poisoned mutexes are recovered via `PoisonError::into_inner`, so a
    /// panic elsewhere cannot halt sample collection.
    pub fn start_monitoring(&self, interval: Duration) {
        let memory_usage = self.memory_usage.clone();
        let cpu_usage = self.cpu_usage.clone();
        let thread_count = self.thread_count.clone();
        let active = self.monitoring_active.clone();

        *active.lock().unwrap_or_else(|e| e.into_inner()) = true;

        thread::spawn(move || {
            while *active.lock().unwrap_or_else(|e| e.into_inner()) {
                let now = Utc::now();

                // Simulated readings; see the private helpers below.
                let memory_mb = Self::get_current_memory_usage();
                let cpu_percent = Self::get_current_cpu_usage();
                let threads = Self::get_current_thread_count();

                // Each lock is held only long enough to push one sample.
                memory_usage
                    .lock()
                    .unwrap_or_else(|e| e.into_inner())
                    .push((now, memory_mb));
                cpu_usage
                    .lock()
                    .unwrap_or_else(|e| e.into_inner())
                    .push((now, cpu_percent));
                thread_count
                    .lock()
                    .unwrap_or_else(|e| e.into_inner())
                    .push((now, threads));

                thread::sleep(interval);
            }
        });
    }

    /// Signals the sampler thread to exit after its current sleep.
    pub fn stop_monitoring(&self) {
        *self
            .monitoring_active
            .lock()
            .unwrap_or_else(|e| e.into_inner()) = false;
    }

    /// Placeholder reading: random value in 100..1124 MB — not a real probe.
    fn get_current_memory_usage() -> u64 {
        thread_rng().random::<u64>() % 1024 + 100
    }

    /// Placeholder reading: random CPU fraction in 0.1..0.9.
    /// NOTE(review): these helpers mix `random::<T>()` and `gen_range` —
    /// confirm both methods exist on scirs2_core's `Rng` re-export (the names
    /// changed between rand 0.8 and 0.9).
    fn get_current_cpu_usage() -> f64 {
        thread_rng().gen_range(0.1..0.9)
    }

    /// Placeholder reading: random thread count in 1..=20.
    fn get_current_thread_count() -> usize {
        thread_rng().gen_range(1..=20)
    }

    /// Summary statistics over all memory samples collected so far; zeroed
    /// stats when no samples exist.
    #[must_use]
    pub fn get_memory_stats(&self) -> ResourceStats {
        let usage = self.memory_usage.lock().unwrap_or_else(|e| e.into_inner());
        if usage.is_empty() {
            return ResourceStats::default();
        }

        let values: Vec<f64> = usage.iter().map(|(_, mem)| *mem as f64).collect();
        ResourceStats::from_values(&values)
    }

    /// Summary statistics over all CPU samples collected so far; zeroed stats
    /// when no samples exist.
    #[must_use]
    pub fn get_cpu_stats(&self) -> ResourceStats {
        let usage = self.cpu_usage.lock().unwrap_or_else(|e| e.into_inner());
        if usage.is_empty() {
            return ResourceStats::default();
        }

        let values: Vec<f64> = usage.iter().map(|(_, cpu)| *cpu).collect();
        ResourceStats::from_values(&values)
    }
}
264
impl Default for ResourceMonitor {
    /// Equivalent to [`ResourceMonitor::new`].
    fn default() -> Self {
        Self::new()
    }
}
270
/// Summary statistics for one resource's sample series.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceStats {
    /// Smallest observed sample.
    pub min: f64,
    /// Largest observed sample.
    pub max: f64,
    /// Arithmetic mean.
    pub mean: f64,
    /// Population standard deviation (divides by N, not N-1).
    pub std_dev: f64,
    /// 95th percentile, nearest-lower-rank method.
    pub percentile_95: f64,
    /// 99th percentile, nearest-lower-rank method.
    pub percentile_99: f64,
}
281
282impl ResourceStats {
283 fn from_values(values: &[f64]) -> Self {
284 if values.is_empty() {
285 return Self::default();
286 }
287
288 let mut sorted = values.to_vec();
289 sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
290
291 let min = sorted[0];
292 let max = sorted[sorted.len() - 1];
293 let mean = values.iter().sum::<f64>() / values.len() as f64;
294
295 let variance = values.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / values.len() as f64;
296 let std_dev = variance.sqrt();
297
298 let p95_idx = (0.95 * (sorted.len() - 1) as f64) as usize;
299 let p99_idx = (0.99 * (sorted.len() - 1) as f64) as usize;
300 let percentile_95 = sorted[p95_idx];
301 let percentile_99 = sorted[p99_idx];
302
303 Self {
304 min,
305 max,
306 mean,
307 std_dev,
308 percentile_95,
309 percentile_99,
310 }
311 }
312}
313
impl Default for ResourceStats {
    /// All-zero statistics, used when no samples are available.
    fn default() -> Self {
        Self {
            min: 0.0,
            max: 0.0,
            mean: 0.0,
            std_dev: 0.0,
            percentile_95: 0.0,
            percentile_99: 0.0,
        }
    }
}
326
/// Outcome of a single stress-test scenario.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StressTestResult {
    /// The scenario that produced this result.
    pub scenario: StressTestScenario,
    /// Whether the scenario completed without failures.
    pub success: bool,
    /// Wall-clock time; overwritten by `run_scenario` after the test finishes.
    pub execution_time: Duration,
    /// Highest memory footprint (MB); simulated/estimated by the test methods.
    pub peak_memory_mb: u64,
    /// Average CPU usage fraction; simulated by the test methods.
    pub avg_cpu_usage: f64,
    /// Work items processed per unit (units vary per scenario).
    pub throughput: f64,
    /// Number of errors encountered during the scenario.
    pub error_count: usize,
    /// Slowdown factor relative to an implied baseline (1.0 = no slowdown).
    pub performance_degradation: f64,
    /// Measured resource statistics (filled in by `run_scenario`).
    pub resource_stats: ResourceUsageStats,
    /// Issues flagged by `detect_issues` (filled in by `run_scenario`).
    pub issues: Vec<StressTestIssue>,
    /// Scenario-specific named metrics.
    pub metrics: HashMap<String, f64>,
}
353
/// Per-run resource usage rollup.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceUsageStats {
    /// Memory sample statistics (MB).
    pub memory: ResourceStats,
    /// CPU sample statistics (usage fraction).
    pub cpu: ResourceStats,
    /// Highest concurrent thread count.
    /// NOTE(review): never updated in this file; stays at its default (1).
    pub max_threads: usize,
    /// I/O operation count.
    /// NOTE(review): never updated in this file; stays at its default (0).
    pub io_operations: u64,
}
362
/// A problem detected while analyzing a stress-test result.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StressTestIssue {
    /// Memory grew from `initial_memory` to `final_memory` MB at the given rate.
    MemoryLeak {
        initial_memory: u64,
        final_memory: u64,
        leak_rate_mb_per_sec: f64,
    },
    /// Threads blocked each other for `duration`.
    /// NOTE(review): never constructed in this file yet.
    Deadlock {
        thread_ids: Vec<usize>,
        duration: Duration,
    },
    /// The run was `degradation_factor` times slower than the baseline.
    PerformanceDegradation {
        baseline_time: Duration,
        actual_time: Duration,
        degradation_factor: f64,
    },
    /// A resource's peak usage exceeded its configured limit.
    ResourceExhaustion {
        resource_type: String,
        limit: f64,
        peak_usage: f64,
    },
    /// The error rate rose `spike_factor` times above the tolerance.
    ErrorRateSpike {
        baseline_error_rate: f64,
        actual_error_rate: f64,
        spike_factor: f64,
    },
    /// Execution exceeded its expected duration.
    /// NOTE(review): never constructed in this file yet.
    Timeout {
        expected_duration: Duration,
        actual_duration: Duration,
    },
}
401
402impl StressTester {
    /// Creates a tester with the given configuration and no scenarios or
    /// results queued.
    #[must_use]
    pub fn new(config: StressTestConfig) -> Self {
        Self {
            config,
            resource_monitor: ResourceMonitor::new(),
            scenarios: Vec::new(),
            results: Vec::new(),
        }
    }
413
    /// Queues a scenario for the next `run_all_tests` call.
    pub fn add_scenario(&mut self, scenario: StressTestScenario) {
        self.scenarios.push(scenario);
    }
418
419 pub fn run_all_tests<T: Estimator + Send + Sync>(&mut self, pipeline: &T) -> SklResult<()> {
421 for scenario in self.scenarios.clone() {
422 let result = self.run_scenario(pipeline, &scenario)?;
423 self.results.push(result);
424 }
425 Ok(())
426 }
427
    /// Runs a single scenario with resource monitoring wrapped around it,
    /// then fills in measured stats, wall-clock time, and detected issues on
    /// the returned result.
    pub fn run_scenario<T: Estimator + Send + Sync>(
        &self,
        pipeline: &T,
        scenario: &StressTestScenario,
    ) -> SklResult<StressTestResult> {
        let start_time = Instant::now();

        // Sample resources every 100 ms while the scenario runs.
        self.resource_monitor
            .start_monitoring(Duration::from_millis(100));

        // Dispatch to the scenario-specific test method.
        let mut result = match scenario {
            StressTestScenario::HighVolumeData {
                scale_factor,
                batch_size,
            } => self.test_high_volume_data(pipeline, *scale_factor, *batch_size)?,
            StressTestScenario::ConcurrentExecution {
                num_threads,
                num_pipelines,
            } => self.test_concurrent_execution(pipeline, *num_threads, *num_pipelines)?,
            StressTestScenario::MemoryPressure {
                target_memory_mb,
                allocation_pattern,
            } => self.test_memory_pressure(pipeline, *target_memory_mb, allocation_pattern)?,
            StressTestScenario::CpuIntensive {
                complexity_level,
                computation_type,
            } => self.test_cpu_intensive(pipeline, *complexity_level, computation_type)?,
            StressTestScenario::LongRunning {
                duration,
                operation_interval,
            } => self.test_long_running(pipeline, *duration, *operation_interval)?,
            StressTestScenario::ResourceStarvation {
                memory_limit_mb,
                cpu_limit_percent,
            } => self.test_resource_starvation(pipeline, *memory_limit_mb, *cpu_limit_percent)?,
            StressTestScenario::EdgeCaseHandling { edge_cases } => {
                self.test_edge_cases(pipeline, edge_cases)?
            }
        };

        self.resource_monitor.stop_monitoring();

        // Replace the test methods' placeholder stats with measured values.
        result.resource_stats.memory = self.resource_monitor.get_memory_stats();
        result.resource_stats.cpu = self.resource_monitor.get_cpu_stats();
        result.execution_time = start_time.elapsed();

        // Issue detection must run after execution_time is set, since the
        // detectors read it.
        result.issues = self.detect_issues(&result);

        Ok(result)
    }
483
484 fn test_high_volume_data<T: Estimator>(
486 &self,
487 _pipeline: &T,
488 scale_factor: f64,
489 _batch_size: usize,
490 ) -> SklResult<StressTestResult> {
491 let n_samples = (10000.0 * scale_factor) as usize;
493 let n_features = 100;
494
495 let data = Array2::<f64>::zeros((n_samples, n_features));
496 let _targets = Array1::<f64>::zeros(n_samples);
497
498 thread::sleep(Duration::from_millis((scale_factor * 100.0) as u64));
500
501 Ok(StressTestResult {
502 scenario: StressTestScenario::HighVolumeData {
503 scale_factor,
504 batch_size: _batch_size,
505 },
506 success: true,
507 execution_time: Duration::default(),
508 peak_memory_mb: (n_samples * n_features * 8) as u64 / (1024 * 1024), avg_cpu_usage: 0.7,
510 throughput: n_samples as f64 / (scale_factor * 0.1), error_count: 0,
512 performance_degradation: scale_factor,
513 resource_stats: ResourceUsageStats::default(),
514 issues: Vec::new(),
515 metrics: HashMap::new(),
516 })
517 }
518
519 fn test_concurrent_execution<T: Estimator + Send + Sync>(
521 &self,
522 _pipeline: &T,
523 num_threads: usize,
524 num_pipelines: usize,
525 ) -> SklResult<StressTestResult> {
526 let handles = (0..num_threads)
527 .map(|_| {
528 thread::spawn(move || {
529 for _ in 0..num_pipelines {
530 thread::sleep(Duration::from_millis(10));
532 }
533 })
534 })
535 .collect::<Vec<_>>();
536
537 for handle in handles {
538 handle.join().unwrap_or_else(|_| Default::default());
539 }
540
541 Ok(StressTestResult {
542 scenario: StressTestScenario::ConcurrentExecution {
543 num_threads,
544 num_pipelines,
545 },
546 success: true,
547 execution_time: Duration::default(),
548 peak_memory_mb: (num_threads * num_pipelines * 10) as u64, avg_cpu_usage: 0.8,
550 throughput: (num_threads * num_pipelines) as f64,
551 error_count: 0,
552 performance_degradation: 1.2,
553 resource_stats: ResourceUsageStats::default(),
554 issues: Vec::new(),
555 metrics: HashMap::new(),
556 })
557 }
558
559 fn test_memory_pressure<T: Estimator>(
561 &self,
562 _pipeline: &T,
563 target_memory_mb: u64,
564 _pattern: &MemoryPattern,
565 ) -> SklResult<StressTestResult> {
566 let mut _memory_hogs: Vec<Vec<u8>> = Vec::new();
568 let chunk_size = 1024 * 1024; for _ in 0..(target_memory_mb as usize) {
571 _memory_hogs.push(vec![0u8; chunk_size]);
572 }
573
574 thread::sleep(Duration::from_millis(500));
576
577 Ok(StressTestResult {
578 scenario: StressTestScenario::MemoryPressure {
579 target_memory_mb,
580 allocation_pattern: _pattern.clone(),
581 },
582 success: true,
583 execution_time: Duration::default(),
584 peak_memory_mb: target_memory_mb,
585 avg_cpu_usage: 0.5,
586 throughput: 100.0 / (target_memory_mb as f64 / 1000.0), error_count: 0,
588 performance_degradation: target_memory_mb as f64 / 1000.0,
589 resource_stats: ResourceUsageStats::default(),
590 issues: Vec::new(),
591 metrics: HashMap::new(),
592 })
593 }
594
595 fn test_cpu_intensive<T: Estimator>(
597 &self,
598 _pipeline: &T,
599 complexity_level: usize,
600 _computation_type: &ComputationType,
601 ) -> SklResult<StressTestResult> {
602 let mut result = 0.0;
604 for i in 0..(complexity_level * 10000) {
605 result += (i as f64).sin().cos().tan();
606 }
607
608 Ok(StressTestResult {
609 scenario: StressTestScenario::CpuIntensive {
610 complexity_level,
611 computation_type: _computation_type.clone(),
612 },
613 success: true,
614 execution_time: Duration::default(),
615 peak_memory_mb: 50, avg_cpu_usage: 0.95,
617 throughput: complexity_level as f64,
618 error_count: 0,
619 performance_degradation: complexity_level as f64 / 10.0,
620 resource_stats: ResourceUsageStats::default(),
621 issues: Vec::new(),
622 metrics: HashMap::from([("computation_result".to_string(), result)]),
623 })
624 }
625
626 fn test_long_running<T: Estimator>(
628 &self,
629 _pipeline: &T,
630 duration: Duration,
631 operation_interval: Duration,
632 ) -> SklResult<StressTestResult> {
633 let start = Instant::now();
634 let mut operations = 0;
635
636 while start.elapsed() < duration {
637 thread::sleep(operation_interval);
639 operations += 1;
640 }
641
642 Ok(StressTestResult {
643 scenario: StressTestScenario::LongRunning {
644 duration,
645 operation_interval,
646 },
647 success: true,
648 execution_time: start.elapsed(),
649 peak_memory_mb: 100,
650 avg_cpu_usage: 0.3,
651 throughput: f64::from(operations) / duration.as_secs_f64(),
652 error_count: 0,
653 performance_degradation: 1.0,
654 resource_stats: ResourceUsageStats::default(),
655 issues: Vec::new(),
656 metrics: HashMap::from([("total_operations".to_string(), f64::from(operations))]),
657 })
658 }
659
660 fn test_resource_starvation<T: Estimator>(
662 &self,
663 _pipeline: &T,
664 memory_limit_mb: u64,
665 _cpu_limit_percent: f64,
666 ) -> SklResult<StressTestResult> {
667 thread::sleep(Duration::from_millis(200));
669
670 Ok(StressTestResult {
671 scenario: StressTestScenario::ResourceStarvation {
672 memory_limit_mb,
673 cpu_limit_percent: _cpu_limit_percent,
674 },
675 success: true,
676 execution_time: Duration::default(),
677 peak_memory_mb: memory_limit_mb,
678 avg_cpu_usage: _cpu_limit_percent,
679 throughput: 50.0,
680 error_count: 0,
681 performance_degradation: 2.0,
682 resource_stats: ResourceUsageStats::default(),
683 issues: Vec::new(),
684 metrics: HashMap::new(),
685 })
686 }
687
    /// Builds a degenerate dataset for each requested edge case.
    ///
    /// Currently a smoke test: the data is constructed but never fed through
    /// the pipeline, and `total_errors` is a placeholder constant, so
    /// `success` is always true.
    fn test_edge_cases<T: Estimator>(
        &self,
        _pipeline: &T,
        edge_cases: &[EdgeCase],
    ) -> SklResult<StressTestResult> {
        // Placeholder: never incremented; becomes error_count below.
        let total_errors = 0;

        for edge_case in edge_cases {
            match edge_case {
                EdgeCase::EmptyData => {
                    // Zero rows, ten columns.
                    let _empty_data = Array2::<f64>::zeros((0, 10));
                }
                EdgeCase::SingleSample => {
                    let _single_data = Array2::<f64>::zeros((1, 10));
                }
                EdgeCase::HighDimensional { dimensions } => {
                    let _high_dim_data = Array2::<f64>::zeros((100, *dimensions));
                }
                EdgeCase::IdenticalValues => {
                    // Every entry 1.0: zero variance.
                    let _identical_data = Array2::<f64>::ones((100, 10));
                }
                EdgeCase::ExtremeOutliers {
                    outlier_magnitude: _,
                } => {
                    // NOTE(review): magnitude parameter is ignored; the
                    // outlier is hard-coded to 1e10.
                    let mut data = Array2::<f64>::zeros((100, 10));
                    data[[0, 0]] = 1e10;
                }
                EdgeCase::MissingValues { missing_ratio: _ } => {
                    // NOTE(review): ratio is ignored; a single NaN is placed.
                    let mut data = Array2::<f64>::zeros((100, 10));
                    data[[0, 0]] = f64::NAN;
                }
                EdgeCase::HighlyCorrelated { correlation: _ } => {
                    // NOTE(review): correlation is ignored; data is all zeros.
                    let _corr_data = Array2::<f64>::zeros((100, 10));
                }
                EdgeCase::NumericalEdges => {
                    // Infinities and the f64 extremes in the first column.
                    let mut data = Array2::<f64>::zeros((10, 3));
                    data[[0, 0]] = f64::INFINITY;
                    data[[1, 0]] = f64::NEG_INFINITY;
                    data[[2, 0]] = f64::MIN;
                    data[[3, 0]] = f64::MAX;
                }
            }
        }

        Ok(StressTestResult {
            scenario: StressTestScenario::EdgeCaseHandling {
                edge_cases: edge_cases.to_vec(),
            },
            success: total_errors == 0,
            execution_time: Duration::default(),
            peak_memory_mb: 100,
            avg_cpu_usage: 0.4,
            throughput: edge_cases.len() as f64,
            error_count: total_errors,
            performance_degradation: 1.1,
            resource_stats: ResourceUsageStats::default(),
            issues: Vec::new(),
            metrics: HashMap::from([("edge_cases_tested".to_string(), edge_cases.len() as f64)]),
        })
    }
757
    /// Scans a result against configured thresholds and returns all detected
    /// issues: performance degradation, suspected memory leak, memory
    /// exhaustion, and error-rate spikes.
    ///
    /// NOTE(review): the 1 s baseline and the 100 MB / 1000 MB leak bounds are
    /// hard-coded placeholders, and the divisions assume nonzero
    /// execution time and throughput (zero yields inf rather than a panic).
    fn detect_issues(&self, result: &StressTestResult) -> Vec<StressTestIssue> {
        let mut issues = Vec::new();

        if result.performance_degradation > self.config.performance_threshold {
            issues.push(StressTestIssue::PerformanceDegradation {
                baseline_time: Duration::from_secs(1), // placeholder baseline
                actual_time: result.execution_time,
                degradation_factor: result.performance_degradation,
            });
        }

        // Heuristic leak check: peak above 1000 MB counts as a leak relative
        // to an assumed 100 MB starting footprint (the guard also keeps the
        // subtraction below from underflowing).
        if self.config.detect_memory_leaks && result.peak_memory_mb > 1000 {
            issues.push(StressTestIssue::MemoryLeak {
                initial_memory: 100,
                final_memory: result.peak_memory_mb,
                leak_rate_mb_per_sec: (result.peak_memory_mb - 100) as f64
                    / result.execution_time.as_secs_f64(),
            });
        }

        if result.peak_memory_mb > self.config.memory_limit_mb {
            issues.push(StressTestIssue::ResourceExhaustion {
                resource_type: "memory".to_string(),
                limit: self.config.memory_limit_mb as f64,
                peak_usage: result.peak_memory_mb as f64,
            });
        }

        if result.error_count > 0 {
            // Errors per unit of throughput, compared against the tolerance.
            let error_rate = result.error_count as f64 / result.throughput;
            if error_rate > self.config.error_tolerance {
                issues.push(StressTestIssue::ErrorRateSpike {
                    baseline_error_rate: 0.0,
                    actual_error_rate: error_rate,
                    spike_factor: error_rate / self.config.error_tolerance,
                });
            }
        }

        issues
    }
804
805 #[must_use]
807 pub fn generate_report(&self) -> StressTestReport {
808 let total_tests = self.results.len();
809 let successful_tests = self.results.iter().filter(|r| r.success).count();
810 let failed_tests = total_tests - successful_tests;
811
812 let avg_execution_time = if self.results.is_empty() {
813 0.0
814 } else {
815 self.results
816 .iter()
817 .map(|r| r.execution_time.as_secs_f64())
818 .sum::<f64>()
819 / self.results.len() as f64
820 };
821
822 let peak_memory_usage = self
823 .results
824 .iter()
825 .map(|r| r.peak_memory_mb)
826 .max()
827 .unwrap_or(0);
828
829 let all_issues: Vec<_> = self
830 .results
831 .iter()
832 .flat_map(|r| r.issues.iter().cloned())
833 .collect();
834
835 StressTestReport {
836 timestamp: Utc::now(),
837 config: self.config.clone(),
838 total_tests,
839 successful_tests,
840 failed_tests,
841 avg_execution_time: Duration::from_secs_f64(avg_execution_time),
842 peak_memory_usage,
843 detected_issues: all_issues,
844 results: self.results.clone(),
845 recommendations: self.generate_recommendations(),
846 }
847 }
848
849 fn generate_recommendations(&self) -> Vec<String> {
851 let mut recommendations = Vec::new();
852
853 let performance_issues = self
855 .results
856 .iter()
857 .filter(|r| r.performance_degradation > self.config.performance_threshold)
858 .count();
859
860 if performance_issues > 0 {
861 recommendations.push(format!(
862 "Performance degradation detected in {performance_issues} tests. Consider optimizing algorithms or increasing resources."
863 ));
864 }
865
866 let high_memory_tests = self
868 .results
869 .iter()
870 .filter(|r| r.peak_memory_mb > self.config.memory_limit_mb)
871 .count();
872
873 if high_memory_tests > 0 {
874 recommendations.push(format!(
875 "Memory limit exceeded in {high_memory_tests} tests. Consider implementing memory optimization strategies."
876 ));
877 }
878
879 let error_tests = self.results.iter().filter(|r| r.error_count > 0).count();
881
882 if error_tests > 0 {
883 recommendations.push(format!(
884 "Errors detected in {error_tests} tests. Review error handling and edge case management."
885 ));
886 }
887
888 if recommendations.is_empty() {
889 recommendations.push(
890 "All stress tests passed successfully. Pipeline shows good stability under load."
891 .to_string(),
892 );
893 }
894
895 recommendations
896 }
897}
898
impl Default for ResourceUsageStats {
    /// Zeroed memory/CPU statistics, a single thread, and no I/O operations.
    fn default() -> Self {
        Self {
            memory: ResourceStats::default(),
            cpu: ResourceStats::default(),
            max_threads: 1,
            io_operations: 0,
        }
    }
}
909
/// Aggregated report across all executed stress tests; built by
/// `StressTester::generate_report`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StressTestReport {
    /// When the report was generated (UTC).
    pub timestamp: DateTime<Utc>,
    /// Configuration the tests ran under.
    pub config: StressTestConfig,
    /// Total number of results summarized.
    pub total_tests: usize,
    /// Results with `success == true`.
    pub successful_tests: usize,
    /// `total_tests - successful_tests`.
    pub failed_tests: usize,
    /// Mean execution time across results.
    pub avg_execution_time: Duration,
    /// Highest `peak_memory_mb` across all results, in MB.
    pub peak_memory_usage: u64,
    /// Every issue flagged across all results, flattened.
    pub detected_issues: Vec<StressTestIssue>,
    /// Full per-scenario results.
    pub results: Vec<StressTestResult>,
    /// Human-readable advice derived from the results.
    pub recommendations: Vec<String>,
}
924
#[cfg(test)]
mod tests {
    // Fixes: removed an unjustified `#[allow(non_snake_case)]` (no
    // non-snake-case identifier exists in this module) and an unused
    // `use scirs2_core::ndarray::Array2;` (no test references Array2).
    use super::*;
    use sklears_core::error::SklearsError;

    /// Minimal `Estimator` implementation for exercising the tester.
    struct MockEstimator;

    impl Estimator for MockEstimator {
        type Config = ();
        type Error = SklearsError;
        type Float = f64;

        fn config(&self) -> &Self::Config {
            &()
        }
    }

    #[test]
    fn test_stress_tester_creation() {
        let config = StressTestConfig::default();
        let tester = StressTester::new(config);
        assert_eq!(tester.scenarios.len(), 0);
        assert_eq!(tester.results.len(), 0);
    }

    #[test]
    fn test_add_scenario() {
        let config = StressTestConfig::default();
        let mut tester = StressTester::new(config);

        let scenario = StressTestScenario::HighVolumeData {
            scale_factor: 10.0,
            batch_size: 1000,
        };

        tester.add_scenario(scenario);
        assert_eq!(tester.scenarios.len(), 1);
    }

    #[test]
    fn test_high_volume_data_scenario() {
        let config = StressTestConfig::default();
        let tester = StressTester::new(config);
        let estimator = MockEstimator;

        let result = tester
            .test_high_volume_data(&estimator, 5.0, 1000)
            .expect("operation should succeed");
        assert!(result.success);
        // Degradation is defined as the scale factor itself.
        assert_eq!(result.performance_degradation, 5.0);
    }

    #[test]
    fn test_resource_monitor() {
        let monitor = ResourceMonitor::new();

        monitor.start_monitoring(Duration::from_millis(1));
        thread::sleep(Duration::from_millis(10));
        monitor.stop_monitoring();

        let memory_stats = monitor.get_memory_stats();
        assert!(memory_stats.min >= 0.0);
    }

    #[test]
    fn test_edge_case_handling() {
        let config = StressTestConfig::default();
        let tester = StressTester::new(config);
        let estimator = MockEstimator;

        let edge_cases = vec![
            EdgeCase::EmptyData,
            EdgeCase::SingleSample,
            EdgeCase::NumericalEdges,
        ];

        let result = tester
            .test_edge_cases(&estimator, &edge_cases)
            .expect("operation should succeed");
        assert!(result.success);
        assert_eq!(result.error_count, 0);
    }

    #[test]
    fn test_issue_detection() {
        let config = StressTestConfig {
            performance_threshold: 2.0,
            memory_limit_mb: 500,
            ..Default::default()
        };
        let tester = StressTester::new(config);

        // Crafted to exceed both the performance and memory thresholds.
        let result = StressTestResult {
            scenario: StressTestScenario::HighVolumeData {
                scale_factor: 1.0,
                batch_size: 100,
            },
            success: true,
            execution_time: Duration::from_secs(5),
            peak_memory_mb: 1000,
            avg_cpu_usage: 0.8,
            throughput: 100.0,
            error_count: 0,
            performance_degradation: 3.0,
            resource_stats: ResourceUsageStats::default(),
            issues: Vec::new(),
            metrics: HashMap::new(),
        };

        let issues = tester.detect_issues(&result);
        assert!(!issues.is_empty());

        let has_performance_issue = issues
            .iter()
            .any(|issue| matches!(issue, StressTestIssue::PerformanceDegradation { .. }));
        let has_resource_issue = issues
            .iter()
            .any(|issue| matches!(issue, StressTestIssue::ResourceExhaustion { .. }));

        assert!(has_performance_issue);
        assert!(has_resource_issue);
    }

    #[test]
    fn test_generate_report() {
        let config = StressTestConfig::default();
        let mut tester = StressTester::new(config);

        tester.results.push(StressTestResult {
            scenario: StressTestScenario::HighVolumeData {
                scale_factor: 1.0,
                batch_size: 100,
            },
            success: true,
            execution_time: Duration::from_secs(2),
            peak_memory_mb: 200,
            avg_cpu_usage: 0.5,
            throughput: 100.0,
            error_count: 0,
            performance_degradation: 1.0,
            resource_stats: ResourceUsageStats::default(),
            issues: Vec::new(),
            metrics: HashMap::new(),
        });

        let report = tester.generate_report();
        assert_eq!(report.total_tests, 1);
        assert_eq!(report.successful_tests, 1);
        assert_eq!(report.failed_tests, 0);
        assert!(!report.recommendations.is_empty());
    }
}
1081}