1use crate::error::{OptimError, Result};
8use crate::performance_regression_detector::{
9 EnvironmentInfo, MetricType, MetricValue, PerformanceMeasurement, TestConfiguration,
10};
11use serde::{Deserialize, Serialize};
12use std::collections::HashMap;
13use std::path::PathBuf;
14use std::process::{Command, Stdio};
15use std::time::{Duration, Instant, SystemTime};
16
17use super::config::{CiCdPlatform, TestExecutionConfig, TestIsolationLevel};
18
/// A collection of performance test cases plus the configuration and
/// context needed to run them, accumulating results across a run.
#[derive(Debug, Clone)]
pub struct PerformanceTestSuite {
    /// Registered test cases to execute.
    pub test_cases: Vec<PerformanceTestCase>,
    /// Suite-wide execution configuration (filtering, parallelism, retries).
    pub config: TestSuiteConfig,
    /// CI/CD pipeline context, when running inside a pipeline.
    pub context: Option<CiCdContext>,
    /// Results from the most recent `execute` call.
    pub results: Vec<CiCdTestResult>,
}
31
/// A single performance test: what to run, how to run it, and what
/// baseline (if any) to compare against.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceTestCase {
    /// Unique test name, also used by pattern-based filtering.
    pub name: String,
    /// Category used for include/exclude filtering and grouping.
    pub category: TestCategory,
    /// Mechanism that actually runs the test (shell, docker, cargo, ...).
    pub executor: TestExecutor,
    /// Free-form key/value parameters; some executors read specific keys
    /// (e.g. "command" for shell, "iterations" for the criterion path).
    pub parameters: HashMap<String, String>,
    /// Optional expected-performance baseline for regression comparison.
    pub baseline: Option<BaselineMetrics>,
    /// Per-test timeout; presumably seconds — TODO confirm (not enforced
    /// by the execution paths visible in this file).
    pub timeout: Option<u64>,
    /// Number of measured iterations.
    pub iterations: usize,
    /// Warm-up iterations before measurement. NOTE(review): not consumed
    /// by the criterion path in this file — confirm intended use.
    pub warmup_iterations: usize,
    /// Names of tests this test depends on.
    pub dependencies: Vec<String>,
    /// Tags used for include/exclude filtering.
    pub tags: Vec<String>,
    /// Host requirements checked before the test is run.
    pub environment_requirements: EnvironmentRequirements,
    /// Executor-specific extra configuration.
    pub custom_config: HashMap<String, String>,
}
60
/// Categories a performance test can belong to; used for filtering
/// (`TestFilteringConfig`) and grouping (`TestGroupingStrategy`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum TestCategory {
    /// Fast, isolated micro-level tests.
    Unit,
    /// Tests spanning multiple components.
    Integration,
    /// Whole-system tests.
    System,
    /// Load testing under expected traffic.
    Load,
    /// Stress testing beyond expected limits.
    Stress,
    /// Long-running endurance (soak) tests.
    Endurance,
    /// Sudden traffic-spike tests.
    Spike,
    /// Large-data-volume tests.
    Volume,
    /// Security-oriented performance tests.
    Security,
    /// Regression-detection tests.
    Regression,
    /// Pure benchmarking runs.
    Benchmark,
    /// User-defined category with a free-form label.
    Custom(String),
}
89
/// How a test case is executed; dispatched in `execute_test_by_executor`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum TestExecutor {
    /// Built-in loop benchmark standing in for a Criterion run.
    Criterion,
    /// Arbitrary shell command string run via `sh -c`.
    Custom(String),
    /// Shell command taken from the test's "command" parameter.
    Shell,
    /// `docker run` with the given image and extra CLI options.
    Docker { image: String, options: Vec<String> },
    /// Any external executable with fixed arguments.
    ExternalTool { tool: String, args: Vec<String> },
    /// `cargo run --bin <binary> -- <args>`.
    RustBinary { binary: String, args: Vec<String> },
    /// `python <script> <args>`.
    Python { script: String, args: Vec<String> },
}
108
/// Suite-wide execution configuration.
///
/// NOTE(review): the `include_*` flags are not consulted by the
/// filtering logic visible in this file (`should_include_test_case`
/// uses `filtering` only) — confirm they are applied elsewhere.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestSuiteConfig {
    /// Whether unit-category tests are included.
    pub include_unit: bool,
    /// Whether integration-category tests are included.
    pub include_integration: bool,
    /// Whether stress-category tests are included.
    pub include_stress: bool,
    /// Whether load-category tests are included.
    pub include_load: bool,
    /// Whether security-category tests are included.
    pub include_security: bool,
    /// Default per-test timeout (seconds, per the Default impl's 300).
    pub default_timeout: u64,
    /// Parallel execution settings (chunking, concurrency limits).
    pub parallel_execution: ParallelExecutionConfig,
    /// Resource monitoring settings.
    pub resource_monitoring: ResourceMonitoringConfig,
    /// Category/tag/pattern-based test selection.
    pub filtering: TestFilteringConfig,
    /// Retry behavior for failed tests.
    pub retry_config: TestRetryConfig,
}
133
/// Settings controlling concurrent test execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParallelExecutionConfig {
    /// When true, `execute` takes the `execute_parallel` path.
    pub enabled: bool,
    /// Chunk size used when batching tests (see `execute_parallel`).
    pub max_concurrent: usize,
    /// Optional explicit thread-pool size; None means implementation default.
    pub thread_pool_size: Option<usize>,
    /// How tests are grouped for scheduling.
    pub grouping_strategy: TestGroupingStrategy,
    /// Per-group resource limits.
    pub resource_allocation: ResourceAllocationConfig,
}
148
/// Strategy for grouping tests when scheduling parallel execution.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum TestGroupingStrategy {
    /// Group tests by their `TestCategory`.
    ByCategory,
    /// Group tests by (expected) execution time.
    ByExecutionTime,
    /// Group tests by their resource requirements.
    ByResourceRequirements,
    /// No grouping.
    None,
    /// User-defined strategy identified by a label.
    Custom(String),
}
163
/// Resource limits to allocate to test execution; `None` means unlimited.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceAllocationConfig {
    /// CPU core cap.
    pub cpu_cores: Option<usize>,
    /// Memory cap in megabytes.
    pub memory_limit_mb: Option<usize>,
    /// Disk usage cap in megabytes.
    pub disk_limit_mb: Option<usize>,
    /// Network bandwidth cap in megabits per second.
    pub network_limit_mbps: Option<f64>,
}
176
/// Which host resources to monitor during test runs and how often.
///
/// NOTE(review): the execution paths in this file produce simulated
/// resource reports (`generate_resource_usage_report`); confirm where
/// this config is actually consumed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceMonitoringConfig {
    /// Sample CPU utilization.
    pub monitor_cpu: bool,
    /// Sample memory usage.
    pub monitor_memory: bool,
    /// Sample disk I/O throughput.
    pub monitor_disk_io: bool,
    /// Sample network throughput.
    pub monitor_network: bool,
    /// Sampling interval in milliseconds.
    pub monitoring_frequency_ms: u64,
    /// Thresholds that trigger resource alerts.
    pub alert_thresholds: ResourceAlertThresholds,
}
193
/// Alert thresholds for monitored resources (percent / rate units per
/// the Default impl: CPU 90.0, memory 85.0, disk 90.0, network 100.0).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceAlertThresholds {
    /// CPU utilization alert threshold.
    pub cpu_threshold: f64,
    /// Memory utilization alert threshold.
    pub memory_threshold: f64,
    /// Disk utilization alert threshold.
    pub disk_threshold: f64,
    /// Network utilization alert threshold.
    pub network_threshold: f64,
}
206
/// Test-selection rules consumed by `should_include_test_case`.
///
/// Empty include lists mean "include everything"; exclusion always wins
/// over inclusion.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct TestFilteringConfig {
    /// Categories to include (empty = all).
    pub include_categories: Vec<TestCategory>,
    /// Categories to exclude.
    pub exclude_categories: Vec<TestCategory>,
    /// Tags to include (empty = all; any match suffices).
    pub include_tags: Vec<String>,
    /// Tags to exclude.
    pub exclude_tags: Vec<String>,
    /// Substring patterns a test name must match (empty = all).
    pub include_patterns: Vec<String>,
    /// Substring patterns that exclude a test by name.
    pub exclude_patterns: Vec<String>,
    /// Whether filtering is platform-specific. NOTE(review): not consulted
    /// in this file — confirm where it is applied.
    pub platform_specific: bool,
}
225
/// Retry policy for failed tests.
///
/// NOTE(review): the execution paths visible in this file do not retry;
/// confirm where this policy is enforced.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestRetryConfig {
    /// Whether retries are enabled at all.
    pub enabled: bool,
    /// Maximum retry attempts per test.
    pub max_retries: u32,
    /// Base delay between retries, in seconds.
    pub retry_delay_sec: u64,
    /// Multiplier applied to the delay after each attempt (exponential backoff).
    pub backoff_multiplier: f64,
    /// Failure kinds that qualify for a retry.
    pub retry_on_failures: Vec<TestFailureType>,
}
240
/// Kinds of test failure, used to decide whether a retry is warranted.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum TestFailureType {
    /// The test exceeded its time limit.
    Timeout,
    /// The test ran out of CPU/memory/disk resources.
    ResourceExhaustion,
    /// A network-related failure.
    Network,
    /// A transient, likely self-resolving error.
    TransientError,
    /// Failure while preparing the test environment.
    EnvironmentSetup,
    /// Matches every failure type.
    All,
}
257
/// Host prerequisites a test needs before it can run.
///
/// NOTE(review): `check_environment_requirements` in this file validates
/// only `os`, `architecture`, `min_cpu_cores`, and `required_env_vars`;
/// the remaining fields are not checked there.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct EnvironmentRequirements {
    /// Required operating system (compared against `std::env::consts::OS`).
    pub os: Option<String>,
    /// Required CPU architecture (compared against `std::env::consts::ARCH`).
    pub architecture: Option<String>,
    /// Minimum number of CPU cores.
    pub min_cpu_cores: Option<usize>,
    /// Minimum memory, in megabytes.
    pub min_memory_mb: Option<usize>,
    /// Environment variables that must be set.
    pub required_env_vars: Vec<String>,
    /// Software packages the test depends on.
    pub dependencies: Vec<SoftwareDependency>,
    /// Network connectivity the test needs.
    pub network_access: NetworkAccessRequirements,
    /// Filesystem permissions the test needs.
    pub file_permissions: Vec<FilePermissionRequirement>,
}
278
/// A software package a test depends on.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SoftwareDependency {
    /// Package name.
    pub name: String,
    /// Required version, if any specific one is needed.
    pub version: Option<String>,
    /// Where the package comes from (system, cargo, pip, ...).
    pub source: DependencySource,
    /// When true, absence of the dependency is not fatal.
    pub optional: bool,
}
291
/// Package manager / source a `SoftwareDependency` is installed from.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum DependencySource {
    /// Pre-installed on the host system.
    System,
    /// Rust's cargo registry.
    Cargo,
    /// Node.js npm registry.
    Npm,
    /// Python pip.
    Pip,
    /// Debian/Ubuntu apt.
    Apt,
    /// RHEL/CentOS yum.
    Yum,
    /// macOS Homebrew.
    Homebrew,
    /// Any other source, identified by a label.
    Custom(String),
}
312
/// Network connectivity a test requires.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct NetworkAccessRequirements {
    /// Whether general internet access is needed.
    pub internet_access: bool,
    /// TCP/UDP ports that must be reachable.
    pub required_ports: Vec<u16>,
    /// Hostnames that must be reachable.
    pub required_hosts: Vec<String>,
    /// Maximum acceptable latency, in milliseconds.
    pub max_latency_ms: Option<u64>,
    /// Minimum required bandwidth, in megabits per second.
    pub min_bandwidth_mbps: Option<f64>,
}
327
/// A filesystem permission a test needs on a specific path.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FilePermissionRequirement {
    /// Filesystem path the requirement applies to.
    pub path: String,
    /// Raw permission bits (presumably Unix mode bits — TODO confirm).
    pub permissions: u32,
    /// Path must be readable.
    pub readable: bool,
    /// Path must be writable.
    pub writable: bool,
    /// Path must be executable.
    pub executable: bool,
}
342
/// Expected-performance baselines a test's measurements are compared to.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BaselineMetrics {
    /// Baseline for wall-clock execution time.
    pub execution_time: Option<MetricBaseline>,
    /// Baseline for memory usage.
    pub memory_usage: Option<MetricBaseline>,
    /// Baseline for CPU usage.
    pub cpu_usage: Option<MetricBaseline>,
    /// Baseline for throughput.
    pub throughput: Option<MetricBaseline>,
    /// Baselines for additional, named metrics.
    pub custom_metrics: HashMap<String, MetricBaseline>,
}
357
/// Baseline for one metric: expected value plus acceptable deviation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricBaseline {
    /// Value the metric is expected to take.
    pub expected_value: f64,
    /// Allowed deviation from the expected value.
    pub variance_threshold: f64,
    /// Optional hard upper bound.
    pub upper_bound: Option<f64>,
    /// Optional hard lower bound.
    pub lower_bound: Option<f64>,
    /// Unit the values are expressed in (e.g. "s", "MB").
    pub unit: String,
}
372
/// Outcome of running one performance test in the CI/CD pipeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CiCdTestResult {
    /// Name of the executed test case.
    pub test_name: String,
    /// Final (or current) execution status.
    pub status: TestExecutionStatus,
    /// When execution started.
    pub start_time: SystemTime,
    /// When execution finished; None while still running.
    pub end_time: Option<SystemTime>,
    /// Total wall-clock duration, when both timestamps are known.
    pub duration: Option<Duration>,
    /// Performance measurements collected during the run.
    pub measurements: Vec<PerformanceMeasurement>,
    /// Error description when the test failed or errored.
    pub error_message: Option<String>,
    /// Captured stdout of the test.
    pub output: String,
    /// Resource consumption during the run.
    pub resource_usage: ResourceUsageReport,
    /// Executor, environment, and CI context the test ran under.
    pub metadata: TestExecutionMetadata,
    /// Regression analysis, when one was performed.
    pub regression_analysis: Option<RegressionAnalysisResult>,
}
399
/// Lifecycle / outcome state of a test execution.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum TestExecutionStatus {
    /// Completed successfully.
    Passed,
    /// Ran but did not meet expectations (executor returned an error).
    Failed,
    /// Deliberately not run.
    Skipped,
    /// Exceeded its time limit.
    TimedOut,
    /// Could not run (e.g. environment requirements unmet).
    Error,
    /// Queued but not yet started.
    Pending,
    /// Currently executing.
    Running,
}
418
/// Aggregated resource consumption over one test run, plus a sampled
/// timeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceUsageReport {
    /// Peak memory usage in megabytes.
    pub peak_memory_mb: f64,
    /// Average CPU utilization (percent).
    pub avg_cpu_percent: f64,
    /// Peak CPU utilization (percent).
    pub peak_cpu_percent: f64,
    /// Total disk I/O in megabytes.
    pub disk_io_mb: f64,
    /// Total network traffic in megabytes.
    pub network_usage_mb: f64,
    /// Point-in-time samples taken during the run.
    pub timeline: Vec<ResourceSnapshot>,
}
435
/// One point-in-time resource sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceSnapshot {
    /// When the sample was taken.
    pub timestamp: SystemTime,
    /// Memory usage at that instant, in megabytes.
    pub memory_mb: f64,
    /// CPU utilization at that instant (percent).
    pub cpu_percent: f64,
    /// Disk I/O rate, in megabytes per second.
    pub disk_io_mbps: f64,
    /// Network rate, in megabits per second.
    pub network_mbps: f64,
}
450
/// Everything describing *how* and *where* a test was run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestExecutionMetadata {
    /// Executor used for the run.
    pub executor: TestExecutor,
    /// Parameters the test was invoked with.
    pub parameters: HashMap<String, String>,
    /// Snapshot of the host environment.
    pub environment: EnvironmentInfo,
    /// Git state at run time, when available.
    pub git_info: Option<GitInfo>,
    /// CI/CD pipeline context, when running inside a pipeline.
    pub ci_context: Option<CiCdContext>,
    /// Test configuration handed to the regression detector.
    pub test_config: TestConfiguration,
}
467
/// Git repository state captured at execution time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitInfo {
    /// Commit hash of HEAD ("unknown" when git is unavailable).
    pub commit_hash: String,
    /// Current branch name ("unknown" when git is unavailable).
    pub branch: String,
    /// Commit message, when collected.
    pub commit_message: Option<String>,
    /// Commit author, when collected.
    pub author: Option<String>,
    /// Commit timestamp, when collected.
    pub commit_time: Option<SystemTime>,
    /// Remote repository URL, when collected.
    pub repository_url: Option<String>,
    /// Whether the working tree had no uncommitted changes.
    pub is_clean: bool,
}
486
/// Context describing the CI/CD pipeline run a suite executes inside.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CiCdContext {
    /// CI/CD platform (GitHub Actions, Jenkins, ...).
    pub platform: CiCdPlatform,
    /// Platform-assigned build identifier.
    pub build_id: String,
    /// Sequential build number, when the platform provides one.
    pub build_number: Option<u64>,
    /// Event that triggered the build.
    pub trigger: TriggerEvent,
    /// Environment variables exposed by the CI platform.
    pub environment_vars: HashMap<String, String>,
    /// URL of the build page, when available.
    pub build_url: Option<String>,
    /// Pull-request details, when triggered by a PR.
    pub pull_request: Option<PullRequestInfo>,
    /// User or system that triggered the build.
    pub triggered_by: Option<String>,
}
507
/// Event kinds that can start a CI/CD build.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum TriggerEvent {
    /// Commit pushed to a branch.
    Push,
    /// Pull/merge request opened or updated.
    PullRequest,
    /// Release published.
    Release,
    /// Scheduled (cron) build.
    Schedule,
    /// Manually started by a user.
    Manual,
    /// Started via the platform's API.
    Api,
    /// Started by an incoming webhook.
    Webhook,
}
526
/// Details of the pull request that triggered a build.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PullRequestInfo {
    /// PR number on the hosting platform.
    pub number: u64,
    /// Branch the PR merges from.
    pub source_branch: String,
    /// Branch the PR merges into.
    pub target_branch: String,
    /// PR title.
    pub title: String,
    /// PR author.
    pub author: String,
    /// URL of the PR page, when available.
    pub url: Option<String>,
}
543
/// Outcome of comparing a test run against its baseline/history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegressionAnalysisResult {
    /// Whether a performance regression was detected.
    pub regression_detected: bool,
    /// Confidence in the detection (presumably 0.0–1.0 — TODO confirm).
    pub confidence: f64,
    /// Names of the metrics that regressed.
    pub affected_metrics: Vec<String>,
    /// Overall performance change, as a percentage.
    pub performance_change_percent: f64,
    /// Statistical significance of the observed change.
    pub statistical_significance: f64,
    /// Suggested follow-up actions.
    pub recommendations: Vec<String>,
}
560
561impl PerformanceTestSuite {
562 pub fn new(config: TestSuiteConfig) -> Result<Self> {
564 Ok(Self {
565 test_cases: Vec::new(),
566 config,
567 context: None,
568 results: Vec::new(),
569 })
570 }
571
    /// Registers a test case for execution by a later `execute` call.
    pub fn add_test_case(&mut self, test_case: PerformanceTestCase) {
        self.test_cases.push(test_case);
    }
576
    /// Attaches CI/CD pipeline context; recorded in each result's metadata.
    pub fn set_context(&mut self, context: CiCdContext) {
        self.context = Some(context);
    }
581
582 pub fn execute(&mut self) -> Result<Vec<CiCdTestResult>> {
584 let filtered_tests = self.filter_test_cases()?;
586
587 let results = if self.config.parallel_execution.enabled {
589 self.execute_parallel(&filtered_tests)?
590 } else {
591 self.execute_sequential(&filtered_tests)?
592 };
593
594 self.results = results.clone();
595 Ok(results)
596 }
597
598 fn filter_test_cases(&self) -> Result<Vec<&PerformanceTestCase>> {
600 let mut filtered = Vec::new();
601
602 for test_case in &self.test_cases {
603 if self.should_include_test_case(test_case) {
604 filtered.push(test_case);
605 }
606 }
607
608 Ok(filtered)
609 }
610
611 fn should_include_test_case(&self, test_case: &PerformanceTestCase) -> bool {
613 let filtering = &self.config.filtering;
614
615 if !filtering.include_categories.is_empty()
617 && !filtering.include_categories.contains(&test_case.category)
618 {
619 return false;
620 }
621
622 if filtering.exclude_categories.contains(&test_case.category) {
624 return false;
625 }
626
627 if !filtering.include_tags.is_empty() {
629 let has_included_tag = filtering
630 .include_tags
631 .iter()
632 .any(|tag| test_case.tags.contains(tag));
633 if !has_included_tag {
634 return false;
635 }
636 }
637
638 for excluded_tag in &filtering.exclude_tags {
640 if test_case.tags.contains(excluded_tag) {
641 return false;
642 }
643 }
644
645 if !filtering.include_patterns.is_empty() {
647 let matches_pattern = filtering
648 .include_patterns
649 .iter()
650 .any(|pattern| test_case.name.contains(pattern));
651 if !matches_pattern {
652 return false;
653 }
654 }
655
656 for excluded_pattern in &filtering.exclude_patterns {
657 if test_case.name.contains(excluded_pattern) {
658 return false;
659 }
660 }
661
662 true
663 }
664
665 fn execute_sequential(
667 &self,
668 test_cases: &[&PerformanceTestCase],
669 ) -> Result<Vec<CiCdTestResult>> {
670 let mut results = Vec::new();
671
672 for test_case in test_cases {
673 let result = self.execute_single_test(test_case)?;
674 results.push(result);
675
676 if let TestExecutionStatus::Failed | TestExecutionStatus::Error =
678 results.last().expect("unwrap failed").status
679 {
680 }
683 }
684
685 Ok(results)
686 }
687
    /// Executes test cases grouped into chunks of `max_concurrent`.
    ///
    /// NOTE(review): despite the name, tests inside each chunk run one
    /// after another on the current thread — no threads are spawned, so
    /// this is currently equivalent to sequential execution. True
    /// parallelism would need a thread pool / scoped threads here.
    fn execute_parallel(&self, test_cases: &[&PerformanceTestCase]) -> Result<Vec<CiCdTestResult>> {
        let mut results = Vec::new();

        // NOTE(review): `chunks` panics if `max_concurrent` is 0 — the
        // config is not validated against that here.
        let max_concurrent = self.config.parallel_execution.max_concurrent;
        let chunks: Vec<_> = test_cases.chunks(max_concurrent).collect();

        for chunk in chunks {
            let mut chunk_results = Vec::new();

            for test_case in chunk {
                let result = self.execute_single_test(test_case)?;
                chunk_results.push(result);
            }

            results.extend(chunk_results);
        }

        Ok(results)
    }
710
    /// Runs one test case end to end and packages the outcome.
    ///
    /// Flow: build a Running result skeleton → verify environment
    /// requirements (on failure: status Error, early return) → dispatch
    /// to the executor (Ok → Passed with measurements; Err → Failed with
    /// message) → stamp end time/duration and attach a resource report.
    ///
    /// Hard `Err` is only returned for infrastructure problems (metadata
    /// or report construction), never for the test's own pass/fail.
    fn execute_single_test(&self, test_case: &PerformanceTestCase) -> Result<CiCdTestResult> {
        let start_time = SystemTime::now();
        // Skeleton result; fields are filled in as execution progresses.
        let mut result = CiCdTestResult {
            test_name: test_case.name.clone(),
            status: TestExecutionStatus::Running,
            start_time,
            end_time: None,
            duration: None,
            measurements: Vec::new(),
            error_message: None,
            output: String::new(),
            resource_usage: ResourceUsageReport::default(),
            metadata: self.create_test_metadata(test_case)?,
            regression_analysis: None,
        };

        // Unmet environment requirements are an Error (could not run),
        // not a Failed (ran and did not pass).
        if let Err(e) = self.check_environment_requirements(&test_case.environment_requirements) {
            result.status = TestExecutionStatus::Error;
            result.error_message = Some(format!("Environment requirements not met: {}", e));
            result.end_time = Some(SystemTime::now());
            return Ok(result);
        }

        match self.execute_test_by_executor(test_case) {
            Ok((measurements, output)) => {
                result.status = TestExecutionStatus::Passed;
                result.measurements = measurements;
                result.output = output;
            }
            Err(e) => {
                result.status = TestExecutionStatus::Failed;
                result.error_message = Some(e.to_string());
            }
        }

        let end_time = SystemTime::now();
        result.end_time = Some(end_time);
        // duration_since can fail if the clock went backwards; None then.
        result.duration = end_time.duration_since(start_time).ok();

        result.resource_usage = self.generate_resource_usage_report(start_time, end_time)?;

        Ok(result)
    }
758
    /// Dispatches a test case to the executor-specific implementation.
    ///
    /// Returns the collected measurements plus the captured stdout text.
    fn execute_test_by_executor(
        &self,
        test_case: &PerformanceTestCase,
    ) -> Result<(Vec<PerformanceMeasurement>, String)> {
        match &test_case.executor {
            TestExecutor::Criterion => self.execute_criterion_test(test_case),
            TestExecutor::Custom(cmd) => self.execute_custom_test(test_case, cmd),
            TestExecutor::Shell => self.execute_shell_test(test_case),
            TestExecutor::Docker { image, options } => {
                self.execute_docker_test(test_case, image, options)
            }
            TestExecutor::ExternalTool { tool, args } => {
                self.execute_external_tool_test(test_case, tool, args)
            }
            TestExecutor::RustBinary { binary, args } => {
                self.execute_rust_binary_test(test_case, binary, args)
            }
            TestExecutor::Python { script, args } => {
                self.execute_python_test(test_case, script, args)
            }
        }
    }
782
783 fn execute_criterion_test(
785 &self,
786 test_case: &PerformanceTestCase,
787 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
788 let iterations = test_case
790 .parameters
791 .get("iterations")
792 .and_then(|s| s.parse::<usize>().ok())
793 .unwrap_or(test_case.iterations);
794
795 let start = Instant::now();
796 let result = self.execute_simple_loop_benchmark(iterations);
797 let duration = start.elapsed();
798
799 let mut metrics = HashMap::new();
800 metrics.insert(
801 crate::performance_regression_detector::MetricType::ExecutionTime,
802 MetricValue {
803 value: duration.as_secs_f64(),
804 std_dev: None,
805 sample_count: 1,
806 min_value: duration.as_secs_f64(),
807 max_value: duration.as_secs_f64(),
808 percentiles: None,
809 },
810 );
811
812 let measurement = PerformanceMeasurement {
813 timestamp: SystemTime::now(),
814 commithash: "unknown".to_string(), branch: "unknown".to_string(), build_config: "unknown".to_string(), environment: crate::performance_regression_detector::EnvironmentInfo {
818 os: std::env::consts::OS.to_string(),
819 cpu_model: std::env::consts::ARCH.to_string(),
820 cpu_cores: num_cpus::get(),
821 total_memory_mb: 0,
822 gpu_info: None,
823 compiler_version: "unknown".to_string(),
824 rust_version: "unknown".to_string(),
825 env_vars: HashMap::new(),
826 },
827 metrics,
828 test_config: crate::performance_regression_detector::TestConfiguration {
829 test_name: test_case.name.clone(),
830 parameters: test_case.parameters.clone(),
831 dataset_size: None,
832 iterations: Some(1),
833 batch_size: None,
834 precision: "f64".to_string(),
835 },
836 metadata: HashMap::new(),
837 };
838
839 let output = format!("Criterion benchmark completed in {:?}", duration);
840 Ok((vec![measurement], output))
841 }
842
843 fn execute_simple_loop_benchmark(&self, iterations: usize) -> f64 {
845 let start = Instant::now();
846 let mut sum = 0u64;
847 for i in 0..iterations {
848 sum = sum.wrapping_add(i as u64);
849 }
850 let _ = sum; start.elapsed().as_secs_f64()
852 }
853
854 fn execute_custom_test(
856 &self,
857 test_case: &PerformanceTestCase,
858 command: &str,
859 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
860 let output = Command::new("sh")
861 .arg("-c")
862 .arg(command)
863 .output()
864 .map_err(|e| {
865 OptimError::InvalidConfig(format!("Failed to execute custom test: {}", e))
866 })?;
867
868 let output_str = String::from_utf8_lossy(&output.stdout).to_string();
869
870 let measurements = self.parse_performance_output(&output_str, test_case)?;
872
873 Ok((measurements, output_str))
874 }
875
876 fn execute_shell_test(
878 &self,
879 test_case: &PerformanceTestCase,
880 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
881 let command = test_case.parameters.get("command").ok_or_else(|| {
882 OptimError::InvalidConfig("Shell test requires 'command' parameter".to_string())
883 })?;
884
885 self.execute_custom_test(test_case, command)
886 }
887
888 fn execute_docker_test(
890 &self,
891 test_case: &PerformanceTestCase,
892 image: &str,
893 options: &[String],
894 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
895 let mut cmd = Command::new("docker");
896 cmd.arg("run");
897
898 for option in options {
899 cmd.arg(option);
900 }
901
902 cmd.arg(image);
903
904 if let Some(command) = test_case.parameters.get("command") {
905 cmd.args(command.split_whitespace());
906 }
907
908 let output = cmd.output().map_err(|e| {
909 OptimError::InvalidConfig(format!("Failed to execute Docker test: {}", e))
910 })?;
911
912 let output_str = String::from_utf8_lossy(&output.stdout).to_string();
913 let measurements = self.parse_performance_output(&output_str, test_case)?;
914
915 Ok((measurements, output_str))
916 }
917
918 fn execute_external_tool_test(
920 &self,
921 test_case: &PerformanceTestCase,
922 tool: &str,
923 args: &[String],
924 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
925 let mut cmd = Command::new(tool);
926 cmd.args(args);
927
928 let output = cmd.output().map_err(|e| {
929 OptimError::InvalidConfig(format!("Failed to execute external tool: {}", e))
930 })?;
931
932 let output_str = String::from_utf8_lossy(&output.stdout).to_string();
933 let measurements = self.parse_performance_output(&output_str, test_case)?;
934
935 Ok((measurements, output_str))
936 }
937
938 fn execute_rust_binary_test(
940 &self,
941 test_case: &PerformanceTestCase,
942 binary: &str,
943 args: &[String],
944 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
945 let mut cmd = Command::new("cargo");
946 cmd.arg("run").arg("--bin").arg(binary).arg("--");
947 cmd.args(args);
948
949 let output = cmd.output().map_err(|e| {
950 OptimError::InvalidConfig(format!("Failed to execute Rust binary: {}", e))
951 })?;
952
953 let output_str = String::from_utf8_lossy(&output.stdout).to_string();
954 let measurements = self.parse_performance_output(&output_str, test_case)?;
955
956 Ok((measurements, output_str))
957 }
958
959 fn execute_python_test(
961 &self,
962 test_case: &PerformanceTestCase,
963 script: &str,
964 args: &[String],
965 ) -> Result<(Vec<PerformanceMeasurement>, String)> {
966 let mut cmd = Command::new("python");
967 cmd.arg(script);
968 cmd.args(args);
969
970 let output = cmd.output().map_err(|e| {
971 OptimError::InvalidConfig(format!("Failed to execute Python script: {}", e))
972 })?;
973
974 let output_str = String::from_utf8_lossy(&output.stdout).to_string();
975 let measurements = self.parse_performance_output(&output_str, test_case)?;
976
977 Ok((measurements, output_str))
978 }
979
980 fn parse_performance_output(
982 &self,
983 output: &str,
984 test_case: &PerformanceTestCase,
985 ) -> Result<Vec<PerformanceMeasurement>> {
986 let mut measurements = Vec::new();
987
988 for line in output.lines() {
990 if line.contains("time:") || line.contains("duration:") {
991 if let Some(duration_str) = self.extract_duration_from_line(line) {
992 if let Ok(duration) = duration_str.parse::<f64>() {
993 let mut metrics = HashMap::new();
994 metrics.insert(
995 crate::performance_regression_detector::MetricType::ExecutionTime,
996 MetricValue {
997 value: duration,
998 std_dev: None,
999 sample_count: 1,
1000 min_value: duration,
1001 max_value: duration,
1002 percentiles: None,
1003 },
1004 );
1005
1006 measurements.push(PerformanceMeasurement {
1007 timestamp: SystemTime::now(),
1008 commithash: "unknown".to_string(),
1009 branch: "unknown".to_string(),
1010 build_config: "unknown".to_string(),
1011 environment: crate::performance_regression_detector::EnvironmentInfo {
1012 os: std::env::consts::OS.to_string(),
1013 cpu_model: std::env::consts::ARCH.to_string(),
1014 cpu_cores: num_cpus::get(),
1015 total_memory_mb: 0,
1016 gpu_info: None,
1017 compiler_version: "unknown".to_string(),
1018 rust_version: "unknown".to_string(),
1019 env_vars: HashMap::new(),
1020 },
1021 metrics,
1022 test_config:
1023 crate::performance_regression_detector::TestConfiguration {
1024 test_name: test_case.name.clone(),
1025 parameters: test_case.parameters.clone(),
1026 dataset_size: None,
1027 iterations: Some(1),
1028 batch_size: None,
1029 precision: "f64".to_string(),
1030 },
1031 metadata: HashMap::new(),
1032 });
1033 }
1034 }
1035 }
1036 }
1037
1038 if measurements.is_empty() {
1040 let mut metrics = HashMap::new();
1041 metrics.insert(
1042 crate::performance_regression_detector::MetricType::ExecutionTime,
1043 MetricValue {
1044 value: 1.0, std_dev: None,
1046 sample_count: 1,
1047 min_value: 1.0,
1048 max_value: 1.0,
1049 percentiles: None,
1050 },
1051 );
1052
1053 measurements.push(PerformanceMeasurement {
1054 timestamp: SystemTime::now(),
1055 commithash: "unknown".to_string(),
1056 branch: "unknown".to_string(),
1057 build_config: "unknown".to_string(),
1058 environment: crate::performance_regression_detector::EnvironmentInfo {
1059 os: std::env::consts::OS.to_string(),
1060 cpu_model: std::env::consts::ARCH.to_string(),
1061 cpu_cores: num_cpus::get(),
1062 total_memory_mb: 0,
1063 gpu_info: None,
1064 compiler_version: "unknown".to_string(),
1065 rust_version: "unknown".to_string(),
1066 env_vars: HashMap::new(),
1067 },
1068 metrics,
1069 test_config: crate::performance_regression_detector::TestConfiguration {
1070 test_name: test_case.name.clone(),
1071 parameters: test_case.parameters.clone(),
1072 dataset_size: None,
1073 iterations: Some(1),
1074 batch_size: None,
1075 precision: "f64".to_string(),
1076 },
1077 metadata: HashMap::new(),
1078 });
1079 }
1080
1081 Ok(measurements)
1082 }
1083
1084 fn extract_duration_from_line(&self, line: &str) -> Option<String> {
1086 let parts: Vec<&str> = line.split_whitespace().collect();
1088 for i in 0..parts.len() {
1089 if (parts[i].contains("time") || parts[i].contains("duration")) && i + 1 < parts.len() {
1090 return Some(parts[i + 1].replace("ms", "").replace("s", ""));
1091 }
1092 }
1093 None
1094 }
1095
1096 fn check_environment_requirements(&self, requirements: &EnvironmentRequirements) -> Result<()> {
1098 if let Some(required_os) = &requirements.os {
1100 let current_os = std::env::consts::OS;
1101 if current_os != required_os {
1102 return Err(OptimError::InvalidConfig(format!(
1103 "Required OS: {}, Current OS: {}",
1104 required_os, current_os
1105 )));
1106 }
1107 }
1108
1109 if let Some(required_arch) = &requirements.architecture {
1111 let current_arch = std::env::consts::ARCH;
1112 if current_arch != required_arch {
1113 return Err(OptimError::InvalidConfig(format!(
1114 "Required architecture: {}, Current architecture: {}",
1115 required_arch, current_arch
1116 )));
1117 }
1118 }
1119
1120 if let Some(min_cores) = requirements.min_cpu_cores {
1122 let available_cores = num_cpus::get();
1123 if available_cores < min_cores {
1124 return Err(OptimError::InvalidConfig(format!(
1125 "Required CPU cores: {}, Available: {}",
1126 min_cores, available_cores
1127 )));
1128 }
1129 }
1130
1131 for env_var in &requirements.required_env_vars {
1133 if std::env::var(env_var).is_err() {
1134 return Err(OptimError::InvalidConfig(format!(
1135 "Required environment variable not set: {}",
1136 env_var
1137 )));
1138 }
1139 }
1140
1141 Ok(())
1142 }
1143
    /// Assembles the metadata recorded with each test result: executor,
    /// parameters, host environment, best-effort git state, and the
    /// suite's CI/CD context.
    fn create_test_metadata(
        &self,
        test_case: &PerformanceTestCase,
    ) -> Result<TestExecutionMetadata> {
        Ok(TestExecutionMetadata {
            executor: test_case.executor.clone(),
            parameters: test_case.parameters.clone(),
            environment: self.gather_environment_info()?,
            // Git lookup is best-effort: failures become None.
            git_info: self.gather_git_info().ok(),
            ci_context: self.context.clone(),
            test_config: crate::performance_regression_detector::TestConfiguration {
                test_name: test_case.name.clone(),
                parameters: test_case.parameters.clone(),
                dataset_size: None,
                iterations: Some(test_case.iterations),
                batch_size: None,
                precision: "f64".to_string(),
            },
        })
    }
1165
    /// Builds an `EnvironmentInfo` snapshot of the current host.
    ///
    /// NOTE(review): `total_memory_mb` is hard-coded to 0 and the
    /// compiler/rust versions to "unknown" — no probing is implemented.
    /// `cpu_model` actually carries the CPU *architecture* string, not a
    /// model name.
    fn gather_environment_info(
        &self,
    ) -> Result<crate::performance_regression_detector::EnvironmentInfo> {
        Ok(crate::performance_regression_detector::EnvironmentInfo {
            os: std::env::consts::OS.to_string(),
            cpu_model: std::env::consts::ARCH.to_string(),
            cpu_cores: num_cpus::get(),
            total_memory_mb: 0, // placeholder — not probed
            gpu_info: None,
            compiler_version: "unknown".to_string(),
            rust_version: "unknown".to_string(),
            env_vars: std::env::vars().collect(),
        })
    }
1181
1182 fn gather_git_info(&self) -> Result<GitInfo> {
1184 let commit_hash = Command::new("git")
1186 .args(["rev-parse", "HEAD"])
1187 .output()
1188 .map(|output| String::from_utf8_lossy(&output.stdout).trim().to_string())
1189 .unwrap_or_else(|_| "unknown".to_string());
1190
1191 let branch = Command::new("git")
1192 .args(["rev-parse", "--abbrev-ref", "HEAD"])
1193 .output()
1194 .map(|output| String::from_utf8_lossy(&output.stdout).trim().to_string())
1195 .unwrap_or_else(|_| "unknown".to_string());
1196
1197 Ok(GitInfo {
1198 commit_hash,
1199 branch,
1200 commit_message: None,
1201 author: None,
1202 commit_time: None,
1203 repository_url: None,
1204 is_clean: true, })
1206 }
1207
    /// Produces the resource-usage report attached to a test result.
    ///
    /// NOTE(review): all values here are hard-coded placeholders — no
    /// actual sampling is performed; the timeline just brackets the run
    /// with two synthetic snapshots. Replace with real monitoring driven
    /// by `ResourceMonitoringConfig` when implemented.
    fn generate_resource_usage_report(
        &self,
        start_time: SystemTime,
        end_time: SystemTime,
    ) -> Result<ResourceUsageReport> {
        Ok(ResourceUsageReport {
            peak_memory_mb: 100.0,   // simulated
            avg_cpu_percent: 50.0,   // simulated
            peak_cpu_percent: 80.0,  // simulated
            disk_io_mb: 10.0,        // simulated
            network_usage_mb: 1.0,   // simulated
            timeline: vec![
                ResourceSnapshot {
                    timestamp: start_time,
                    memory_mb: 80.0,
                    cpu_percent: 40.0,
                    disk_io_mbps: 1.0,
                    network_mbps: 0.1,
                },
                ResourceSnapshot {
                    timestamp: end_time,
                    memory_mb: 100.0,
                    cpu_percent: 60.0,
                    disk_io_mbps: 2.0,
                    network_mbps: 0.2,
                },
            ],
        })
    }
1238
1239 pub fn get_statistics(&self) -> TestSuiteStatistics {
1241 let total_tests = self.results.len();
1242 let passed = self
1243 .results
1244 .iter()
1245 .filter(|r| r.status == TestExecutionStatus::Passed)
1246 .count();
1247 let failed = self
1248 .results
1249 .iter()
1250 .filter(|r| r.status == TestExecutionStatus::Failed)
1251 .count();
1252 let skipped = self
1253 .results
1254 .iter()
1255 .filter(|r| r.status == TestExecutionStatus::Skipped)
1256 .count();
1257 let errors = self
1258 .results
1259 .iter()
1260 .filter(|r| r.status == TestExecutionStatus::Error)
1261 .count();
1262
1263 let total_duration = self
1264 .results
1265 .iter()
1266 .filter_map(|r| r.duration)
1267 .fold(Duration::ZERO, |acc, d| acc + d);
1268
1269 TestSuiteStatistics {
1270 total_tests,
1271 passed,
1272 failed,
1273 skipped,
1274 errors,
1275 total_duration,
1276 success_rate: if total_tests > 0 {
1277 passed as f64 / total_tests as f64
1278 } else {
1279 0.0
1280 },
1281 }
1282 }
1283}
1284
/// Summary statistics for a completed suite run (see `get_statistics`).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct TestSuiteStatistics {
    /// Number of results in the suite.
    pub total_tests: usize,
    /// Results with status Passed.
    pub passed: usize,
    /// Results with status Failed.
    pub failed: usize,
    /// Results with status Skipped.
    pub skipped: usize,
    /// Results with status Error.
    pub errors: usize,
    /// Sum of all known per-test durations.
    pub total_duration: Duration,
    /// passed / total_tests (0.0 when there are no results).
    pub success_rate: f64,
}
1303
/// Defaults: unit and integration tests on, heavier categories off,
/// 300-second default timeout.
impl Default for TestSuiteConfig {
    fn default() -> Self {
        Self {
            include_unit: true,
            include_integration: true,
            include_stress: false,
            include_load: false,
            include_security: false,
            default_timeout: 300, // seconds
            parallel_execution: ParallelExecutionConfig::default(),
            resource_monitoring: ResourceMonitoringConfig::default(),
            filtering: TestFilteringConfig::default(),
            retry_config: TestRetryConfig::default(),
        }
    }
}
1322
/// Defaults: parallel mode enabled with one slot per logical CPU.
impl Default for ParallelExecutionConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            // One concurrent test per logical CPU.
            max_concurrent: num_cpus::get(),
            thread_pool_size: None,
            grouping_strategy: TestGroupingStrategy::ByCategory,
            resource_allocation: ResourceAllocationConfig::default(),
        }
    }
}
1334
/// Defaults: 1 GiB memory / 10 GiB disk caps, CPU and network unlimited.
impl Default for ResourceAllocationConfig {
    fn default() -> Self {
        Self {
            cpu_cores: None,
            memory_limit_mb: Some(1024),  // 1 GiB
            disk_limit_mb: Some(10240),   // 10 GiB
            network_limit_mbps: None,
        }
    }
}
1345
/// Defaults: CPU and memory monitored once per second; disk and network
/// monitoring off.
impl Default for ResourceMonitoringConfig {
    fn default() -> Self {
        Self {
            monitor_cpu: true,
            monitor_memory: true,
            monitor_disk_io: false,
            monitor_network: false,
            monitoring_frequency_ms: 1000, // sample once per second
            alert_thresholds: ResourceAlertThresholds::default(),
        }
    }
}
1358
/// Default alert thresholds (percent utilization; network in the same
/// unit as `network_limit_mbps` — presumably Mbps, TODO confirm).
impl Default for ResourceAlertThresholds {
    fn default() -> Self {
        Self {
            cpu_threshold: 90.0,
            memory_threshold: 85.0,
            disk_threshold: 90.0,
            network_threshold: 100.0,
        }
    }
}
1369
/// Defaults: up to 3 retries with 5 s base delay and 2x backoff, only
/// for failure kinds that are plausibly transient.
impl Default for TestRetryConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            max_retries: 3,
            retry_delay_sec: 5,
            backoff_multiplier: 2.0,
            retry_on_failures: vec![
                TestFailureType::Timeout,
                TestFailureType::TransientError,
                TestFailureType::Network,
            ],
        }
    }
}
1385
/// Zeroed report used as the placeholder before monitoring data exists.
impl Default for ResourceUsageReport {
    fn default() -> Self {
        Self {
            peak_memory_mb: 0.0,
            avg_cpu_percent: 0.0,
            peak_cpu_percent: 0.0,
            disk_io_mb: 0.0,
            network_usage_mb: 0.0,
            timeline: Vec::new(),
        }
    }
}
1398
#[cfg(test)]
mod tests {
    use super::*;

    /// A suite can be constructed from the default configuration.
    #[test]
    fn test_test_suite_creation() {
        let config = TestSuiteConfig::default();
        let suite = PerformanceTestSuite::new(config);
        assert!(suite.is_ok());
    }

    /// Adding a test case stores it on the suite.
    #[test]
    fn test_test_case_filtering() {
        let mut suite =
            PerformanceTestSuite::new(TestSuiteConfig::default()).expect("unwrap failed");

        let test_case = PerformanceTestCase {
            name: "test1".to_string(),
            category: TestCategory::Unit,
            executor: TestExecutor::Criterion,
            parameters: HashMap::new(),
            baseline: None,
            timeout: None,
            iterations: 5,
            warmup_iterations: 1,
            dependencies: Vec::new(),
            tags: vec!["fast".to_string()],
            environment_requirements: EnvironmentRequirements::default(),
            custom_config: HashMap::new(),
        };

        suite.add_test_case(test_case);
        assert_eq!(suite.test_cases.len(), 1);
    }

    /// Empty requirements are always satisfied on the current host.
    #[test]
    fn test_environment_requirements_validation() {
        let suite = PerformanceTestSuite::new(TestSuiteConfig::default()).expect("unwrap failed");
        let requirements = EnvironmentRequirements::default();
        assert!(suite.check_environment_requirements(&requirements).is_ok());
    }

    /// Status equality/inequality behaves as derived.
    #[test]
    fn test_test_execution_status() {
        assert_eq!(TestExecutionStatus::Passed, TestExecutionStatus::Passed);
        assert_ne!(TestExecutionStatus::Passed, TestExecutionStatus::Failed);
    }

    /// Default monitoring config enables CPU/memory sampling at 1 Hz.
    #[test]
    fn test_resource_monitoring_config() {
        let config = ResourceMonitoringConfig::default();
        assert!(config.monitor_cpu);
        assert!(config.monitor_memory);
        assert_eq!(config.monitoring_frequency_ms, 1000);
    }
}