use crate::capabilities::CapabilityProfile;
use crate::error::{Result, SystemAnalysisError};
use crate::resources::{ResourcePool, ResourceType, ResourceAmount, CapabilityLevel};
use crate::types::{
    SystemProfile, SystemInfo, CpuInfo, GpuInfo, MemoryInfo, StorageInfo, NetworkInfo,
    NetworkInterface, CompatibilityResult, PerformanceEstimate, PerformanceTier,
    ResourceUtilization, UpgradeRecommendation, OptimalConfiguration, WorkloadRequirements,
    MissingRequirement, RequirementSeverity, Bottleneck, BottleneckImpact, UpgradePriority,
    CostEstimate,
};
use crate::workloads::WorkloadType;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use sysinfo::{System, Disks, Networks};
use tracing::{info, debug};
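/// Analyzes the host system's hardware and software capabilities and evaluates
/// them against workload requirements.
///
/// Illustrative usage (a sketch; error handling and the surrounding async
/// context are omitted):
///
/// ```ignore
/// let mut analyzer = SystemAnalyzer::new();
/// let profile = analyzer.analyze_system().await?;
/// println!("Overall score: {:.1}", profile.overall_score());
/// ```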
#[derive(Debug, Clone)]
pub struct SystemAnalyzer {
    /// Analyzer configuration
    config: AnalyzerConfig,
    /// Cached system information from the most recent analysis
    cached_system_info: Option<SystemInfo>,
    /// Cached capability profile from the most recent analysis
    cached_capability_profile: Option<CapabilityProfile>,
    /// Resource pool describing the resources detected on this system
    resource_pool: ResourcePool,
}
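/// Configuration options that control what the [`SystemAnalyzer`] inspects and
/// how long results are cached.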
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyzerConfig {
    /// Enable GPU detection during analysis
    pub enable_gpu_detection: bool,
    /// Enable detailed CPU feature analysis
    pub enable_detailed_cpu_analysis: bool,
    /// Enable active network testing (off by default)
    pub enable_network_testing: bool,
    /// How long cached results are intended to remain valid, in seconds
    pub cache_duration_seconds: u64,
    /// Enable benchmarking during analysis (off by default)
    pub enable_benchmarking: bool,
    /// Timeout for individual benchmarks, in seconds
    pub benchmark_timeout_seconds: u64,
}

impl Default for AnalyzerConfig {
    fn default() -> Self {
        Self {
            enable_gpu_detection: true,
            enable_detailed_cpu_analysis: true,
            enable_network_testing: false,
            cache_duration_seconds: 300,
            enable_benchmarking: false,
            benchmark_timeout_seconds: 30,
        }
    }
}

impl SystemAnalyzer {
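    /// Creates a new system analyzer with the default configuration.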
    pub fn new() -> Self {
        Self::with_config(AnalyzerConfig::default())
    }
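    /// Creates a new system analyzer with a custom configuration.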
    pub fn with_config(config: AnalyzerConfig) -> Self {
        Self {
            config,
            cached_system_info: None,
            cached_capability_profile: None,
            resource_pool: ResourcePool::new(),
        }
    }
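    /// Performs a complete system analysis and returns a scored [`SystemProfile`].
    ///
    /// The gathered system information and capability profile are cached on the
    /// analyzer for later calls.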
    pub async fn analyze_system(&mut self) -> Result<SystemProfile> {
        info!("Starting system analysis");

        let system_info = self.get_system_info().await?;
        let capability_profile = CapabilityProfile::from_system_info(&system_info);

        // Update the resource pool from the detected capabilities
        self.update_resource_pool(&capability_profile);

        // Cache the results for subsequent calls
        self.cached_system_info = Some(system_info.clone());
        self.cached_capability_profile = Some(capability_profile.clone());

        let system_profile = SystemProfile::new(
            capability_profile.scores.cpu_score,
            capability_profile.scores.gpu_score,
            capability_profile.scores.memory_score,
            capability_profile.scores.storage_score,
            capability_profile.scores.network_score,
            system_info,
        );

        info!("System analysis completed with overall score: {:.1}", system_profile.overall_score());
        Ok(system_profile)
    }
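    /// Checks whether the analyzed system satisfies the given workload
    /// requirements, returning a [`CompatibilityResult`] with a score,
    /// performance estimate, bottlenecks, and recommendations.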
    pub fn check_compatibility(
        &self,
        system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
    ) -> Result<CompatibilityResult> {
        debug!("Checking compatibility for workload: {}", workload_requirements.name);

        let missing_requirements = self.find_missing_requirements(system_profile, workload_requirements)?;
        let is_compatible = missing_requirements.is_empty();

        let score = self.calculate_compatibility_score(system_profile, workload_requirements)?;

        let performance_estimate = self.estimate_performance(system_profile, workload_requirements)?;

        let bottlenecks = self.identify_bottlenecks(system_profile, workload_requirements)?;

        let recommendations = self.generate_compatibility_recommendations(
            system_profile,
            workload_requirements,
            &missing_requirements,
            &bottlenecks,
        );

        Ok(CompatibilityResult {
            is_compatible,
            score,
            performance_estimate,
            missing_requirements,
            bottlenecks,
            recommendations,
        })
    }
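    /// Predicts how heavily the workload would utilize each resource on this
    /// system, including estimated peak utilization per resource.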
    pub fn predict_utilization(
        &self,
        system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
    ) -> Result<ResourceUtilization> {
        debug!("Predicting resource utilization for workload: {}", workload_requirements.name);

        // Start from the workload's own estimate when available, otherwise use defaults
        let base_utilization = if let Some(workload) = &workload_requirements.workload {
            workload.estimated_utilization()
        } else {
            self.estimate_default_utilization(workload_requirements)?
        };

        let cpu_percent = self.adjust_cpu_utilization(
            base_utilization.get(&ResourceType::CPU).copied().unwrap_or(50.0),
            system_profile,
        );

        let gpu_percent = self.adjust_gpu_utilization(
            base_utilization.get(&ResourceType::GPU).copied().unwrap_or(0.0),
            system_profile,
        );

        let memory_percent = self.adjust_memory_utilization(
            base_utilization.get(&ResourceType::Memory).copied().unwrap_or(40.0),
            system_profile,
            workload_requirements,
        );

        let storage_percent = base_utilization.get(&ResourceType::Storage).copied().unwrap_or(10.0);
        let network_percent = base_utilization.get(&ResourceType::Network).copied().unwrap_or(5.0);

        // Peak utilization is estimated by scaling the sustained figures per resource
        let mut peak_utilization = HashMap::new();
        peak_utilization.insert(ResourceType::CPU, cpu_percent * 1.2);
        peak_utilization.insert(ResourceType::GPU, gpu_percent * 1.1);
        peak_utilization.insert(ResourceType::Memory, memory_percent * 1.05);
        peak_utilization.insert(ResourceType::Storage, storage_percent * 2.0);
        peak_utilization.insert(ResourceType::Network, network_percent * 3.0);

        Ok(ResourceUtilization {
            cpu_percent,
            gpu_percent,
            memory_percent,
            storage_percent,
            network_percent,
            peak_utilization,
        })
    }
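    /// Generates upgrade recommendations for the given workload, ordered by
    /// priority (critical first).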
    pub fn recommend_upgrades(
        &self,
        system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
    ) -> Result<Vec<UpgradeRecommendation>> {
        debug!("Generating upgrade recommendations for workload: {}", workload_requirements.name);

        let mut recommendations = Vec::new();
        let missing_requirements = self.find_missing_requirements(system_profile, workload_requirements)?;

        for missing in &missing_requirements {
            let recommendation = self.generate_upgrade_recommendation(
                &missing.resource_type,
                &missing.required,
                &missing.available,
                system_profile,
            )?;
            recommendations.push(recommendation);
        }

        recommendations.extend(self.generate_general_upgrade_recommendations(system_profile, workload_requirements)?);

        // Sort by priority: Critical, High, Medium, Low
        recommendations.sort_by(|a, b| {
            use UpgradePriority::*;
            let priority_order = |p: &UpgradePriority| match p {
                Critical => 0,
                High => 1,
                Medium => 2,
                Low => 3,
            };
            priority_order(&a.priority).cmp(&priority_order(&b.priority))
        });

        Ok(recommendations)
    }
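    /// Suggests an optimal hardware configuration for the given workload,
    /// together with a rough cost estimate and performance projection.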
    pub fn find_optimal_configuration(
        &self,
        workload_requirements: &WorkloadRequirements,
    ) -> Result<OptimalConfiguration> {
        debug!("Finding optimal configuration for workload: {}", workload_requirements.name);

        let cpu_recommendation = self.recommend_optimal_cpu(workload_requirements)?;
        let gpu_recommendation = self.recommend_optimal_gpu(workload_requirements)?;
        let memory_recommendation = self.recommend_optimal_memory(workload_requirements)?;
        let storage_recommendation = self.recommend_optimal_storage(workload_requirements)?;
        let network_recommendation = self.recommend_optimal_network(workload_requirements)?;

        // Rough, static cost estimate for a configuration of this class
        let total_cost = Some(CostEstimate {
            min_cost: 2000.0,
            max_cost: 8000.0,
            currency: "USD".to_string(),
            time_frame: "Current market prices".to_string(),
        });

        // Projected performance for the recommended configuration
        let performance_projection = PerformanceEstimate {
            estimated_latency_ms: 25.0,
            estimated_throughput: 50.0,
            confidence: 0.85,
            performance_tier: PerformanceTier::Excellent,
        };

        Ok(OptimalConfiguration {
            cpu_recommendation,
            gpu_recommendation,
            memory_recommendation,
            storage_recommendation,
            network_recommendation,
            total_cost,
            performance_projection,
        })
    }
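    /// Gathers system information, returning the cached copy when one is
    /// available.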
    async fn get_system_info(&mut self) -> Result<SystemInfo> {
        // Return cached information if available
        if let Some(cached) = &self.cached_system_info {
            return Ok(cached.clone());
        }

        let mut system = System::new_all();
        system.refresh_all();

        let cpu_info = self.get_cpu_info(&system)?;
        let gpu_info = self.get_gpu_info().await?;
        let memory_info = self.get_memory_info(&system)?;
        let storage_info = self.get_storage_info(&system)?;
        let network_info = self.get_network_info(&system).await?;

        let system_info = SystemInfo {
            os_name: System::name().unwrap_or_else(|| "Unknown".to_string()),
            os_version: System::os_version().unwrap_or_else(|| "Unknown".to_string()),
            cpu_info,
            gpu_info,
            memory_info,
            storage_info,
            network_info,
        };

        Ok(system_info)
    }
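    /// Collects CPU details (cores, frequency, architecture) via sysinfo.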
    fn get_cpu_info(&self, system: &System) -> Result<CpuInfo> {
        let cpus = system.cpus();

        if cpus.is_empty() {
            return Err(SystemAnalysisError::system_info("No CPU information available"));
        }

        let cpu = &cpus[0];
        let physical_cores = system.physical_core_count().unwrap_or(1);
        let logical_cores = cpus.len();

        Ok(CpuInfo {
            brand: cpu.brand().to_string(),
            physical_cores,
            logical_cores,
            // sysinfo reports the frequency in MHz; fall back to 1000 MHz if unknown
            base_frequency: (cpu.frequency() as u64).max(1000),
            max_frequency: None,
            cache_size: None,
            architecture: std::env::consts::ARCH.to_string(),
        })
    }
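    /// Detects GPUs. With the `gpu-detection` feature enabled this queries NVML
    /// for NVIDIA devices; otherwise (or if nothing is found) a generic
    /// integrated-graphics entry is returned.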
    async fn get_gpu_info(&self) -> Result<Vec<GpuInfo>> {
        let mut gpus = Vec::new();

        if !self.config.enable_gpu_detection {
            return Ok(gpus);
        }

        #[cfg(feature = "gpu-detection")]
        {
            // Query NVIDIA GPUs through NVML when the feature is enabled
            if let Ok(nvml) = nvml_wrapper::Nvml::init() {
                if let Ok(device_count) = nvml.device_count() {
                    for i in 0..device_count {
                        if let Ok(device) = nvml.device_by_index(i) {
                            if let (Ok(name), Ok(memory_info)) = (device.name(), device.memory_info()) {
                                gpus.push(GpuInfo {
                                    name,
                                    vendor: "NVIDIA".to_string(),
                                    // NVML reports memory in bytes; convert to MiB
                                    vram_size: Some(memory_info.total / 1024 / 1024),
                                    compute_capability: device.cuda_compute_capability()
                                        .map(|cc| format!("{}.{}", cc.major, cc.minor))
                                        .ok(),
                                    // NVML-detected NVIDIA GPUs are assumed to support both APIs
                                    opencl_support: true,
                                    cuda_support: true,
                                });
                            }
                        }
                    }
                }
            }
        }

        // Fall back to a generic integrated-graphics entry if nothing was detected
        if gpus.is_empty() {
            gpus.push(GpuInfo {
                name: "Integrated Graphics".to_string(),
                vendor: "Unknown".to_string(),
                vram_size: None,
                compute_capability: None,
                opencl_support: false,
                cuda_support: false,
            });
        }

        Ok(gpus)
    }

    /// Collects memory information via sysinfo.
    fn get_memory_info(&self, system: &System) -> Result<MemoryInfo> {
        Ok(MemoryInfo {
            total_ram: system.total_memory() / 1024,
            available_ram: system.available_memory() / 1024,
            memory_type: None,
            memory_speed: None,
        })
    }
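    /// Enumerates storage devices and applies a simple heuristic to classify them.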
    fn get_storage_info(&self, _system: &System) -> Result<Vec<StorageInfo>> {
        let mut storage_devices = Vec::new();
        let disks = Disks::new_with_refreshed_list();

        for disk in &disks {
            // Convert bytes to whole gigabytes
            let total_capacity = disk.total_space() / 1024 / 1024 / 1024;
            let available_capacity = disk.available_space() / 1024 / 1024 / 1024;

            // Heuristic classification based on the device name, then the filesystem
            let storage_type = match disk.name().to_str().unwrap_or("") {
                name if name.contains("nvme") => "NVMe SSD".to_string(),
                name if name.contains("ssd") => "SSD".to_string(),
                _ => {
                    match disk.file_system().to_str().unwrap_or("") {
                        "NTFS" | "APFS" | "ext4" => "SSD".to_string(),
                        _ => "HDD".to_string(),
                    }
                }
            };

            storage_devices.push(StorageInfo {
                name: disk.name().to_str().unwrap_or("Unknown").to_string(),
                storage_type,
                total_capacity,
                available_capacity,
                read_speed: None,
                write_speed: None,
            });
        }

        // Fallback entry when no disks could be enumerated
        if storage_devices.is_empty() {
            storage_devices.push(StorageInfo {
                name: "Primary Storage".to_string(),
                storage_type: "Unknown".to_string(),
                total_capacity: 100,
                available_capacity: 50,
                read_speed: None,
                write_speed: None,
            });
        }

        Ok(storage_devices)
    }
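    /// Enumerates network interfaces and classifies them by name.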
    async fn get_network_info(&self, _system: &System) -> Result<NetworkInfo> {
        let mut interfaces = Vec::new();
        let networks = Networks::new_with_refreshed_list();

        for (interface_name, network_data) in &networks {
            interfaces.push(NetworkInterface {
                name: interface_name.clone(),
                // Classify the interface from its name
                interface_type: if interface_name.to_lowercase().contains("ethernet") {
                    "Ethernet".to_string()
                } else if interface_name.to_lowercase().contains("wifi")
                    || interface_name.to_lowercase().contains("wireless")
                {
                    "WiFi".to_string()
                } else {
                    "Unknown".to_string()
                },
                mac_address: network_data.mac_address().to_string(),
                // IP address enumeration is not implemented here
                ip_addresses: vec![],
                speed: None,
            });
        }

        Ok(NetworkInfo {
            interfaces,
            // Assumed; no active connectivity test is performed
            internet_connected: true,
            estimated_bandwidth: None,
        })
    }

    /// Publishes the detected capabilities into the internal resource pool.
    fn update_resource_pool(&mut self, capability_profile: &CapabilityProfile) {
        self.resource_pool.set_resource(
            ResourceType::CPU,
            ResourceAmount::Score(capability_profile.scores.cpu_score),
        );

        self.resource_pool.set_resource(
            ResourceType::GPU,
            ResourceAmount::Score(capability_profile.scores.gpu_score),
        );

        self.resource_pool.set_resource(
            ResourceType::Memory,
            ResourceAmount::Gigabytes(capability_profile.memory_capabilities.total_ram_gb),
        );

        self.resource_pool.set_resource(
            ResourceType::Storage,
            ResourceAmount::Gigabytes(capability_profile.storage_capabilities.total_capacity_gb),
        );

        self.resource_pool.set_resource(
            ResourceType::Network,
            ResourceAmount::Score(capability_profile.scores.network_score),
        );
    }
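    /// Compares workload requirements against the detected resource pool and
    /// returns any requirements that are not met.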
    fn find_missing_requirements(
        &self,
        _system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
    ) -> Result<Vec<MissingRequirement>> {
        let mut missing = Vec::new();

        for req in &workload_requirements.resource_requirements {
            if let Some(available) = self.resource_pool.get_resource(&req.resource_type) {
                if !req.is_satisfied_by(available) {
                    missing.push(MissingRequirement {
                        resource_type: req.resource_type,
                        required: req.minimum.to_string(),
                        available: available.to_string(),
                        severity: if req.is_critical {
                            RequirementSeverity::Critical
                        } else {
                            RequirementSeverity::High
                        },
                    });
                }
            } else {
                missing.push(MissingRequirement {
                    resource_type: req.resource_type,
                    required: req.minimum.to_string(),
                    available: "Not Available".to_string(),
                    severity: RequirementSeverity::Critical,
                });
            }
        }

        Ok(missing)
    }

    /// Computes an overall compatibility score from how well the resource pool
    /// satisfies the workload's requirements.
    fn calculate_compatibility_score(
        &self,
        _system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
    ) -> Result<f64> {
        let satisfaction_score = self.resource_pool.satisfaction_score(&workload_requirements.resource_requirements);
        Ok(satisfaction_score)
    }
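    /// Estimates latency, throughput, and a performance tier from the system's
    /// overall score.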
    fn estimate_performance(
        &self,
        system_profile: &SystemProfile,
        _workload_requirements: &WorkloadRequirements,
    ) -> Result<PerformanceEstimate> {
        // Baseline figures that are scaled by the system's overall score
        let base_latency = 100.0;
        let base_throughput = 10.0;

        let score_multiplier = system_profile.overall_score() / 10.0;
        let estimated_latency_ms = base_latency / score_multiplier.max(0.1);
        let estimated_throughput = base_throughput * score_multiplier;

        let confidence = if system_profile.overall_score() >= 7.0 {
            0.9
        } else if system_profile.overall_score() >= 5.0 {
            0.7
        } else {
            0.5
        };

        let performance_tier = match system_profile.overall_score() {
            score if score >= 8.0 => PerformanceTier::Excellent,
            score if score >= 6.0 => PerformanceTier::Good,
            score if score >= 4.0 => PerformanceTier::Fair,
            _ => PerformanceTier::Poor,
        };

        Ok(PerformanceEstimate {
            estimated_latency_ms,
            estimated_throughput,
            confidence,
            performance_tier,
        })
    }
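    /// Flags resources whose scores fall well below the system-wide average.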
    fn identify_bottlenecks(
        &self,
        system_profile: &SystemProfile,
        _workload_requirements: &WorkloadRequirements,
    ) -> Result<Vec<Bottleneck>> {
        let mut bottlenecks = Vec::new();

        let scores = [
            (ResourceType::CPU, system_profile.cpu_score()),
            (ResourceType::GPU, system_profile.gpu_score()),
            (ResourceType::Memory, system_profile.memory_score()),
            (ResourceType::Storage, system_profile.storage_score()),
            (ResourceType::Network, system_profile.network_score()),
        ];

        let avg_score = scores.iter().map(|(_, score)| score).sum::<f64>() / scores.len() as f64;

        for (resource_type, score) in scores {
            // Flag any resource scoring well below the system average
            if score < avg_score - 2.0 {
                let impact = if score < 3.0 {
                    BottleneckImpact::Severe
                } else if score < 5.0 {
                    BottleneckImpact::Moderate
                } else {
                    BottleneckImpact::Minor
                };

                let suggestions = self.generate_bottleneck_suggestions(&resource_type);

                bottlenecks.push(Bottleneck {
                    resource_type,
                    description: format!("{} performance is below system average ({:.1} vs {:.1})",
                        resource_type, score, avg_score),
                    impact,
                    suggestions,
                });
            }
        }

        Ok(bottlenecks)
    }
    fn generate_bottleneck_suggestions(&self, resource_type: &ResourceType) -> Vec<String> {
        match resource_type {
            ResourceType::CPU => vec![
                "Upgrade to a CPU with more cores or higher clock speed".to_string(),
                "Consider CPUs with newer architecture (e.g., latest Intel or AMD)".to_string(),
                "Ensure adequate cooling for sustained performance".to_string(),
            ],
            ResourceType::GPU => vec![
                "Add a dedicated GPU for compute workloads".to_string(),
                "Upgrade to a GPU with more VRAM".to_string(),
                "Consider GPUs optimized for AI/ML workloads".to_string(),
            ],
            ResourceType::Memory => vec![
                "Increase RAM capacity".to_string(),
                "Upgrade to faster memory (higher frequency)".to_string(),
                "Consider ECC memory for reliability".to_string(),
            ],
            ResourceType::Storage => vec![
                "Upgrade to NVMe SSD for faster I/O".to_string(),
                "Add more storage capacity".to_string(),
                "Consider RAID configuration for performance".to_string(),
            ],
            ResourceType::Network => vec![
                "Upgrade to gigabit Ethernet".to_string(),
                "Improve WiFi signal strength".to_string(),
                "Consider wired connection for consistency".to_string(),
            ],
            ResourceType::Custom(_) => vec![
                "Review custom resource requirements".to_string(),
            ],
        }
    }
    fn generate_compatibility_recommendations(
        &self,
        system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
        missing_requirements: &[MissingRequirement],
        bottlenecks: &[Bottleneck],
    ) -> Vec<String> {
        let mut recommendations = Vec::new();

        if missing_requirements.is_empty() && bottlenecks.is_empty() {
            recommendations.push("System meets all requirements for optimal performance".to_string());
        } else {
            if !missing_requirements.is_empty() {
                recommendations.push(format!("Address {} missing requirements", missing_requirements.len()));
            }

            if !bottlenecks.is_empty() {
                recommendations.push(format!("Resolve {} system bottlenecks", bottlenecks.len()));
            }

            // Workload-specific advice
            if let Some(workload) = &workload_requirements.workload {
                match workload.workload_type() {
                    WorkloadType::AIInference => {
                        if system_profile.gpu_score() < 6.0 {
                            recommendations.push("Consider GPU acceleration for AI inference".to_string());
                        }
                    }
                    WorkloadType::MemoryIntensive => {
                        if system_profile.memory_score() < 7.0 {
                            recommendations.push("Increase memory capacity for memory-intensive workloads".to_string());
                        }
                    }
                    _ => {}
                }
            }
        }

        recommendations
    }
    fn adjust_cpu_utilization(&self, base_util: f64, system_profile: &SystemProfile) -> f64 {
        // A stronger CPU (higher score) yields a lower adjusted utilization
        let efficiency_factor = (system_profile.cpu_score() / 10.0).max(0.1);
        (base_util / efficiency_factor).min(100.0)
    }

    fn adjust_gpu_utilization(&self, base_util: f64, system_profile: &SystemProfile) -> f64 {
        if system_profile.gpu_score() < 3.0 {
            // No capable GPU available
            0.0
        } else {
            let efficiency_factor = (system_profile.gpu_score() / 10.0).max(0.1);
            (base_util / efficiency_factor).min(100.0)
        }
    }
    fn adjust_memory_utilization(
        &self,
        _base_util: f64,
        system_profile: &SystemProfile,
        workload_requirements: &WorkloadRequirements,
    ) -> f64 {
        // Use the explicit memory requirement when present, defaulting to 4 GB
        let memory_req = workload_requirements.resource_requirements
            .iter()
            .find(|req| req.resource_type == ResourceType::Memory)
            .and_then(|req| match &req.minimum {
                ResourceAmount::Gigabytes(gb) => Some(*gb),
                _ => None,
            })
            .unwrap_or(4.0);

        // Convert the cached total RAM figure to GB so it is comparable to the GB requirement
        let total_memory = system_profile.system_info.memory_info.total_ram as f64 / 1024.0;
        ((memory_req / total_memory) * 100.0).min(100.0)
    }
    fn estimate_default_utilization(&self, workload_requirements: &WorkloadRequirements) -> Result<HashMap<ResourceType, f64>> {
        let mut utilization = HashMap::new();

        // Scale the default CPU load with the workload's priority
        let base_cpu = match workload_requirements.priority {
            crate::types::WorkloadPriority::Critical => 80.0,
            crate::types::WorkloadPriority::High => 60.0,
            crate::types::WorkloadPriority::Medium => 40.0,
            crate::types::WorkloadPriority::Low => 20.0,
        };

        utilization.insert(ResourceType::CPU, base_cpu);
        utilization.insert(ResourceType::GPU, 0.0); // No GPU use assumed by default
        utilization.insert(ResourceType::Memory, 30.0);
        utilization.insert(ResourceType::Storage, 10.0);
        utilization.insert(ResourceType::Network, 5.0);

        Ok(utilization)
    }
    fn generate_upgrade_recommendation(
        &self,
        resource_type: &ResourceType,
        required: &str,
        available: &str,
        _system_profile: &SystemProfile,
    ) -> Result<UpgradeRecommendation> {
        let (recommendation, estimated_improvement, priority) = match resource_type {
            ResourceType::CPU => {
                ("Upgrade to a higher-performance CPU with more cores".to_string(),
                 "30-50% performance improvement".to_string(),
                 UpgradePriority::High)
            }
            ResourceType::GPU => {
                ("Add or upgrade GPU for compute acceleration".to_string(),
                 "2-10x performance improvement for GPU workloads".to_string(),
                 UpgradePriority::Critical)
            }
            ResourceType::Memory => {
                (format!("Increase RAM from {} to {}", available, required),
                 "Eliminate memory bottlenecks".to_string(),
                 UpgradePriority::High)
            }
            ResourceType::Storage => {
                ("Upgrade to faster NVMe SSD storage".to_string(),
                 "Reduce I/O latency by 50-90%".to_string(),
                 UpgradePriority::Medium)
            }
            ResourceType::Network => {
                ("Upgrade network connection speed".to_string(),
                 "Reduce network latency and increase throughput".to_string(),
                 UpgradePriority::Low)
            }
            ResourceType::Custom(_) => {
                ("Review custom resource requirements".to_string(),
                 "Variable improvement".to_string(),
                 UpgradePriority::Medium)
            }
        };

        Ok(UpgradeRecommendation {
            resource_type: *resource_type,
            recommendation,
            estimated_improvement,
            cost_estimate: None,
            priority,
        })
    }
    fn generate_general_upgrade_recommendations(
        &self,
        system_profile: &SystemProfile,
        _workload_requirements: &WorkloadRequirements,
    ) -> Result<Vec<UpgradeRecommendation>> {
        let mut recommendations = Vec::new();

        // Suggest a broader refresh when the overall score is low
        if system_profile.overall_score() < 6.0 {
            recommendations.push(UpgradeRecommendation {
                resource_type: ResourceType::CPU,
                recommendation: "Consider a comprehensive system upgrade".to_string(),
                estimated_improvement: "Significant overall performance improvement".to_string(),
                cost_estimate: Some(CostEstimate {
                    min_cost: 1500.0,
                    max_cost: 5000.0,
                    currency: "USD".to_string(),
                    time_frame: "Complete system refresh".to_string(),
                }),
                priority: UpgradePriority::Medium,
            });
        }

        Ok(recommendations)
    }
    fn recommend_optimal_cpu(&self, workload_requirements: &WorkloadRequirements) -> Result<String> {
        let cpu_req = workload_requirements.resource_requirements
            .iter()
            .find(|req| req.resource_type == ResourceType::CPU);

        let recommendation = match cpu_req {
            Some(req) => match &req.minimum {
                ResourceAmount::Level(level) => match level {
                    CapabilityLevel::Exceptional => "High-end workstation CPU (e.g., Intel Xeon W or AMD Threadripper PRO)",
                    CapabilityLevel::VeryHigh => "High-performance CPU (e.g., Intel Core i9 or AMD Ryzen 9)",
                    CapabilityLevel::High => "Performance CPU (e.g., Intel Core i7 or AMD Ryzen 7)",
                    CapabilityLevel::Medium => "Mid-range CPU (e.g., Intel Core i5 or AMD Ryzen 5)",
                    _ => "Entry-level CPU (e.g., Intel Core i3 or AMD Ryzen 3)",
                },
                _ => "Modern multi-core CPU with good single-thread performance",
            },
            None => "Balanced CPU suitable for general workloads",
        };

        Ok(recommendation.to_string())
    }
    fn recommend_optimal_gpu(&self, workload_requirements: &WorkloadRequirements) -> Result<String> {
        let gpu_req = workload_requirements.resource_requirements
            .iter()
            .find(|req| req.resource_type == ResourceType::GPU);

        let recommendation = match gpu_req {
            Some(_) => {
                if let Some(workload) = &workload_requirements.workload {
                    match workload.workload_type() {
                        WorkloadType::AIInference | WorkloadType::AITraining => {
                            "High-memory GPU optimized for AI (e.g., NVIDIA RTX 4090, A6000, or H100)"
                        }
                        _ => "Dedicated GPU with good compute performance"
                    }
                } else {
                    "Modern dedicated GPU with adequate VRAM"
                }
            }
            None => "Integrated graphics sufficient, dedicated GPU optional",
        };

        Ok(recommendation.to_string())
    }
    fn recommend_optimal_memory(&self, workload_requirements: &WorkloadRequirements) -> Result<String> {
        let memory_req = workload_requirements.resource_requirements
            .iter()
            .find(|req| req.resource_type == ResourceType::Memory)
            .and_then(|req| match &req.minimum {
                ResourceAmount::Gigabytes(gb) => Some(*gb),
                _ => None,
            })
            .unwrap_or(16.0);

        let recommendation = match memory_req {
            gb if gb >= 128.0 => format!("{}GB+ high-speed DDR5 RAM with ECC support", gb as u32),
            gb if gb >= 64.0 => format!("{}GB+ high-speed DDR5 RAM", gb as u32),
            gb if gb >= 32.0 => format!("{}GB DDR4/DDR5 RAM", gb as u32),
            gb if gb >= 16.0 => format!("{}GB DDR4 RAM", gb as u32),
            _ => "16GB DDR4 RAM (minimum recommended)".to_string(),
        };

        Ok(recommendation)
    }
    fn recommend_optimal_storage(&self, workload_requirements: &WorkloadRequirements) -> Result<String> {
        let storage_req = workload_requirements.resource_requirements
            .iter()
            .find(|req| req.resource_type == ResourceType::Storage)
            .and_then(|req| match &req.minimum {
                ResourceAmount::Gigabytes(gb) => Some(*gb),
                _ => None,
            })
            .unwrap_or(500.0);

        let recommendation = if storage_req >= 2000.0 {
            format!("{}GB+ NVMe SSD with high sequential read/write speeds", storage_req as u32)
        } else if storage_req >= 1000.0 {
            format!("{}GB NVMe SSD", storage_req as u32)
        } else {
            format!("{}GB+ SATA SSD or NVMe SSD", storage_req as u32)
        };

        Ok(recommendation)
    }
    fn recommend_optimal_network(&self, _workload_requirements: &WorkloadRequirements) -> Result<String> {
        Ok("Gigabit Ethernet connection (wired preferred for consistency)".to_string())
    }
}

impl Default for SystemAnalyzer {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ResourceRequirement;

    #[test]
    fn test_analyzer_config_default() {
        let config = AnalyzerConfig::default();
        assert!(config.enable_gpu_detection);
        assert!(config.enable_detailed_cpu_analysis);
        assert!(!config.enable_network_testing);
        assert_eq!(config.cache_duration_seconds, 300);
        assert!(!config.enable_benchmarking);
        assert_eq!(config.benchmark_timeout_seconds, 30);
    }

    #[test]
    fn test_analyzer_creation() {
        let analyzer = SystemAnalyzer::new();
        assert!(analyzer.cached_system_info.is_none());
        assert!(analyzer.cached_capability_profile.is_none());
    }
    #[test]
    fn test_analyzer_with_config() {
        let config = AnalyzerConfig {
            enable_gpu_detection: false,
            enable_detailed_cpu_analysis: false,
            enable_network_testing: true,
            cache_duration_seconds: 600,
            enable_benchmarking: false,
            benchmark_timeout_seconds: 60,
        };

        let analyzer = SystemAnalyzer::with_config(config.clone());
        assert!(!analyzer.config.enable_gpu_detection);
        assert!(analyzer.config.enable_network_testing);
        assert_eq!(analyzer.config.cache_duration_seconds, 600);
    }

    #[tokio::test]
    async fn test_system_analysis_basic() {
        let mut analyzer = SystemAnalyzer::new();
        let result = analyzer.analyze_system().await;
        assert!(result.is_ok());

        let profile = result.unwrap();
        assert!(profile.overall_score() >= 0.0 && profile.overall_score() <= 10.0);
        assert!(profile.cpu_score() >= 0.0 && profile.cpu_score() <= 10.0);
        assert!(profile.gpu_score() >= 0.0 && profile.gpu_score() <= 10.0);
        assert!(profile.memory_score() >= 0.0 && profile.memory_score() <= 10.0);
        assert!(profile.storage_score() >= 0.0 && profile.storage_score() <= 10.0);
        assert!(profile.network_score() >= 0.0 && profile.network_score() <= 10.0);
    }
    #[tokio::test]
    async fn test_workload_compatibility_simple() {
        let mut analyzer = SystemAnalyzer::new();
        let system_profile = analyzer.analyze_system().await.unwrap();

        let mut workload_requirements = WorkloadRequirements::new("test-workload");
        workload_requirements.add_resource_requirement(
            ResourceRequirement::new(ResourceType::Memory)
                .minimum_gb(4.0)
                .recommended_gb(8.0)
        );

        let compatibility = analyzer.check_compatibility(&system_profile, &workload_requirements);
        assert!(compatibility.is_ok());

        let result = compatibility.unwrap();
        assert!(result.is_compatible);
        assert!(result.score >= 0.0 && result.score <= 10.0);
    }

    #[test]
    fn test_workload_requirements_builder() {
        let mut requirements = WorkloadRequirements::new("test-workload");

        requirements.add_resource_requirement(
            ResourceRequirement::new(ResourceType::CPU)
                .minimum_level(CapabilityLevel::Medium)
                .recommended_level(CapabilityLevel::High)
        );

        requirements.add_resource_requirement(
            ResourceRequirement::new(ResourceType::Memory)
                .minimum_gb(8.0)
                .recommended_gb(16.0)
        );

        assert_eq!(requirements.name, "test-workload");
        assert_eq!(requirements.resource_requirements.len(), 2);

        let cpu_req = requirements.get_resource_requirement(&ResourceType::CPU);
        assert!(cpu_req.is_some());
        assert_eq!(cpu_req.unwrap().resource_type, ResourceType::CPU);

        let memory_req = requirements.get_resource_requirement(&ResourceType::Memory);
        assert!(memory_req.is_some());
        assert_eq!(memory_req.unwrap().resource_type, ResourceType::Memory);

        let gpu_req = requirements.get_resource_requirement(&ResourceType::GPU);
        assert!(gpu_req.is_none());
    }
}