
voirs_cli/platform/hardware.rs

//! Hardware detection and optimization
//!
//! This module provides hardware detection, optimization recommendations, and hardware usage monitoring.

use std::collections::HashMap;

/// GPU information and capabilities
#[derive(Debug, Clone)]
pub struct GpuInfo {
    /// GPU name/model
    pub name: String,
    /// GPU vendor (NVIDIA, AMD, Intel, etc.)
    pub vendor: String,
    /// VRAM in bytes
    pub vram: u64,
    /// Whether GPU supports CUDA
    pub cuda_support: bool,
    /// Whether GPU supports OpenCL
    pub opencl_support: bool,
    /// Whether GPU supports Vulkan
    pub vulkan_support: bool,
}

/// CPU information and capabilities
#[derive(Debug, Clone)]
pub struct CpuInfo {
    /// CPU name/model
    pub name: String,
    /// CPU vendor (Intel, AMD, ARM, etc.)
    pub vendor: String,
    /// Number of physical cores
    pub physical_cores: usize,
    /// Number of logical cores (with hyperthreading)
    pub logical_cores: usize,
    /// Base frequency in MHz
    pub base_frequency: u32,
    /// Maximum frequency in MHz
    pub max_frequency: u32,
    /// Cache sizes (L1, L2, L3) in bytes
    pub cache_sizes: HashMap<String, u64>,
    /// Supported instruction sets
    pub instruction_sets: Vec<String>,
}

/// Memory information
#[derive(Debug, Clone)]
pub struct MemoryInfo {
    /// Total physical memory in bytes
    pub total: u64,
    /// Available memory in bytes
    pub available: u64,
    /// Memory speed in MHz
    pub speed: u32,
    /// Memory type (DDR4, DDR5, etc.)
    pub memory_type: String,
}

/// Hardware optimization recommendations
#[derive(Debug, Clone)]
pub struct OptimizationRecommendations {
    /// Recommended number of worker threads
    pub worker_threads: usize,
    /// Whether to enable GPU acceleration
    pub use_gpu: bool,
    /// Recommended memory usage limit in bytes
    pub memory_limit: u64,
    /// Recommended batch size for processing
    pub batch_size: usize,
    /// Whether to enable SIMD optimizations
    pub use_simd: bool,
    /// Specific optimization flags
    pub optimization_flags: Vec<String>,
}

/// Get GPU information
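///
/// Enumerates GPUs using platform-specific probes and always returns at least one
/// (possibly "Unknown") entry.
///
/// # Examples
///
/// A minimal sketch (illustrative; assumes this module is publicly reachable as
/// `voirs_cli::platform::hardware`):
///
/// ```no_run
/// use voirs_cli::platform::hardware::get_gpu_info;
///
/// let gpus = get_gpu_info();
/// for gpu in &gpus {
///     println!("{} ({}) with {} MiB VRAM", gpu.name, gpu.vendor, gpu.vram / (1024 * 1024));
/// }
/// ```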
pub fn get_gpu_info() -> Vec<GpuInfo> {
    let mut gpus = Vec::new();

    #[cfg(target_os = "windows")]
    {
        // Windows GPU detection using WMI or DirectX
        gpus.push(detect_windows_gpu());
    }

    #[cfg(target_os = "macos")]
    {
        // macOS GPU detection using Metal or system_profiler
        gpus.push(detect_macos_gpu());
    }

    #[cfg(target_os = "linux")]
    {
        // Linux GPU detection using lspci, nvidia-ml, or vulkan
        gpus.extend(detect_linux_gpus());
    }

    // Fallback detection
    if gpus.is_empty() {
        gpus.push(GpuInfo {
            name: "Unknown GPU".to_string(),
            vendor: "Unknown".to_string(),
            vram: 0,
            cuda_support: false,
            opencl_support: false,
            vulkan_support: false,
        });
    }

    gpus
}

/// Get CPU information
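///
/// # Examples
///
/// A minimal sketch (illustrative; assumes this module is publicly reachable as
/// `voirs_cli::platform::hardware`):
///
/// ```no_run
/// use voirs_cli::platform::hardware::get_cpu_info;
///
/// let cpu = get_cpu_info();
/// println!("{} ({} physical / {} logical cores)", cpu.name, cpu.physical_cores, cpu.logical_cores);
/// // Instruction set names are lowercase on Linux and uppercase on Windows
/// if cpu.instruction_sets.iter().any(|i| i.eq_ignore_ascii_case("avx2")) {
///     println!("AVX2 available");
/// }
/// ```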
pub fn get_cpu_info() -> CpuInfo {
    #[cfg(target_os = "windows")]
    {
        detect_windows_cpu()
    }
    #[cfg(target_os = "macos")]
    {
        detect_macos_cpu()
    }
    #[cfg(target_os = "linux")]
    {
        detect_linux_cpu()
    }
    #[cfg(not(any(target_os = "windows", target_os = "macos", target_os = "linux")))]
    {
        CpuInfo {
            name: "Unknown CPU".to_string(),
            vendor: "Unknown".to_string(),
            physical_cores: num_cpus::get_physical(),
            logical_cores: num_cpus::get(),
            base_frequency: 2400,
            max_frequency: 3600,
            cache_sizes: HashMap::new(),
            instruction_sets: Vec::new(),
        }
    }
}

/// Get memory information
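///
/// # Examples
///
/// A minimal sketch (illustrative; assumes this module is publicly reachable as
/// `voirs_cli::platform::hardware`):
///
/// ```no_run
/// use voirs_cli::platform::hardware::get_memory_info;
///
/// let mem = get_memory_info();
/// let used = mem.total - mem.available;
/// println!(
///     "{:.1} GiB used of {:.1} GiB total ({})",
///     used as f64 / 1024f64.powi(3),
///     mem.total as f64 / 1024f64.powi(3),
///     mem.memory_type
/// );
/// ```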
pub fn get_memory_info() -> MemoryInfo {
    let (total, available) = crate::platform::get_memory_info();

    MemoryInfo {
        total,
        available,
        speed: detect_memory_speed(),
        memory_type: detect_memory_type(),
    }
}

/// Generate optimization recommendations based on hardware
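///
/// # Examples
///
/// A minimal sketch of feeding the recommendations into a runtime configuration
/// (illustrative; the commented-out `rayon` call is an assumption, not a project dependency):
///
/// ```no_run
/// use voirs_cli::platform::hardware::get_optimization_recommendations;
///
/// let rec = get_optimization_recommendations();
/// // e.g. size a worker pool from the recommendation:
/// // rayon::ThreadPoolBuilder::new().num_threads(rec.worker_threads).build_global().ok();
/// println!("threads={} gpu={} batch={}", rec.worker_threads, rec.use_gpu, rec.batch_size);
/// ```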
pub fn get_optimization_recommendations() -> OptimizationRecommendations {
    let cpu_info = get_cpu_info();
    let memory_info = get_memory_info();
    let gpu_info = get_gpu_info();

    let worker_threads = calculate_optimal_threads(&cpu_info);
    let use_gpu = should_use_gpu(&gpu_info);
    let memory_limit = calculate_memory_limit(&memory_info);
    let batch_size = calculate_batch_size(&cpu_info, &memory_info);
    let use_simd = supports_simd(&cpu_info);
    let optimization_flags = generate_optimization_flags(&cpu_info, &gpu_info);

    OptimizationRecommendations {
        worker_threads,
        use_gpu,
        memory_limit,
        batch_size,
        use_simd,
        optimization_flags,
    }
}

/// Monitor hardware usage during synthesis
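///
/// Values come from best-effort, single-shot system probes; when no probe succeeds the
/// usage fields fall back to `0.0`.
///
/// # Examples
///
/// A minimal sketch (illustrative; assumes this module is publicly reachable as
/// `voirs_cli::platform::hardware`):
///
/// ```no_run
/// use voirs_cli::platform::hardware::monitor_hardware_usage;
///
/// let usage = monitor_hardware_usage();
/// if usage.temperature.thermal_throttling {
///     eprintln!("warning: thermal throttling detected (CPU {:.0}°C)", usage.temperature.cpu_temp);
/// }
/// println!("CPU {:.1}%  GPU {:.1}%", usage.cpu_usage, usage.gpu_usage);
/// ```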
pub fn monitor_hardware_usage() -> HardwareUsage {
    HardwareUsage {
        cpu_usage: get_cpu_usage(),
        memory_usage: get_memory_usage(),
        gpu_usage: get_gpu_usage(),
        temperature: get_temperature_info(),
    }
}

/// Hardware usage statistics
#[derive(Debug, Clone)]
pub struct HardwareUsage {
    /// CPU usage percentage (0-100)
    pub cpu_usage: f32,
    /// Memory usage in bytes
    pub memory_usage: u64,
    /// GPU usage percentage (0-100)
    pub gpu_usage: f32,
    /// Temperature information
    pub temperature: TemperatureInfo,
}

/// Temperature monitoring
#[derive(Debug, Clone)]
pub struct TemperatureInfo {
    /// CPU temperature in Celsius
    pub cpu_temp: f32,
    /// GPU temperature in Celsius
    pub gpu_temp: f32,
    /// Thermal throttling status
    pub thermal_throttling: bool,
}

// Platform-specific implementations

#[cfg(target_os = "windows")]
fn detect_windows_gpu() -> GpuInfo {
    use std::process::Command;

    // Query Win32_VideoController via PowerShell for GPU information
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("Get-WmiObject Win32_VideoController | Select-Object Name,AdapterCompatibility,AdapterRAM | ConvertTo-Json")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);

        // Simple JSON-like parsing (avoiding serde dependency)
        // Format: {"Name":"...", "AdapterCompatibility":"...", "AdapterRAM":...}
        let mut name = String::from("Windows GPU");
        let mut vendor = String::from("Unknown");
        let mut vram: u64 = 0;

        for line in output_str.lines() {
            let trimmed = line.trim();

            if trimmed.contains("\"Name\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    name = value;
                }
            } else if trimmed.contains("\"AdapterCompatibility\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    vendor = value;
                }
            } else if trimmed.contains("\"AdapterRAM\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    vram = value;
                }
            }
        }

        // Normalize vendor name
        let vendor_normalized = if vendor.to_lowercase().contains("nvidia") {
            "NVIDIA".to_string()
        } else if vendor.to_lowercase().contains("amd") || vendor.to_lowercase().contains("ati") {
            "AMD".to_string()
        } else if vendor.to_lowercase().contains("intel") {
            "Intel".to_string()
        } else if vendor.to_lowercase().contains("microsoft") {
            "Microsoft".to_string() // Software renderer
        } else {
            vendor
        };

        let cuda_support = vendor_normalized == "NVIDIA";
        let opencl_support = vendor_normalized == "NVIDIA" || vendor_normalized == "AMD" || vendor_normalized == "Intel";
        let vulkan_support = vendor_normalized == "NVIDIA" || vendor_normalized == "AMD" || vendor_normalized == "Intel";

        return GpuInfo {
            name,
            vendor: vendor_normalized,
            vram,
            cuda_support,
            opencl_support,
            vulkan_support,
        };
    }

    // Fallback: Try nvidia-smi if NVIDIA GPU is present
    if let Ok(output) = Command::new("nvidia-smi")
        .arg("--query-gpu=name,memory.total")
        .arg("--format=csv,noheader,nounits")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        if let Some(line) = output_str.lines().next() {
            let parts: Vec<&str> = line.split(',').collect();
            let name = parts.get(0).unwrap_or(&"NVIDIA GPU").trim().to_string();
            let vram = parts
                .get(1)
                .and_then(|s| s.trim().parse::<u64>().ok())
                .unwrap_or(0)
                * 1024
                * 1024; // Convert MB to bytes

            return GpuInfo {
                name,
                vendor: "NVIDIA".to_string(),
                vram,
                cuda_support: true,
                opencl_support: true,
                vulkan_support: true,
            };
        }
    }

    // Final fallback
    GpuInfo {
        name: "Unknown Windows GPU".to_string(),
        vendor: "Unknown".to_string(),
        vram: 0,
        cuda_support: false,
        opencl_support: false,
        vulkan_support: false,
    }
}

#[cfg(target_os = "windows")]
fn extract_json_string_value(line: &str) -> Option<String> {
    // Extract string value from JSON line like: "Name": "NVIDIA GeForce RTX 3080",
    if let Some(colon_pos) = line.find(':') {
        let value_part = &line[colon_pos + 1..];
        // Find the quoted value
        if let Some(first_quote) = value_part.find('"') {
            if let Some(second_quote) = value_part[first_quote + 1..].find('"') {
                let value = &value_part[first_quote + 1..first_quote + 1 + second_quote];
                return Some(value.to_string());
            }
        }
    }
    None
}

#[cfg(target_os = "windows")]
fn extract_json_number_value(line: &str) -> Option<u64> {
    // Extract number value from JSON line like: "AdapterRAM": 12884901888
    if let Some(colon_pos) = line.find(':') {
        let value_part = &line[colon_pos + 1..].trim();
        // Remove trailing comma if present
        let value_str = value_part.trim_end_matches(',').trim();
        if let Ok(value) = value_str.parse::<u64>() {
            return Some(value);
        }
    }
    None
}

#[cfg(target_os = "windows")]
fn detect_windows_cpu() -> CpuInfo {
    use std::process::Command;

    let mut cpu_info = CpuInfo {
        name: "Windows CPU".to_string(),
        vendor: "Unknown".to_string(),
        physical_cores: num_cpus::get_physical(),
        logical_cores: num_cpus::get(),
        base_frequency: 2400,
        max_frequency: 3600,
        cache_sizes: HashMap::new(),
        instruction_sets: Vec::new(),
    };

    // Query Win32_Processor via PowerShell for CPU information
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("Get-WmiObject Win32_Processor | Select-Object Name,Manufacturer,MaxClockSpeed,CurrentClockSpeed | ConvertTo-Json")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);

        for line in output_str.lines() {
            let trimmed = line.trim();

            if trimmed.contains("\"Name\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    cpu_info.name = value;
                }
            } else if trimmed.contains("\"Manufacturer\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    cpu_info.vendor = value;
                }
            } else if trimmed.contains("\"MaxClockSpeed\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    cpu_info.max_frequency = value as u32;
                }
            } else if trimmed.contains("\"CurrentClockSpeed\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    cpu_info.base_frequency = value as u32;
                }
            }
        }
    }

    // Infer instruction sets from the PROCESSOR_IDENTIFIER environment variable
    // queried via PowerShell (a heuristic; exact detection would require CPUID)
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("[System.Environment]::GetEnvironmentVariable('PROCESSOR_IDENTIFIER')")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        let processor_id = output_str.trim().to_lowercase();

        // Infer common instruction sets based on processor info
        let mut instruction_sets =
            vec!["x86-64".to_string(), "SSE".to_string(), "SSE2".to_string()];

        // Most modern Intel/AMD processors support these
        if processor_id.contains("intel") || processor_id.contains("amd") {
            instruction_sets.push("SSE3".to_string());
            instruction_sets.push("SSSE3".to_string());
            instruction_sets.push("SSE4.1".to_string());
            instruction_sets.push("SSE4.2".to_string());
            instruction_sets.push("AVX".to_string());

            // Recent processors
            if !processor_id.contains("pentium") && !processor_id.contains("celeron") {
                instruction_sets.push("AVX2".to_string());
                instruction_sets.push("FMA".to_string());
            }

            // Heuristic: assume AVX-512 on Xeon and Core branded parts
            // (this may over-report on models that lack it)
            if processor_id.contains("xeon") || processor_id.contains("core") {
                instruction_sets.push("AVX-512".to_string());
            }
        }

        cpu_info.instruction_sets = instruction_sets;
    }

    // Try to get cache information
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("Get-WmiObject Win32_CacheMemory | Select-Object InstalledSize,Level | ConvertTo-Json")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        let mut current_level = 0;

        for line in output_str.lines() {
            let trimmed = line.trim();

            if trimmed.contains("\"Level\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    current_level = value;
                }
            } else if trimmed.contains("\"InstalledSize\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    if current_level > 0 {
                        let cache_key = format!("L{}", current_level);
                        cpu_info.cache_sizes.insert(cache_key, value * 1024); // Convert KB to bytes
                    }
                }
            }
        }
    }

    cpu_info
}

#[cfg(target_os = "macos")]
fn detect_macos_gpu() -> GpuInfo {
    // macOS GPU detection using system_profiler
    use std::process::Command;

    let output = Command::new("system_profiler")
        .arg("SPDisplaysDataType")
        .output()
        .ok();

    if let Some(output) = output {
        let output_str = String::from_utf8_lossy(&output.stdout);

        // Parse system_profiler output for GPU information
        let mut name = String::from("macOS GPU");
        let mut vendor = String::from("Unknown");
        let mut vram: u64 = 0;

        let lines: Vec<&str> = output_str.lines().collect();
        let mut in_chipset_section = false;

        for line in lines {
            let trimmed = line.trim();

            // Detect chipset/GPU section
            if trimmed.starts_with("Chipset Model:") || trimmed.starts_with("Chip Type:") {
                in_chipset_section = true;
                if let Some(value) = trimmed.split(':').nth(1) {
                    name = value.trim().to_string();
                }
            }

            // Extract vendor from chipset name
            if in_chipset_section && !name.is_empty() {
                vendor = if name.contains("Apple")
                    || name.contains("M1")
                    || name.contains("M2")
                    || name.contains("M3")
                {
                    "Apple".to_string()
                } else if name.contains("AMD") || name.contains("Radeon") {
                    "AMD".to_string()
                } else if name.contains("NVIDIA") || name.contains("GeForce") {
                    "NVIDIA".to_string()
                } else if name.contains("Intel") {
                    "Intel".to_string()
                } else {
                    "Unknown".to_string()
                };
            }

            // Extract VRAM
            if trimmed.starts_with("VRAM") || trimmed.starts_with("vRAM") {
                if let Some(value_str) = trimmed.split(':').nth(1) {
                    let value_str = value_str.trim();

                    // Parse VRAM value (e.g., "8 GB", "4096 MB")
                    if let Some(num_str) = value_str.split_whitespace().next() {
                        if let Ok(num) = num_str.parse::<u64>() {
                            if value_str.contains("GB") {
                                vram = num * 1024 * 1024 * 1024;
                            } else if value_str.contains("MB") {
                                vram = num * 1024 * 1024;
                            }
                        }
                    }
                }
            }

            // Apple Silicon unified memory detection
            if (name.contains("M1") || name.contains("M2") || name.contains("M3"))
                && trimmed.starts_with("Metal:")
            {
                // For Apple Silicon, VRAM is shared with system memory
                // Try to estimate from Metal Support section
                if let Some(value_str) = trimmed.split(':').nth(1) {
                    if value_str.contains("Supported") {
                        // Apple Silicon typically has 8GB, 16GB, 24GB, 32GB, etc.
                        // Use a conservative estimate if we can't find exact VRAM
                        if vram == 0 {
                            vram = 8 * 1024 * 1024 * 1024; // 8GB default for Apple Silicon
                        }
                    }
                }
            }
        }

        // Determine API support based on vendor
        let cuda_support = vendor == "NVIDIA"; // CUDA requires an NVIDIA GPU (and is unsupported on recent macOS versions)
        let opencl_support = vendor != "Unknown"; // OpenCL is deprecated on macOS but still available for most GPUs
        let vulkan_support = false; // No native Vulkan on macOS; only via translation layers such as MoltenVK

        GpuInfo {
            name,
            vendor,
            vram,
            cuda_support,
            opencl_support,
            vulkan_support,
        }
    } else {
        GpuInfo {
            name: "Unknown macOS GPU".to_string(),
            vendor: "Unknown".to_string(),
            vram: 0,
            cuda_support: false,
            opencl_support: false,
            vulkan_support: false,
        }
    }
}

#[cfg(target_os = "macos")]
fn detect_macos_cpu() -> CpuInfo {
    use std::process::Command;

    let mut cpu_info = CpuInfo {
        name: "macOS CPU".to_string(),
        vendor: "Unknown".to_string(),
        physical_cores: num_cpus::get_physical(),
        logical_cores: num_cpus::get(),
        base_frequency: 2400,
        max_frequency: 3600,
        cache_sizes: HashMap::new(),
        instruction_sets: Vec::new(),
    };

    // Get CPU name from sysctl
    if let Ok(output) = Command::new("sysctl")
        .arg("-n")
        .arg("machdep.cpu.brand_string")
        .output()
    {
        cpu_info.name = String::from_utf8_lossy(&output.stdout).trim().to_string();
    }

    // Get CPU vendor
    if let Ok(output) = Command::new("sysctl")
        .arg("-n")
        .arg("machdep.cpu.vendor")
        .output()
    {
        cpu_info.vendor = String::from_utf8_lossy(&output.stdout).trim().to_string();
    }

    cpu_info
}

#[cfg(target_os = "linux")]
fn detect_linux_gpus() -> Vec<GpuInfo> {
    let mut gpus = Vec::new();

    // Try lspci first
    if let Ok(output) = std::process::Command::new("lspci").arg("-nn").output() {
        let output_str = String::from_utf8_lossy(&output.stdout);
        for line in output_str.lines() {
            if line.to_lowercase().contains("vga") || line.to_lowercase().contains("3d") {
                gpus.push(parse_lspci_gpu_line(line));
            }
        }
    }

    // Try nvidia-smi for NVIDIA GPUs
    if let Ok(output) = std::process::Command::new("nvidia-smi")
        .arg("--query-gpu=name,memory.total")
        .arg("--format=csv,noheader,nounits")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        for line in output_str.lines() {
            if !line.trim().is_empty() {
                gpus.push(parse_nvidia_smi_line(line));
            }
        }
    }

    if gpus.is_empty() {
        gpus.push(GpuInfo {
            name: "Linux GPU".to_string(),
            vendor: "Unknown".to_string(),
            vram: 0,
            cuda_support: false,
            opencl_support: false,
            vulkan_support: false,
        });
    }

    gpus
}

#[cfg(target_os = "linux")]
fn detect_linux_cpu() -> CpuInfo {
    use std::fs;

    let mut cpu_info = CpuInfo {
        name: "Linux CPU".to_string(),
        vendor: "Unknown".to_string(),
        physical_cores: num_cpus::get_physical(),
        logical_cores: num_cpus::get(),
        base_frequency: 2400,
        max_frequency: 3600,
        cache_sizes: HashMap::new(),
        instruction_sets: Vec::new(),
    };

    // Parse /proc/cpuinfo
    if let Ok(content) = fs::read_to_string("/proc/cpuinfo") {
        for line in content.lines() {
            if line.starts_with("model name") {
                if let Some(name) = line.split(':').nth(1) {
                    cpu_info.name = name.trim().to_string();
                }
            } else if line.starts_with("vendor_id") {
                if let Some(vendor) = line.split(':').nth(1) {
                    cpu_info.vendor = vendor.trim().to_string();
                }
            } else if line.starts_with("flags") {
                if let Some(flags) = line.split(':').nth(1) {
                    cpu_info.instruction_sets =
                        flags.split_whitespace().map(|s| s.to_string()).collect();
                }
            }
        }
    }

    cpu_info
}

#[cfg(target_os = "linux")]
fn parse_lspci_gpu_line(line: &str) -> GpuInfo {
    let name = line
        .split(':')
        .last()
        .unwrap_or("Unknown GPU")
        .trim()
        .to_string();
    let vendor = if line.to_lowercase().contains("nvidia") {
        "NVIDIA"
    } else if line.to_lowercase().contains("amd") || line.to_lowercase().contains("ati") {
        "AMD"
    } else if line.to_lowercase().contains("intel") {
        "Intel"
    } else {
        "Unknown"
    }
    .to_string();

    GpuInfo {
        name,
        vendor: vendor.clone(),
        vram: 0, // lspci doesn't provide VRAM info
        cuda_support: vendor == "NVIDIA",
        opencl_support: true, // Most modern GPUs support OpenCL
        vulkan_support: true, // Most modern GPUs support Vulkan
    }
}

#[cfg(target_os = "linux")]
fn parse_nvidia_smi_line(line: &str) -> GpuInfo {
    let parts: Vec<&str> = line.split(',').collect();
    let name = parts.get(0).unwrap_or(&"NVIDIA GPU").trim().to_string();
    let vram = parts
        .get(1)
        .and_then(|s| s.trim().parse::<u64>().ok())
        .unwrap_or(0)
        * 1024
        * 1024; // Convert MB to bytes

    GpuInfo {
        name,
        vendor: "NVIDIA".to_string(),
        vram,
        cuda_support: true,
        opencl_support: true,
        vulkan_support: true,
    }
}

// Helper functions

fn detect_memory_speed() -> u32 {
    // Platform-specific memory speed detection
    #[cfg(target_os = "linux")]
    {
        use std::fs;
        // /proc/meminfo does not expose DIMM speed; a full implementation would
        // parse `dmidecode -t memory` (requires root). If /proc/meminfo is readable
        // at all, assume a common DDR4-3200 configuration.
        if fs::read_to_string("/proc/meminfo").is_ok() {
            return 3200;
        }
    }

    2400 // Default fallback
}

fn detect_memory_type() -> String {
    // Platform-specific memory type detection
    #[cfg(target_os = "linux")]
    {
        if let Ok(output) = std::process::Command::new("dmidecode")
            .arg("-t")
            .arg("memory")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if output_str.contains("DDR5") {
                return "DDR5".to_string();
            } else if output_str.contains("DDR4") {
                return "DDR4".to_string();
            }
        }
    }

    "DDR4".to_string() // Default fallback
}

fn calculate_optimal_threads(cpu_info: &CpuInfo) -> usize {
    // Calculate optimal thread count based on CPU characteristics
    let logical_cores = cpu_info.logical_cores;

    // For synthesis workloads, typically use 75% of logical cores
    // to leave headroom for OS and other processes
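    // e.g. 16 logical cores -> 12 worker threads, 4 -> 3, 1 -> 1 (never below 1)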
    std::cmp::max(1, (logical_cores * 3) / 4)
}

fn should_use_gpu(gpu_info: &[GpuInfo]) -> bool {
    // Determine if GPU acceleration should be recommended
    gpu_info.iter().any(|gpu| {
        gpu.cuda_support || gpu.opencl_support || gpu.vram > 2_000_000_000 // 2GB+
    })
}

fn calculate_memory_limit(memory_info: &MemoryInfo) -> u64 {
    // Calculate safe memory limit (typically 75% of available memory)
    (memory_info.available * 3) / 4
}

fn calculate_batch_size(cpu_info: &CpuInfo, memory_info: &MemoryInfo) -> usize {
    // Calculate optimal batch size based on CPU cores and available memory
    let base_batch_size = cpu_info.logical_cores * 2;
    let memory_factor = (memory_info.available / 1_000_000_000) as usize; // GB

    // Clamp to 1..=64: memory_factor is 0 with under 1 GB available, and the
    // product can otherwise grow unreasonably large
    (base_batch_size * memory_factor).clamp(1, 64)
}

fn supports_simd(cpu_info: &CpuInfo) -> bool {
    // Check if CPU supports SIMD instructions beneficial for audio processing
    cpu_info.instruction_sets.iter().any(|inst| {
        inst.to_lowercase().contains("avx")
            || inst.to_lowercase().contains("sse")
            || inst.to_lowercase().contains("neon") // ARM NEON
    })
}

fn generate_optimization_flags(cpu_info: &CpuInfo, gpu_info: &[GpuInfo]) -> Vec<String> {
    let mut flags = Vec::new();

    // CPU-specific flags
    if supports_simd(cpu_info) {
        flags.push("enable-simd".to_string());
    }

    if cpu_info.logical_cores >= 8 {
        flags.push("high-parallelism".to_string());
    }

    // GPU-specific flags
    if should_use_gpu(gpu_info) {
        flags.push("gpu-acceleration".to_string());

        if gpu_info.iter().any(|gpu| gpu.cuda_support) {
            flags.push("cuda-support".to_string());
        }

        if gpu_info.iter().any(|gpu| gpu.opencl_support) {
            flags.push("opencl-support".to_string());
        }
    }

    flags
}

fn get_cpu_usage() -> f32 {
    // Platform-specific CPU usage monitoring
    #[cfg(target_os = "linux")]
    {
        use std::fs;

        // Read aggregate CPU times from /proc/stat (cumulative since boot, so a single
        // sample approximates average utilization rather than instantaneous load)
        if let Ok(stat) = fs::read_to_string("/proc/stat") {
            if let Some(cpu_line) = stat.lines().next() {
                let fields: Vec<&str> = cpu_line.split_whitespace().collect();
                if fields.len() >= 8 {
                    // Parse CPU times: user, nice, system, idle, iowait, irq, softirq
                    let user: u64 = fields[1].parse().unwrap_or(0);
                    let nice: u64 = fields[2].parse().unwrap_or(0);
                    let system: u64 = fields[3].parse().unwrap_or(0);
                    let idle: u64 = fields[4].parse().unwrap_or(0);
                    let iowait: u64 = fields[5].parse().unwrap_or(0);

                    let active = user + nice + system;
                    let total = active + idle + iowait;

                    if total > 0 {
                        return (active as f32 / total as f32) * 100.0;
                    }
                }
            }
        }
    }

    #[cfg(target_os = "macos")]
    {
        use std::process::Command;

        // Use top command to get CPU usage
        if let Ok(output) = Command::new("top")
            .arg("-l")
            .arg("1")
            .arg("-n")
            .arg("0")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("CPU usage:") {
                    // Parse line like: "CPU usage: 12.34% user, 5.67% sys, 81.99% idle"
                    let parts: Vec<&str> = line.split(',').collect();
                    if parts.len() >= 2 {
                        let mut usage = 0.0f32;
                        for part in parts {
                            if part.contains("user") || part.contains("sys") {
                                if let Some(percent_str) = part.split('%').next() {
                                    if let Some(num_str) = percent_str.split_whitespace().last() {
                                        if let Ok(val) = num_str.parse::<f32>() {
                                            usage += val;
                                        }
                                    }
                                }
                            }
                        }
                        return usage;
                    }
                }
            }
        }
    }

    #[cfg(target_os = "windows")]
    {
        use std::process::Command;

        // Use PowerShell to get CPU usage
        if let Ok(output) = Command::new("powershell")
            .arg("-NoProfile")
            .arg("-Command")
            .arg("(Get-Counter '\\Processor(_Total)\\% Processor Time').CounterSamples.CookedValue")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage;
            }
        }
    }

    0.0 // Fallback if all methods fail
}

fn get_memory_usage() -> u64 {
    // Current memory usage
    let (total, available) = crate::platform::get_memory_info();
    total - available
}

fn get_gpu_usage() -> f32 {
    // Platform-specific GPU usage monitoring
    #[cfg(target_os = "linux")]
    {
        use std::process::Command;

        // Try nvidia-smi for NVIDIA GPUs
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=utilization.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage;
            }
        }

        // Try radeontop for AMD GPUs (if installed)
        if let Ok(output) = Command::new("radeontop")
            .arg("-d")
            .arg("-")
            .arg("-l")
            .arg("1")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("gpu") {
                    // Parse radeontop output format
                    if let Some(percent_part) = line.split_whitespace().find(|s| s.ends_with('%')) {
                        if let Ok(usage) = percent_part.trim_end_matches('%').parse::<f32>() {
                            return usage;
                        }
                    }
                }
            }
        }
    }

    #[cfg(target_os = "macos")]
    {
        use std::process::Command;

        // Use powermetrics for Apple Silicon GPU usage
        if let Ok(output) = Command::new("powermetrics")
            .arg("--samplers")
            .arg("gpu_power")
            .arg("-n")
            .arg("1")
            .arg("-i")
            .arg("1000")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("GPU Active") || line.contains("GPU HW active") {
                    // Parse percentage from powermetrics output
                    if let Some(percent_str) = line.split(':').nth(1) {
                        let percent_str = percent_str.trim();
                        if let Some(num_str) = percent_str.split('%').next() {
                            if let Ok(usage) = num_str.trim().parse::<f32>() {
                                return usage;
                            }
                        }
                    }
                }
            }
        }

        // Fallback: Try Activity Monitor via ioreg for discrete GPUs
        if let Ok(output) = Command::new("ioreg")
            .arg("-r")
            .arg("-c")
            .arg("IOAccelerator")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            // This is a rough estimate based on GPU presence
            if output_str.contains("PerformanceStatistics") {
                return 15.0; // Conservative estimate if GPU is active
            }
        }
    }

    #[cfg(target_os = "windows")]
    {
        use std::process::Command;

        // Try nvidia-smi for NVIDIA GPUs on Windows
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=utilization.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage;
            }
        }

        // Use PowerShell performance counters (Get-Counter) to query GPU usage (works for Intel/AMD integrated)
        if let Ok(output) = Command::new("powershell")
            .arg("-NoProfile")
            .arg("-Command")
            .arg("(Get-Counter '\\GPU Engine(*engtype_3D)\\Utilization Percentage').CounterSamples | Measure-Object -Property CookedValue -Sum | Select-Object -ExpandProperty Sum")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage.min(100.0); // Cap at 100%
            }
        }
    }

    0.0 // Fallback if all methods fail
}

fn get_temperature_info() -> TemperatureInfo {
    // Platform-specific temperature monitoring
    let mut cpu_temp = 45.0; // Default fallback
    let mut gpu_temp = 50.0; // Default fallback
    #[allow(unused_assignments)]
    // Initial value used as fallback for non-linux/macos/windows platforms
    let mut thermal_throttling = false;

    #[cfg(target_os = "linux")]
    {
        use std::fs;
        use std::process::Command;

        // Try to read CPU temperature from lm-sensors
        let mut cpu_temp_found = false;
        if let Ok(output) = Command::new("sensors").arg("-u").output() {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("temp1_input") || line.contains("Core 0") {
                    if let Some(temp_str) = line.split(':').nth(1) {
                        if let Ok(temp) = temp_str.trim().parse::<f32>() {
                            cpu_temp = temp;
                            cpu_temp_found = true;
                            break;
                        }
                    }
                }
            }
        }

        // Fallback: read from /sys/class/thermal if sensors did not yield a value
        if !cpu_temp_found {
            if let Ok(entries) = fs::read_dir("/sys/class/thermal") {
                for entry in entries.flatten() {
                    let path = entry.path();
                    if path
                        .file_name()
                        .and_then(|n| n.to_str())
                        .unwrap_or("")
                        .starts_with("thermal_zone")
                    {
                        let temp_path = path.join("temp");
                        if let Ok(temp_str) = fs::read_to_string(&temp_path) {
                            if let Ok(temp_millidegrees) = temp_str.trim().parse::<i32>() {
                                cpu_temp = temp_millidegrees as f32 / 1000.0;
                                break;
                            }
                        }
                    }
                }
            }
        }

        // Try nvidia-smi for GPU temperature
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=temperature.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(temp) = output_str.trim().parse::<f32>() {
                gpu_temp = temp;
            }
        }

        // Check for thermal throttling
        thermal_throttling = cpu_temp > 85.0 || gpu_temp > 80.0;
    }

    #[cfg(target_os = "macos")]
    {
        use std::process::Command;

        // Use powermetrics for temperature info (requires sudo on some systems)
        if let Ok(output) = Command::new("powermetrics")
            .arg("--samplers")
            .arg("thermal")
            .arg("-n")
            .arg("1")
            .arg("-i")
            .arg("1000")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("CPU die temperature") || line.contains("CPU temp") {
                    if let Some(temp_str) = line.split(':').nth(1) {
                        let temp_str = temp_str.trim();
                        if let Some(num_str) = temp_str.split_whitespace().next() {
                            if let Ok(temp) = num_str.parse::<f32>() {
                                cpu_temp = temp;
                            }
                        }
                    }
                }
                if line.contains("GPU die temperature") || line.contains("GPU temp") {
                    if let Some(temp_str) = line.split(':').nth(1) {
                        let temp_str = temp_str.trim();
                        if let Some(num_str) = temp_str.split_whitespace().next() {
                            if let Ok(temp) = num_str.parse::<f32>() {
                                gpu_temp = temp;
                            }
                        }
                    }
                }
            }
        }

        // Fallback: Try osx-cpu-temp if available
        if let Ok(output) = Command::new("osx-cpu-temp").output() {
            let output_str = String::from_utf8_lossy(&output.stdout);
            // Parse output like "61.2°C"
            if let Some(temp_str) = output_str.split('°').next() {
                if let Ok(temp) = temp_str.trim().parse::<f32>() {
                    cpu_temp = temp;
                }
            }
        }

        // Check for thermal throttling (macOS typically throttles around 100°C)
        thermal_throttling = cpu_temp > 95.0 || gpu_temp > 90.0;
    }

    #[cfg(target_os = "windows")]
    {
        use std::process::Command;

        // Try nvidia-smi for GPU temperature
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=temperature.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(temp) = output_str.trim().parse::<f32>() {
                gpu_temp = temp;
            }
        }

        // Use PowerShell/WMI for CPU temperature (limited on Windows without admin)
        if let Ok(output) = Command::new("powershell")
            .arg("-NoProfile")
            .arg("-Command")
            .arg("Get-WmiObject MSAcpi_ThermalZoneTemperature -Namespace root/wmi | Select-Object -ExpandProperty CurrentTemperature")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(temp_tenth_kelvin) = output_str.trim().parse::<f32>() {
                // Convert from tenths of Kelvin to Celsius
                cpu_temp = (temp_tenth_kelvin / 10.0) - 273.15;
            }
        }

        // Check for thermal throttling
        thermal_throttling = cpu_temp > 90.0 || gpu_temp > 85.0;
    }

    TemperatureInfo {
        cpu_temp,
        gpu_temp,
        thermal_throttling,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gpu_detection() {
        let gpus = get_gpu_info();
        assert!(!gpus.is_empty());
        assert!(!gpus[0].name.is_empty());
    }

    #[test]
    fn test_cpu_detection() {
        let cpu = get_cpu_info();
        assert!(!cpu.name.is_empty());
        assert!(cpu.logical_cores > 0);
        assert!(cpu.physical_cores > 0);
    }

    #[test]
    fn test_memory_detection() {
        let memory = get_memory_info();
        assert!(memory.total > 0);
        assert!(memory.available <= memory.total);
    }

    #[test]
    fn test_optimization_recommendations() {
        let recommendations = get_optimization_recommendations();
        assert!(recommendations.worker_threads > 0);
        assert!(recommendations.memory_limit > 0);
        assert!(recommendations.batch_size > 0);
    }

    #[test]
    fn test_hardware_monitoring() {
        let usage = monitor_hardware_usage();
        assert!(usage.cpu_usage >= 0.0);
        assert!(usage.gpu_usage >= 0.0);
1254    }
1255}