use std::collections::HashMap;
/// Static description of one GPU device as discovered by the platform probes.
#[derive(Debug, Clone)]
pub struct GpuInfo {
    /// Device/marketing name as reported by the platform tool.
    pub name: String,
    /// Normalized vendor string (e.g. "NVIDIA", "AMD", "Intel", "Apple", "Unknown").
    pub vendor: String,
    /// Video memory in bytes; 0 when the probe could not determine it.
    pub vram: u64,
    /// Heuristic capability flags derived from the vendor string, not from a
    /// runtime driver/API query — treat as hints, not guarantees.
    pub cuda_support: bool,
    pub opencl_support: bool,
    pub vulkan_support: bool,
}
/// Static description of the host CPU.
#[derive(Debug, Clone)]
pub struct CpuInfo {
    /// CPU brand/model string.
    pub name: String,
    /// Vendor string as reported by the platform (not normalized).
    pub vendor: String,
    /// Number of physical cores.
    pub physical_cores: usize,
    /// Number of logical (SMT) cores.
    pub logical_cores: usize,
    /// Frequencies in MHz; probes may leave the placeholder defaults
    /// (2400 / 3600) when nothing could be read.
    pub base_frequency: u32,
    pub max_frequency: u32,
    /// Cache level key ("L1", "L2", ...) mapped to size in bytes, when detectable.
    pub cache_sizes: HashMap<String, u64>,
    /// Instruction-set extension names (curated list on Windows, raw
    /// /proc/cpuinfo flags on Linux).
    pub instruction_sets: Vec<String>,
}
/// Snapshot of system RAM.
#[derive(Debug, Clone)]
pub struct MemoryInfo {
    /// Total installed RAM in bytes.
    pub total: u64,
    /// Currently available RAM in bytes.
    pub available: u64,
    /// DRAM speed in MT/s — a heuristic placeholder (see `detect_memory_speed`).
    pub speed: u32,
    /// Memory generation string, e.g. "DDR4" / "DDR5".
    pub memory_type: String,
}
/// Tuning hints derived from the detected hardware
/// (see `get_optimization_recommendations`).
#[derive(Debug, Clone)]
pub struct OptimizationRecommendations {
    /// Suggested worker-thread count (~3/4 of logical cores, at least 1).
    pub worker_threads: usize,
    /// Whether any detected GPU looks usable for acceleration.
    pub use_gpu: bool,
    /// Suggested memory cap in bytes (~3/4 of available RAM).
    pub memory_limit: u64,
    /// Suggested work-batch size, scaled by cores and RAM.
    pub batch_size: usize,
    /// Whether the CPU advertises SIMD extensions (AVX/SSE/NEON).
    pub use_simd: bool,
    /// Free-form feature flags such as "enable-simd", "cuda-support".
    pub optimization_flags: Vec<String>,
}
/// Enumerates GPUs for the current platform.
///
/// Exactly one platform probe is compiled in; if it yields nothing, a single
/// "Unknown GPU" placeholder is returned so callers always get at least one
/// entry.
pub fn get_gpu_info() -> Vec<GpuInfo> {
    let mut detected: Vec<GpuInfo> = Vec::new();
    #[cfg(target_os = "windows")]
    detected.push(detect_windows_gpu());
    #[cfg(target_os = "macos")]
    detected.push(detect_macos_gpu());
    #[cfg(target_os = "linux")]
    detected.extend(detect_linux_gpus());
    if detected.is_empty() {
        detected.push(GpuInfo {
            name: String::from("Unknown GPU"),
            vendor: String::from("Unknown"),
            vram: 0,
            cuda_support: false,
            opencl_support: false,
            vulkan_support: false,
        });
    }
    detected
}
/// Returns CPU details for the current platform.
///
/// Exactly one of the `#[cfg]` blocks below survives compilation and becomes
/// the function's tail expression; the last block is the fallback for any OS
/// other than the three handled explicitly.
pub fn get_cpu_info() -> CpuInfo {
    #[cfg(target_os = "windows")]
    {
        detect_windows_cpu()
    }
    #[cfg(target_os = "macos")]
    {
        detect_macos_cpu()
    }
    #[cfg(target_os = "linux")]
    {
        detect_linux_cpu()
    }
    #[cfg(not(any(target_os = "windows", target_os = "macos", target_os = "linux")))]
    {
        // Generic defaults: core counts from num_cpus, nominal MHz placeholders.
        CpuInfo {
            name: "Unknown CPU".to_string(),
            vendor: "Unknown".to_string(),
            physical_cores: num_cpus::get_physical(),
            logical_cores: num_cpus::get(),
            base_frequency: 2400,
            max_frequency: 3600,
            cache_sizes: HashMap::new(),
            instruction_sets: Vec::new(),
        }
    }
}
/// Builds a [`MemoryInfo`] snapshot from the platform layer's raw totals plus
/// the local speed/type heuristics.
pub fn get_memory_info() -> MemoryInfo {
    let (total_bytes, available_bytes) = crate::platform::get_memory_info();
    MemoryInfo {
        total: total_bytes,
        available: available_bytes,
        speed: detect_memory_speed(),
        memory_type: detect_memory_type(),
    }
}
/// Probes the hardware once and derives tuning recommendations from the
/// resulting CPU, memory and GPU snapshots.
pub fn get_optimization_recommendations() -> OptimizationRecommendations {
    let cpu = get_cpu_info();
    let memory = get_memory_info();
    let gpus = get_gpu_info();
    OptimizationRecommendations {
        worker_threads: calculate_optimal_threads(&cpu),
        use_gpu: should_use_gpu(&gpus),
        memory_limit: calculate_memory_limit(&memory),
        batch_size: calculate_batch_size(&cpu, &memory),
        use_simd: supports_simd(&cpu),
        optimization_flags: generate_optimization_flags(&cpu, &gpus),
    }
}
/// Takes a point-in-time snapshot of CPU, memory, GPU and thermal state.
///
/// Every probe is best-effort: failures surface as zero values or defaults,
/// never as errors.
pub fn monitor_hardware_usage() -> HardwareUsage {
    HardwareUsage {
        cpu_usage: get_cpu_usage(),
        memory_usage: get_memory_usage(),
        gpu_usage: get_gpu_usage(),
        temperature: get_temperature_info(),
    }
}
/// Point-in-time utilization snapshot returned by [`monitor_hardware_usage`].
#[derive(Debug, Clone)]
pub struct HardwareUsage {
    /// CPU utilization in percent (0.0 when unreadable).
    pub cpu_usage: f32,
    /// RAM currently in use, in bytes.
    pub memory_usage: u64,
    /// GPU utilization in percent (0.0 when unreadable).
    pub gpu_usage: f32,
    /// Temperatures plus a throttling estimate.
    pub temperature: TemperatureInfo,
}
/// Temperatures in degrees Celsius plus a throttling heuristic.
#[derive(Debug, Clone)]
pub struct TemperatureInfo {
    pub cpu_temp: f32,
    pub gpu_temp: f32,
    /// True when temperatures exceed per-OS thresholds. This is an estimate
    /// computed from the readings, not the hardware's actual throttle state.
    pub thermal_throttling: bool,
}
#[cfg(target_os = "windows")]
/// Detects the primary GPU on Windows.
///
/// Probe order:
/// 1. WMI `Win32_VideoController` via PowerShell (name, vendor, VRAM);
/// 2. `nvidia-smi` (name, VRAM in MiB) — previously unreachable whenever
///    PowerShell could be spawned at all, because the first branch returned
///    unconditionally even when the WMI query failed or parsed nothing;
/// 3. a neutral "Unknown Windows GPU" placeholder.
fn detect_windows_gpu() -> GpuInfo {
    use std::process::Command;
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("Get-WmiObject Win32_VideoController | Select-Object Name,AdapterCompatibility,AdapterRAM | ConvertTo-Json")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        // Line-oriented scrape of the JSON output rather than a real parser;
        // with multiple adapters the last occurrence of each key wins.
        let mut name: Option<String> = None;
        let mut vendor = String::from("Unknown");
        let mut vram: u64 = 0;
        for line in output_str.lines() {
            let trimmed = line.trim();
            if trimmed.contains("\"Name\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    name = Some(value);
                }
            } else if trimmed.contains("\"AdapterCompatibility\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    vendor = value;
                }
            } else if trimmed.contains("\"AdapterRAM\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    vram = value;
                }
            }
        }
        // Only trust the WMI result when the command succeeded AND a name was
        // actually parsed; otherwise fall through to the nvidia-smi probe.
        if output.status.success() {
            if let Some(name) = name {
                // Normalize the vendor once instead of re-lowercasing per test.
                let vendor_lower = vendor.to_lowercase();
                let vendor_normalized = if vendor_lower.contains("nvidia") {
                    "NVIDIA".to_string()
                } else if vendor_lower.contains("amd") || vendor_lower.contains("ati") {
                    "AMD".to_string()
                } else if vendor_lower.contains("intel") {
                    "Intel".to_string()
                } else if vendor_lower.contains("microsoft") {
                    // e.g. "Microsoft Basic Display Adapter" (no vendor driver).
                    "Microsoft".to_string()
                } else {
                    vendor
                };
                // Capability flags are vendor heuristics, not driver queries.
                let is_accelerator =
                    matches!(vendor_normalized.as_str(), "NVIDIA" | "AMD" | "Intel");
                return GpuInfo {
                    cuda_support: vendor_normalized == "NVIDIA",
                    opencl_support: is_accelerator,
                    vulkan_support: is_accelerator,
                    name,
                    vendor: vendor_normalized,
                    vram,
                };
            }
        }
    }
    // Fallback: nvidia-smi prints one "<name>, <MiB>" line per GPU.
    if let Ok(output) = Command::new("nvidia-smi")
        .arg("--query-gpu=name,memory.total")
        .arg("--format=csv,noheader,nounits")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        if let Some(line) = output_str.lines().find(|l| !l.trim().is_empty()) {
            let parts: Vec<&str> = line.split(',').collect();
            let name = parts.first().unwrap_or(&"NVIDIA GPU").trim().to_string();
            // memory.total is in MiB with `nounits`; convert to bytes.
            let vram = parts
                .get(1)
                .and_then(|s| s.trim().parse::<u64>().ok())
                .unwrap_or(0)
                * 1024
                * 1024;
            return GpuInfo {
                name,
                vendor: "NVIDIA".to_string(),
                vram,
                cuda_support: true,
                opencl_support: true,
                vulkan_support: true,
            };
        }
    }
    // No probe produced anything usable.
    GpuInfo {
        name: "Unknown Windows GPU".to_string(),
        vendor: "Unknown".to_string(),
        vram: 0,
        cuda_support: false,
        opencl_support: false,
        vulkan_support: false,
    }
}
#[cfg(target_os = "windows")]
/// Extracts the string value from a single `"Key": "Value"` JSON line.
///
/// Returns `None` when the line has no colon or no quoted value after it.
/// Escaped quotes inside the value are not handled.
fn extract_json_string_value(line: &str) -> Option<String> {
    let colon = line.find(':')?;
    let rest = &line[colon + 1..];
    let open = rest.find('"')?;
    let tail = &rest[open + 1..];
    let close = tail.find('"')?;
    Some(tail[..close].to_string())
}
#[cfg(target_os = "windows")]
/// Extracts the numeric value from a single `"Key": 1234,` JSON line.
///
/// Returns `None` when the line has no colon or the remainder (after trimming
/// whitespace and a trailing comma) does not parse as `u64`.
/// Note: the original took `&(...).trim()`, producing a needless `&&str`
/// borrow (clippy `needless_borrow`); behavior is otherwise unchanged.
fn extract_json_number_value(line: &str) -> Option<u64> {
    let colon = line.find(':')?;
    line[colon + 1..]
        .trim()
        .trim_end_matches(',')
        .trim()
        .parse::<u64>()
        .ok()
}
#[cfg(target_os = "windows")]
/// Gathers CPU details on Windows via WMI (PowerShell) queries.
///
/// Starts from safe defaults (core counts from `num_cpus`, nominal MHz
/// placeholders) and overwrites whichever fields the queries manage to fill.
fn detect_windows_cpu() -> CpuInfo {
    use std::process::Command;
    let mut cpu_info = CpuInfo {
        name: "Windows CPU".to_string(),
        vendor: "Unknown".to_string(),
        physical_cores: num_cpus::get_physical(),
        logical_cores: num_cpus::get(),
        // Placeholder MHz values, replaced below when WMI responds.
        base_frequency: 2400,
        max_frequency: 3600,
        cache_sizes: HashMap::new(),
        instruction_sets: Vec::new(),
    };
    // Name, vendor and clock speeds from Win32_Processor, scraped line by
    // line from the JSON output rather than parsed properly.
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("Get-WmiObject Win32_Processor | Select-Object Name,Manufacturer,MaxClockSpeed,CurrentClockSpeed | ConvertTo-Json")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        for line in output_str.lines() {
            let trimmed = line.trim();
            if trimmed.contains("\"Name\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    cpu_info.name = value;
                }
            } else if trimmed.contains("\"Manufacturer\"") {
                if let Some(value) = extract_json_string_value(trimmed) {
                    cpu_info.vendor = value;
                }
            } else if trimmed.contains("\"MaxClockSpeed\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    cpu_info.max_frequency = value as u32;
                }
            } else if trimmed.contains("\"CurrentClockSpeed\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    // NOTE(review): CurrentClockSpeed is the momentary clock,
                    // not the rated base frequency — confirm this mapping.
                    cpu_info.base_frequency = value as u32;
                }
            }
        }
    }
    // Instruction sets are guessed from PROCESSOR_IDENTIFIER rather than
    // queried via CPUID — these are heuristics, not measurements.
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("[System.Environment]::GetEnvironmentVariable('PROCESSOR_IDENTIFIER')")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        let processor_id = output_str.trim().to_lowercase();
        let mut instruction_sets =
            vec!["x86-64".to_string(), "SSE".to_string(), "SSE2".to_string()];
        if processor_id.contains("intel") || processor_id.contains("amd") {
            instruction_sets.push("SSE3".to_string());
            instruction_sets.push("SSSE3".to_string());
            instruction_sets.push("SSE4.1".to_string());
            instruction_sets.push("SSE4.2".to_string());
            instruction_sets.push("AVX".to_string());
            if !processor_id.contains("pentium") && !processor_id.contains("celeron") {
                instruction_sets.push("AVX2".to_string());
                instruction_sets.push("FMA".to_string());
            }
            // NOTE(review): assumes every Xeon/Core part has AVX-512, which
            // is optimistic — verify before relying on this flag.
            if processor_id.contains("xeon") || processor_id.contains("core") {
                instruction_sets.push("AVX-512".to_string());
            }
        }
        cpu_info.instruction_sets = instruction_sets;
    }
    // Cache sizes from Win32_CacheMemory. "Level" precedes "InstalledSize"
    // in the JSON, so remember the most recent level seen.
    if let Ok(output) = Command::new("powershell")
        .arg("-NoProfile")
        .arg("-Command")
        .arg("Get-WmiObject Win32_CacheMemory | Select-Object InstalledSize,Level | ConvertTo-Json")
        .output()
    {
        let output_str = String::from_utf8_lossy(&output.stdout);
        let mut current_level = 0;
        for line in output_str.lines() {
            let trimmed = line.trim();
            if trimmed.contains("\"Level\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    current_level = value;
                }
            } else if trimmed.contains("\"InstalledSize\"") {
                if let Some(value) = extract_json_number_value(trimmed) {
                    if current_level > 0 {
                        // InstalledSize is in KB; store bytes.
                        // NOTE(review): WMI's Level is an enum where 3 means
                        // L1 — the "L{n}" key may be off by two; confirm.
                        let cache_key = format!("L{}", current_level);
                        cpu_info.cache_sizes.insert(cache_key, value * 1024);
                    }
                }
            }
        }
    }
    cpu_info
}
#[cfg(target_os = "macos")]
/// Detects the GPU on macOS by scraping the text output of
/// `system_profiler SPDisplaysDataType`. Returns a placeholder when the tool
/// cannot be executed at all.
fn detect_macos_gpu() -> GpuInfo {
    use std::process::Command;
    let output = Command::new("system_profiler")
        .arg("SPDisplaysDataType")
        .output()
        .ok();
    if let Some(output) = output {
        let output_str = String::from_utf8_lossy(&output.stdout);
        let mut name = String::from("macOS GPU");
        let mut vendor = String::from("Unknown");
        let mut vram: u64 = 0;
        let lines: Vec<&str> = output_str.lines().collect();
        let mut in_chipset_section = false;
        for line in lines {
            let trimmed = line.trim();
            // "Chipset Model:" / "Chip Type:" introduces a GPU entry and
            // carries its display name. With multiple GPUs, the last entry
            // seen wins.
            if trimmed.starts_with("Chipset Model:") || trimmed.starts_with("Chip Type:") {
                in_chipset_section = true;
                if let Some(value) = trimmed.split(':').nth(1) {
                    name = value.trim().to_string();
                }
            }
            // Classify the vendor from the name. Recomputed on every
            // subsequent line, which is redundant but harmless because the
            // classification is deterministic.
            if in_chipset_section && !name.is_empty() {
                vendor = if name.contains("Apple")
                    || name.contains("M1")
                    || name.contains("M2")
                    || name.contains("M3")
                {
                    "Apple".to_string()
                } else if name.contains("AMD") || name.contains("Radeon") {
                    "AMD".to_string()
                } else if name.contains("NVIDIA") || name.contains("GeForce") {
                    "NVIDIA".to_string()
                } else if name.contains("Intel") {
                    "Intel".to_string()
                } else {
                    "Unknown".to_string()
                };
            }
            // Dedicated VRAM line, e.g. "VRAM (Total): 8 GB".
            if trimmed.starts_with("VRAM") || trimmed.starts_with("vRAM") {
                if let Some(value_str) = trimmed.split(':').nth(1) {
                    let value_str = value_str.trim();
                    if let Some(num_str) = value_str.split_whitespace().next() {
                        if let Ok(num) = num_str.parse::<u64>() {
                            if value_str.contains("GB") {
                                vram = num * 1024 * 1024 * 1024;
                            } else if value_str.contains("MB") {
                                vram = num * 1024 * 1024;
                            }
                        }
                    }
                }
            }
            // Apple Silicon uses unified memory and reports no VRAM line.
            if (name.contains("M1") || name.contains("M2") || name.contains("M3"))
                && trimmed.starts_with("Metal:")
            {
                if let Some(value_str) = trimmed.split(':').nth(1) {
                    if value_str.contains("Supported") {
                        if vram == 0 {
                            // NOTE(review): hard-coded 8 GB stand-in for
                            // unified memory, not the machine's actual
                            // capacity — confirm whether this should query
                            // total RAM instead.
                            vram = 8 * 1024 * 1024 * 1024;
                        }
                    }
                }
            }
        }
        // Capability flags are vendor heuristics. vulkan_support is false —
        // macOS exposes no native Vulkan driver (MoltenVK not considered).
        let cuda_support = vendor == "NVIDIA";
        let opencl_support = vendor != "Unknown";
        let vulkan_support = false;
        GpuInfo {
            name,
            vendor,
            vram,
            cuda_support,
            opencl_support,
            vulkan_support,
        }
    } else {
        // system_profiler could not be executed.
        GpuInfo {
            name: "Unknown macOS GPU".to_string(),
            vendor: "Unknown".to_string(),
            vram: 0,
            cuda_support: false,
            opencl_support: false,
            vulkan_support: false,
        }
    }
}
#[cfg(target_os = "macos")]
/// Gathers CPU details on macOS via `sysctl`.
///
/// Starts from safe defaults and overwrites them only when the sysctl query
/// both succeeds and returns a non-empty value. The original assigned the
/// raw stdout unconditionally, so a failing key — `machdep.cpu.vendor` does
/// not exist on Apple Silicon — overwrote the "Unknown" default with an
/// empty string.
fn detect_macos_cpu() -> CpuInfo {
    use std::process::Command;
    let mut cpu_info = CpuInfo {
        name: "macOS CPU".to_string(),
        vendor: "Unknown".to_string(),
        physical_cores: num_cpus::get_physical(),
        logical_cores: num_cpus::get(),
        // Placeholder MHz values; sysctl does not provide frequencies here.
        base_frequency: 2400,
        max_frequency: 3600,
        cache_sizes: HashMap::new(),
        instruction_sets: Vec::new(),
    };
    // Reads one sysctl key, yielding Some only on success with real output.
    let read_sysctl = |key: &str| -> Option<String> {
        let output = Command::new("sysctl").arg("-n").arg(key).output().ok()?;
        if !output.status.success() {
            return None;
        }
        let value = String::from_utf8_lossy(&output.stdout).trim().to_string();
        if value.is_empty() {
            None
        } else {
            Some(value)
        }
    };
    if let Some(name) = read_sysctl("machdep.cpu.brand_string") {
        cpu_info.name = name;
    }
    // Absent on Apple Silicon; the "Unknown" default is kept there.
    if let Some(vendor) = read_sysctl("machdep.cpu.vendor") {
        cpu_info.vendor = vendor;
    }
    cpu_info
}
#[cfg(target_os = "linux")]
fn detect_linux_gpus() -> Vec<GpuInfo> {
let mut gpus = Vec::new();
if let Ok(output) = std::process::Command::new("lspci").arg("-nn").output() {
let output_str = String::from_utf8_lossy(&output.stdout);
for line in output_str.lines() {
if line.to_lowercase().contains("vga") || line.to_lowercase().contains("3d") {
gpus.push(parse_lspci_gpu_line(line));
}
}
}
if let Ok(output) = std::process::Command::new("nvidia-smi")
.arg("--query-gpu=name,memory.total")
.arg("--format=csv,noheader,nounits")
.output()
{
let output_str = String::from_utf8_lossy(&output.stdout);
for line in output_str.lines() {
if !line.trim().is_empty() {
gpus.push(parse_nvidia_smi_line(line));
}
}
}
if gpus.is_empty() {
gpus.push(GpuInfo {
name: "Linux GPU".to_string(),
vendor: "Unknown".to_string(),
vram: 0,
cuda_support: false,
opencl_support: false,
vulkan_support: false,
});
}
gpus
}
#[cfg(target_os = "linux")]
/// Gathers CPU details on Linux from `/proc/cpuinfo`.
///
/// The file repeats each key once per logical core; later occurrences simply
/// overwrite earlier ones with identical values.
fn detect_linux_cpu() -> CpuInfo {
    use std::fs;
    let mut info = CpuInfo {
        name: "Linux CPU".to_string(),
        vendor: "Unknown".to_string(),
        physical_cores: num_cpus::get_physical(),
        logical_cores: num_cpus::get(),
        // Placeholder MHz values; /proc/cpuinfo frequencies are not read here.
        base_frequency: 2400,
        max_frequency: 3600,
        cache_sizes: HashMap::new(),
        instruction_sets: Vec::new(),
    };
    if let Ok(content) = fs::read_to_string("/proc/cpuinfo") {
        for line in content.lines() {
            // Each line is "key\t: value"; grab the trimmed value once.
            let value = line.split(':').nth(1).map(|v| v.trim());
            if line.starts_with("model name") {
                if let Some(v) = value {
                    info.name = v.to_string();
                }
            } else if line.starts_with("vendor_id") {
                if let Some(v) = value {
                    info.vendor = v.to_string();
                }
            } else if line.starts_with("flags") {
                if let Some(v) = value {
                    // Raw kernel feature flags, e.g. "sse4_2 avx avx2".
                    info.instruction_sets =
                        v.split_whitespace().map(|s| s.to_string()).collect();
                }
            }
        }
    }
    info
}
#[cfg(target_os = "linux")]
/// Builds a [`GpuInfo`] from one `lspci -nn` line, e.g.:
///   `01:00.0 VGA compatible controller [0300]: NVIDIA Corporation GA102 [10de:2204] (rev a1)`
///
/// The bus address contributes two colons and the class label one more, so
/// the device description is everything after the third colon-separated
/// field. The original used `.split(':').last()`, which truncated the name
/// at the colon inside the trailing `[vendor:device]` ID that `-nn` appends
/// (yielding e.g. `2204] (rev a1)`); `splitn(3, ':')` keeps it intact.
fn parse_lspci_gpu_line(line: &str) -> GpuInfo {
    let name = line
        .splitn(3, ':')
        .nth(2)
        .unwrap_or("Unknown GPU")
        .trim()
        .to_string();
    // Lowercase once instead of per comparison.
    let lower = line.to_lowercase();
    let vendor = if lower.contains("nvidia") {
        "NVIDIA"
    } else if lower.contains("amd") || lower.contains("ati") {
        "AMD"
    } else if lower.contains("intel") {
        "Intel"
    } else {
        "Unknown"
    }
    .to_string();
    GpuInfo {
        name,
        // Capability flags are vendor heuristics; lspci reports no VRAM.
        cuda_support: vendor == "NVIDIA",
        vendor,
        vram: 0,
        opencl_support: true,
        vulkan_support: true,
    }
}
#[cfg(target_os = "linux")]
/// Builds a [`GpuInfo`] from one `nvidia-smi --query-gpu=name,memory.total`
/// CSV line of the form `<name>, <MiB>`.
fn parse_nvidia_smi_line(line: &str) -> GpuInfo {
    let mut fields = line.split(',');
    // split() always yields at least one item, so the default is only for safety.
    let name = fields.next().unwrap_or("NVIDIA GPU").trim().to_string();
    // memory.total is in MiB with `nounits`; convert to bytes (0 on parse failure).
    let mib = fields
        .next()
        .and_then(|s| s.trim().parse::<u64>().ok())
        .unwrap_or(0);
    GpuInfo {
        name,
        vendor: String::from("NVIDIA"),
        vram: mib * 1024 * 1024,
        cuda_support: true,
        opencl_support: true,
        vulkan_support: true,
    }
}
/// Best-effort DRAM speed guess in MT/s.
///
/// NOTE(review): no platform actually measures the speed here — on Linux the
/// mere readability of /proc/meminfo selects 3200, everything else gets 2400.
/// Treat both values as placeholders, not measurements.
///
/// The original bound the file contents to an unused `content` variable
/// (unused-variable warning); only the file's readability matters.
fn detect_memory_speed() -> u32 {
    #[cfg(target_os = "linux")]
    {
        if std::fs::read_to_string("/proc/meminfo").is_ok() {
            return 3200;
        }
    }
    2400
}
/// Reports the installed DRAM generation as a string ("DDR5" or "DDR4").
///
/// On Linux this greps `dmidecode -t memory` output (which usually requires
/// root); every other case — other OSes, tool missing, no match — defaults
/// to "DDR4".
fn detect_memory_type() -> String {
    #[cfg(target_os = "linux")]
    {
        use std::process::Command;
        if let Ok(output) = Command::new("dmidecode").arg("-t").arg("memory").output() {
            let text = String::from_utf8_lossy(&output.stdout);
            // Check the newer generation first so DDR5 wins on mixed output.
            let detected = if text.contains("DDR5") {
                Some("DDR5")
            } else if text.contains("DDR4") {
                Some("DDR4")
            } else {
                None
            };
            if let Some(generation) = detected {
                return generation.to_string();
            }
        }
    }
    "DDR4".to_string()
}
/// Worker-thread budget: three quarters of the logical cores (integer
/// division), clamped to at least 1 so small machines still get a worker.
fn calculate_optimal_threads(cpu_info: &CpuInfo) -> usize {
    let three_quarters = cpu_info.logical_cores * 3 / 4;
    if three_quarters < 1 {
        1
    } else {
        three_quarters
    }
}
/// True when any detected GPU looks usable for acceleration: a CUDA- or
/// OpenCL-capable vendor, or more than 2 GB of reported VRAM.
fn should_use_gpu(gpu_info: &[GpuInfo]) -> bool {
    for gpu in gpu_info {
        if gpu.cuda_support || gpu.opencl_support || gpu.vram > 2_000_000_000 {
            return true;
        }
    }
    false
}
/// Caps processing memory at three quarters of currently-available RAM,
/// leaving headroom for the OS and other processes.
fn calculate_memory_limit(memory_info: &MemoryInfo) -> u64 {
    let available = memory_info.available;
    available * 3 / 4
}
/// Batch size scaled by core count and available memory, capped at 64.
///
/// Base is two work items per logical core; the memory factor adds one
/// multiple per GB of available RAM. With under 1 GB available the factor
/// previously collapsed to zero and the function returned 0 — an unusable
/// batch size (the module's own test asserts it is positive) — so the
/// factor is now clamped to at least 1.
fn calculate_batch_size(cpu_info: &CpuInfo, memory_info: &MemoryInfo) -> usize {
    let base_batch_size = cpu_info.logical_cores * 2;
    let memory_factor = std::cmp::max(1, (memory_info.available / 1_000_000_000) as usize);
    std::cmp::min(base_batch_size * memory_factor, 64)
}
/// True when any advertised instruction-set name mentions a SIMD family
/// (AVX/SSE on x86, NEON on ARM). Matching is case-insensitive.
fn supports_simd(cpu_info: &CpuInfo) -> bool {
    cpu_info.instruction_sets.iter().any(|inst| {
        // Lowercase once per entry instead of once per substring test.
        let lower = inst.to_lowercase();
        lower.contains("avx") || lower.contains("sse") || lower.contains("neon")
    })
}
/// Collects free-form feature flags from the CPU and GPU snapshots:
/// "enable-simd", "high-parallelism" (8+ logical cores), and the GPU trio
/// "gpu-acceleration" / "cuda-support" / "opencl-support".
fn generate_optimization_flags(cpu_info: &CpuInfo, gpu_info: &[GpuInfo]) -> Vec<String> {
    let mut flags: Vec<String> = Vec::new();
    if supports_simd(cpu_info) {
        flags.push(String::from("enable-simd"));
    }
    if cpu_info.logical_cores >= 8 {
        flags.push(String::from("high-parallelism"));
    }
    if should_use_gpu(gpu_info) {
        flags.push(String::from("gpu-acceleration"));
        let any_cuda = gpu_info.iter().any(|gpu| gpu.cuda_support);
        let any_opencl = gpu_info.iter().any(|gpu| gpu.opencl_support);
        if any_cuda {
            flags.push(String::from("cuda-support"));
        }
        if any_opencl {
            flags.push(String::from("opencl-support"));
        }
    }
    flags
}
/// Current CPU utilization in percent; 0.0 when no probe succeeds.
fn get_cpu_usage() -> f32 {
    #[cfg(target_os = "linux")]
    {
        use std::fs;
        // First line of /proc/stat aggregates jiffies across all CPUs:
        // "cpu user nice system idle iowait irq softirq steal ..."
        if let Ok(stat) = fs::read_to_string("/proc/stat") {
            if let Some(cpu_line) = stat.lines().next() {
                let fields: Vec<&str> = cpu_line.split_whitespace().collect();
                if fields.len() >= 8 {
                    let user: u64 = fields[1].parse().unwrap_or(0);
                    let nice: u64 = fields[2].parse().unwrap_or(0);
                    let system: u64 = fields[3].parse().unwrap_or(0);
                    let idle: u64 = fields[4].parse().unwrap_or(0);
                    let iowait: u64 = fields[5].parse().unwrap_or(0);
                    // NOTE(review): a single sample yields the average
                    // utilization since boot, not the momentary load; two
                    // samples and a delta would be needed for the latter.
                    // irq/softirq/steal are also excluded from `active`.
                    let active = user + nice + system;
                    let total = active + idle + iowait;
                    if total > 0 {
                        return (active as f32 / total as f32) * 100.0;
                    }
                }
            }
        }
    }
    #[cfg(target_os = "macos")]
    {
        use std::process::Command;
        // One top sample with zero processes; its header contains a line
        // like "CPU usage: 5.26% user, 10.52% sys, 84.21% idle".
        if let Ok(output) = Command::new("top")
            .arg("-l")
            .arg("1")
            .arg("-n")
            .arg("0")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("CPU usage:") {
                    let parts: Vec<&str> = line.split(',').collect();
                    if parts.len() >= 2 {
                        // Sum the "user" and "sys" percentages.
                        let mut usage = 0.0f32;
                        for part in parts {
                            if part.contains("user") || part.contains("sys") {
                                if let Some(percent_str) = part.split('%').next() {
                                    if let Some(num_str) = percent_str.split_whitespace().last() {
                                        if let Ok(val) = num_str.parse::<f32>() {
                                            usage += val;
                                        }
                                    }
                                }
                            }
                        }
                        return usage;
                    }
                }
            }
        }
    }
    #[cfg(target_os = "windows")]
    {
        use std::process::Command;
        // Performance-counter query; CookedValue prints as a float percentage.
        if let Ok(output) = Command::new("powershell")
            .arg("-NoProfile")
            .arg("-Command")
            .arg("(Get-Counter '\\Processor(_Total)\\% Processor Time').CounterSamples.CookedValue")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage;
            }
        }
    }
    // All probes failed, or unsupported OS.
    0.0
}
/// Bytes of RAM currently in use (total minus available).
fn get_memory_usage() -> u64 {
    let (total, available) = crate::platform::get_memory_info();
    // Guard against a platform backend reporting available > total, which
    // would underflow (panic in debug builds, wrap in release).
    total.saturating_sub(available)
}
/// Current GPU utilization in percent; 0.0 when no probe succeeds.
fn get_gpu_usage() -> f32 {
    #[cfg(target_os = "linux")]
    {
        use std::process::Command;
        // NVIDIA first: nvidia-smi prints a bare integer percentage.
        // NOTE(review): with multiple GPUs the output is multi-line and this
        // trim+parse fails, falling through — confirm that is acceptable.
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=utilization.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage;
            }
        }
        // AMD fallback: one radeontop sample dumped to stdout ("-d -"),
        // scanning for the first whitespace token ending in '%' on a line
        // mentioning "gpu".
        if let Ok(output) = Command::new("radeontop")
            .arg("-d")
            .arg("-")
            .arg("-l")
            .arg("1")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("gpu") {
                    if let Some(percent_part) = line.split_whitespace().find(|s| s.ends_with('%')) {
                        if let Ok(usage) = percent_part.trim_end_matches('%').parse::<f32>() {
                            return usage;
                        }
                    }
                }
            }
        }
    }
    #[cfg(target_os = "macos")]
    {
        use std::process::Command;
        // powermetrics gpu_power sampler (requires root), one 1000 ms sample.
        if let Ok(output) = Command::new("powermetrics")
            .arg("--samplers")
            .arg("gpu_power")
            .arg("-n")
            .arg("1")
            .arg("-i")
            .arg("1000")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("GPU Active") || line.contains("GPU HW active") {
                    if let Some(percent_str) = line.split(':').nth(1) {
                        let percent_str = percent_str.trim();
                        if let Some(num_str) = percent_str.split('%').next() {
                            if let Ok(usage) = num_str.trim().parse::<f32>() {
                                return usage;
                            }
                        }
                    }
                }
            }
        }
        // ioreg fallback only proves an accelerator with statistics exists;
        // the 15.0 is a made-up "some activity" placeholder, not a reading.
        if let Ok(output) = Command::new("ioreg")
            .arg("-r")
            .arg("-c")
            .arg("IOAccelerator")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if output_str.contains("PerformanceStatistics") {
                return 15.0;
            }
        }
    }
    #[cfg(target_os = "windows")]
    {
        use std::process::Command;
        // NVIDIA first, same as on Linux.
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=utilization.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage;
            }
        }
        // Generic fallback: sum of all 3D GPU-engine counters. The sum can
        // exceed 100 across engines, hence the clamp.
        if let Ok(output) = Command::new("powershell")
            .arg("-NoProfile")
            .arg("-Command")
            .arg("(Get-Counter '\\GPU Engine(*engtype_3D)\\Utilization Percentage').CounterSamples | Measure-Object -Property CookedValue -Sum | Select-Object -ExpandProperty Sum")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(usage) = output_str.trim().parse::<f32>() {
                return usage.min(100.0);
            }
        }
    }
    // All probes failed, or unsupported OS.
    0.0
}
/// Reads CPU/GPU temperatures (°C) with per-OS probes and derives a
/// thermal-throttling estimate from fixed per-OS thresholds.
fn get_temperature_info() -> TemperatureInfo {
    // Plausible defaults used when no sensor can be read.
    let mut cpu_temp = 45.0;
    let mut gpu_temp = 50.0;
    // The `false` initializer is dead on supported targets (each cfg block
    // overwrites it), hence the allow.
    #[allow(unused_assignments)]
    let mut thermal_throttling = false;
    #[cfg(target_os = "linux")]
    {
        use std::fs;
        use std::process::Command;
        // lm-sensors machine-readable output ("temp1_input: 42.000").
        if let Ok(output) = Command::new("sensors").arg("-u").output() {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("temp1_input") || line.contains("Core 0") {
                    if let Some(temp_str) = line.split(':').nth(1) {
                        if let Ok(temp) = temp_str.trim().parse::<f32>() {
                            cpu_temp = temp;
                            break;
                        }
                    }
                }
            }
        }
        // sysfs thermal zones; values are millidegrees Celsius.
        // NOTE(review): this overwrites any `sensors` reading above and uses
        // the first zone regardless of its `type`, which may not be the
        // CPU — confirm intent.
        if let Ok(entries) = fs::read_dir("/sys/class/thermal") {
            for entry in entries.flatten() {
                let path = entry.path();
                if path
                    .file_name()
                    .unwrap_or_default()
                    .to_str()
                    .unwrap_or("")
                    .starts_with("thermal_zone")
                {
                    let temp_path = path.join("temp");
                    if let Ok(temp_str) = fs::read_to_string(&temp_path) {
                        if let Ok(temp_millidegrees) = temp_str.trim().parse::<i32>() {
                            cpu_temp = temp_millidegrees as f32 / 1000.0;
                            break;
                        }
                    }
                }
            }
        }
        // NVIDIA GPU temperature, plain integer in °C.
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=temperature.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(temp) = output_str.trim().parse::<f32>() {
                gpu_temp = temp;
            }
        }
        // Threshold heuristic, not the hardware's real throttle flag.
        thermal_throttling = cpu_temp > 85.0 || gpu_temp > 80.0;
    }
    #[cfg(target_os = "macos")]
    {
        use std::process::Command;
        // powermetrics thermal sampler (requires root), one 1000 ms sample.
        if let Ok(output) = Command::new("powermetrics")
            .arg("--samplers")
            .arg("thermal")
            .arg("-n")
            .arg("1")
            .arg("-i")
            .arg("1000")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            for line in output_str.lines() {
                if line.contains("CPU die temperature") || line.contains("CPU temp") {
                    if let Some(temp_str) = line.split(':').nth(1) {
                        let temp_str = temp_str.trim();
                        if let Some(num_str) = temp_str.split_whitespace().next() {
                            if let Ok(temp) = num_str.parse::<f32>() {
                                cpu_temp = temp;
                            }
                        }
                    }
                }
                if line.contains("GPU die temperature") || line.contains("GPU temp") {
                    if let Some(temp_str) = line.split(':').nth(1) {
                        let temp_str = temp_str.trim();
                        if let Some(num_str) = temp_str.split_whitespace().next() {
                            if let Ok(temp) = num_str.parse::<f32>() {
                                gpu_temp = temp;
                            }
                        }
                    }
                }
            }
        }
        // Third-party `osx-cpu-temp` tool prints e.g. "61.8°C"; when present
        // its value overrides the powermetrics reading above.
        if let Ok(output) = Command::new("osx-cpu-temp").output() {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Some(temp_str) = output_str.split('°').next() {
                if let Ok(temp) = temp_str.trim().parse::<f32>() {
                    cpu_temp = temp;
                }
            }
        }
        thermal_throttling = cpu_temp > 95.0 || gpu_temp > 90.0;
    }
    #[cfg(target_os = "windows")]
    {
        use std::process::Command;
        // NVIDIA GPU temperature, plain integer in °C.
        if let Ok(output) = Command::new("nvidia-smi")
            .arg("--query-gpu=temperature.gpu")
            .arg("--format=csv,noheader,nounits")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(temp) = output_str.trim().parse::<f32>() {
                gpu_temp = temp;
            }
        }
        // ACPI thermal zone reports tenths of a Kelvin; convert to °C.
        // NOTE(review): this zone is often a motherboard sensor rather than
        // the CPU die — verify on target hardware.
        if let Ok(output) = Command::new("powershell")
            .arg("-NoProfile")
            .arg("-Command")
            .arg("Get-WmiObject MSAcpi_ThermalZoneTemperature -Namespace root/wmi | Select-Object -ExpandProperty CurrentTemperature")
            .output()
        {
            let output_str = String::from_utf8_lossy(&output.stdout);
            if let Ok(temp_tenth_kelvin) = output_str.trim().parse::<f32>() {
                cpu_temp = (temp_tenth_kelvin / 10.0) - 273.15;
            }
        }
        thermal_throttling = cpu_temp > 90.0 || gpu_temp > 85.0;
    }
    TemperatureInfo {
        cpu_temp,
        gpu_temp,
        thermal_throttling,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke tests: these exercise the real platform probes, so they assert
    // only invariants that must hold on any machine.
    #[test]
    fn test_gpu_detection() {
        let gpus = get_gpu_info();
        // At least the "Unknown GPU" placeholder is always present.
        assert!(!gpus.is_empty());
        assert!(!gpus[0].name.is_empty());
    }
    #[test]
    fn test_cpu_detection() {
        let cpu = get_cpu_info();
        assert!(!cpu.name.is_empty());
        assert!(cpu.logical_cores > 0);
        assert!(cpu.physical_cores > 0);
    }
    #[test]
    fn test_memory_detection() {
        let memory = get_memory_info();
        assert!(memory.total > 0);
        assert!(memory.available <= memory.total);
    }
    #[test]
    fn test_optimization_recommendations() {
        let recommendations = get_optimization_recommendations();
        assert!(recommendations.worker_threads > 0);
        assert!(recommendations.memory_limit > 0);
        assert!(recommendations.batch_size > 0);
    }
    #[test]
    fn test_hardware_monitoring() {
        let usage = monitor_hardware_usage();
        // Probes report 0.0 on failure, so only non-negativity is guaranteed.
        assert!(usage.cpu_usage >= 0.0);
        assert!(usage.gpu_usage >= 0.0);
    }
}