pub mod auto_tuning;
pub mod cpu;
pub mod gpu;
pub mod memory;
pub mod network;
pub mod optimization;
pub mod storage;
use crate::error::{CoreError, CoreResult};
use std::time::Duration;
/// Snapshot of all detected hardware resources plus the optimization
/// parameters derived from them by [`optimization::OptimizationParams::generate`].
#[derive(Debug, Clone)]
pub struct SystemResources {
// CPU model, core counts, frequencies, caches, and SIMD capability flags.
pub cpu: cpu::CpuInfo,
// Total/available memory and page size.
pub memory: memory::MemoryInfo,
// `None` when no GPU is present or GPU detection failed (detection errors
// are swallowed — see `SystemResources::discover`).
pub gpu: Option<gpu::GpuInfo>,
// Detected network characteristics.
pub network: network::NetworkInfo,
// Detected storage characteristics.
pub storage: storage::StorageInfo,
// Tuning recommendations (thread count, chunk size, SIMD/GPU enablement)
// generated from the fields above.
pub optimization_params: optimization::OptimizationParams,
}
impl SystemResources {
/// Detect all system resources and derive optimization parameters.
///
/// GPU detection failure is non-fatal (the `gpu` field becomes `None`);
/// every other detector error aborts discovery.
///
/// # Errors
/// Returns an error if CPU, memory, network, or storage detection fails,
/// or if optimization-parameter generation fails.
pub fn discover() -> CoreResult<Self> {
    let cpu = cpu::CpuInfo::detect()?;
    let memory = memory::MemoryInfo::detect()?;
    // A GPU is optional hardware: swallow detection errors.
    let gpu = gpu::GpuInfo::detect().ok();
    let network = network::NetworkInfo::detect()?;
    let storage = storage::StorageInfo::detect()?;
    let optimization_params = optimization::OptimizationParams::generate(
        &cpu,
        &memory,
        gpu.as_ref(),
        &network,
        &storage,
    )?;
    Ok(Self {
        cpu,
        memory,
        gpu,
        network,
        storage,
        optimization_params,
    })
}

/// Recommended worker-thread count from the generated optimization params.
pub fn recommended_thread_count(&self) -> usize {
    self.optimization_params.thread_count
}

/// Recommended processing chunk size from the generated optimization
/// params (assumed to be in bytes — `summary_report` divides by 1024 to
/// print KB; confirm against `OptimizationParams`).
pub fn recommended_chunk_size(&self) -> usize {
    self.optimization_params.chunk_size
}

/// Whether the CPU exposes any known SIMD instruction set.
///
/// Fix: previously only AVX2/SSE4.2 were consulted, so AArch64 hosts with
/// NEON (and hosts reporting only AVX-512) were incorrectly reported as
/// lacking SIMD support.
pub fn supports_simd(&self) -> bool {
    let simd = &self.cpu.simd_capabilities;
    simd.sse4_2 || simd.avx2 || simd.avx512 || simd.neon
}

/// Whether a GPU was detected.
pub fn supports_gpu(&self) -> bool {
    self.gpu.is_some()
}

/// Total physical memory in bytes.
pub fn total_memory(&self) -> usize {
    self.memory.total_memory
}

/// Currently available memory in bytes.
pub fn available_memory(&self) -> usize {
    self.memory.available_memory
}

/// Classify overall machine capability from the mean of the CPU, memory,
/// and GPU performance scores.
///
/// NOTE(review): a missing GPU contributes 0.0 yet the sum is still divided
/// by 3, so GPU-less machines are biased toward lower tiers even with
/// perfect CPU/memory scores. This looks like intentional weighting —
/// confirm before changing.
pub fn performance_tier(&self) -> PerformanceTier {
    let cpu_score = self.cpu.performance_score();
    let memory_score = self.memory.performance_score();
    let gpu_score = self
        .gpu
        .as_ref()
        .map(|g| g.performance_score())
        .unwrap_or(0.0);
    let combined_score = (cpu_score + memory_score + gpu_score) / 3.0;
    if combined_score >= 0.8 {
        PerformanceTier::High
    } else if combined_score >= 0.5 {
        PerformanceTier::Medium
    } else {
        PerformanceTier::Low
    }
}

/// Render a human-readable Markdown summary of the detected hardware and
/// the derived optimization recommendations.
pub fn summary_report(&self) -> String {
    let mut report = String::new();
    report.push_str("# System Resource Summary\n\n");
    report.push_str("## CPU\n");
    report.push_str(&format!("- Model: {}\n", self.cpu.model));
    report.push_str(&format!(
        "- Cores: {} physical, {} logical\n",
        self.cpu.physical_cores, self.cpu.logical_cores
    ));
    report.push_str(&format!(
        "- Base frequency: {:.2} GHz\n",
        self.cpu.base_frequency_ghz
    ));
    report.push_str(&format!(
        "- Cache L1: {} KB, L2: {} KB, L3: {} KB\n",
        self.cpu.cache_l1_kb, self.cpu.cache_l2_kb, self.cpu.cache_l3_kb
    ));
    report.push_str("- SIMD support:");
    if self.cpu.simd_capabilities.sse4_2 {
        report.push_str(" SSE4.2");
    }
    if self.cpu.simd_capabilities.avx2 {
        report.push_str(" AVX2");
    }
    if self.cpu.simd_capabilities.avx512 {
        report.push_str(" AVX512");
    }
    if self.cpu.simd_capabilities.neon {
        report.push_str(" NEON");
    }
    report.push('\n');
    report.push_str("\n## Memory\n");
    report.push_str(&format!(
        "- Total: {:.2} GB\n",
        self.memory.total_memory as f64 / (1024.0 * 1024.0 * 1024.0)
    ));
    report.push_str(&format!(
        "- Available: {:.2} GB\n",
        self.memory.available_memory as f64 / (1024.0 * 1024.0 * 1024.0)
    ));
    report.push_str(&format!(
        "- Page size: {} KB\n",
        self.memory.page_size / 1024
    ));
    // GPU section is emitted only when a GPU was detected.
    if let Some(ref gpu) = self.gpu {
        report.push_str("\n## GPU\n");
        report.push_str(&format!("- Model: {}\n", gpu.name));
        report.push_str(&format!(
            "- Memory: {:.2} GB\n",
            gpu.memory_total as f64 / (1024.0 * 1024.0 * 1024.0)
        ));
        report.push_str(&format!("- Compute units: {}\n", gpu.compute_units));
    }
    report.push_str("\n## Optimization Recommendations\n");
    report.push_str(&format!(
        "- Thread count: {}\n",
        self.optimization_params.thread_count
    ));
    report.push_str(&format!(
        "- Chunk size: {} KB\n",
        self.optimization_params.chunk_size / 1024
    ));
    report.push_str(&format!(
        "- SIMD enabled: {}\n",
        self.optimization_params.enable_simd
    ));
    report.push_str(&format!(
        "- GPU enabled: {}\n",
        self.optimization_params.enable_gpu
    ));
    report.push_str(&format!(
        "\n## Performance Tier: {:?}\n",
        self.performance_tier()
    ));
    report
}
}
/// Coarse machine-capability classification produced by
/// `SystemResources::performance_tier` from combined CPU/memory/GPU scores.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PerformanceTier {
// Combined score >= 0.8.
High,
// Combined score in [0.5, 0.8).
Medium,
// Combined score < 0.5.
Low,
}
/// Controls which detectors run during discovery and how results are cached.
#[derive(Debug, Clone)]
pub struct DiscoveryConfig {
// Run CPU detection (otherwise `CpuInfo::default()` is used).
pub detect_cpu: bool,
// Run memory detection.
pub detect_memory: bool,
// Attempt GPU detection (failure is always non-fatal).
pub detect_gpu: bool,
// Run network detection.
pub detect_network: bool,
// Run storage detection.
// NOTE(review): name is missing an underscore (`detect_storage`); kept
// as-is because this is a public field and renaming would break callers.
pub detectstorage: bool,
// Cache discovery results and serve them until `cache_duration` elapses.
pub cache_results: bool,
// Time-to-live for a cached `SystemResources`.
pub cache_duration: Duration,
// Enable slower, more detailed probing where detectors support it.
pub detailed_detection: bool,
}
impl Default for DiscoveryConfig {
/// All detectors enabled, caching on with a five-minute TTL, detailed
/// probing off.
fn default() -> Self {
    Self {
        detect_cpu: true,
        detect_memory: true,
        detect_gpu: true,
        detect_network: true,
        detectstorage: true,
        cache_results: true,
        // Hardware rarely changes at runtime, so a generous TTL avoids
        // repeated probing.
        cache_duration: Duration::from_secs(300),
        detailed_detection: false,
    }
}
}
impl DiscoveryConfig {
/// Same as [`Default::default`]; provided for builder-style call chains.
pub fn new() -> Self {
    Self::default()
}

/// Enable every detector; caching settings are left untouched.
pub fn detect_all(self) -> Self {
    Self {
        detect_cpu: true,
        detect_memory: true,
        detect_gpu: true,
        detect_network: true,
        detectstorage: true,
        ..self
    }
}

/// Disable every detector; caching settings are left untouched.
pub fn detect_none(self) -> Self {
    Self {
        detect_cpu: false,
        detect_memory: false,
        detect_gpu: false,
        detect_network: false,
        detectstorage: false,
        ..self
    }
}

/// Enable only the cheap, essential detectors (CPU and memory).
pub fn detect_essential(self) -> Self {
    Self {
        detect_cpu: true,
        detect_memory: true,
        detect_gpu: false,
        detect_network: false,
        detectstorage: false,
        ..self
    }
}

/// Turn caching on with the given time-to-live.
pub fn with_cache_duration(self, duration: Duration) -> Self {
    Self {
        cache_results: true,
        cache_duration: duration,
        ..self
    }
}

/// Toggle detailed (slower) probing.
pub fn with_detailed_detection(self, enabled: bool) -> Self {
    Self {
        detailed_detection: enabled,
        ..self
    }
}
}
/// Discovery service that runs detectors per a [`DiscoveryConfig`] and,
/// when enabled, caches the most recent result with its timestamp.
pub struct ResourceDiscovery {
// Which detectors to run and the caching policy.
config: DiscoveryConfig,
// Last discovery result plus the instant it was produced; `None` until
// the first cached discovery. Mutex makes the cache usable from `&self`.
cached_resources: std::sync::Mutex<Option<(SystemResources, std::time::Instant)>>,
}
impl ResourceDiscovery {
/// Build a discovery service with the given configuration and an
/// initially empty cache.
pub fn new(config: DiscoveryConfig) -> Self {
    let cached_resources = std::sync::Mutex::new(None);
    Self {
        config,
        cached_resources,
    }
}
}
impl Default for ResourceDiscovery {
// A discovery service built from `DiscoveryConfig::default()`
// (all detectors on, five-minute cache).
fn default() -> Self {
Self::new(DiscoveryConfig::default())
}
}
impl ResourceDiscovery {
pub fn discover(&self) -> CoreResult<SystemResources> {
if self.config.cache_results {
if let Ok(cache) = self.cached_resources.lock() {
if let Some((ref resources, timestamp)) = *cache {
if timestamp.elapsed() < self.config.cache_duration {
return Ok(resources.clone());
}
}
}
}
let resources = self.discover_fresh()?;
if self.config.cache_results {
if let Ok(mut cache) = self.cached_resources.lock() {
*cache = Some((resources.clone(), std::time::Instant::now()));
}
}
Ok(resources)
}
pub fn discover_fresh(&self) -> CoreResult<SystemResources> {
let cpu = if self.config.detect_cpu {
cpu::CpuInfo::detect()?
} else {
cpu::CpuInfo::default()
};
let memory = if self.config.detect_memory {
memory::MemoryInfo::detect()?
} else {
memory::MemoryInfo::default()
};
let gpu = if self.config.detect_gpu {
gpu::GpuInfo::detect().ok()
} else {
None
};
let network = if self.config.detect_network {
network::NetworkInfo::detect()?
} else {
network::NetworkInfo::default()
};
let storage = if self.config.detectstorage {
storage::StorageInfo::detect()?
} else {
storage::StorageInfo::default()
};
let optimization_params = optimization::OptimizationParams::generate(
&cpu,
&memory,
gpu.as_ref(),
&network,
&storage,
)?;
Ok(SystemResources {
cpu,
memory,
gpu,
network,
storage,
optimization_params,
})
}
pub fn clear_cache(&self) -> CoreResult<()> {
if let Ok(mut cache) = self.cached_resources.lock() {
*cache = None;
Ok(())
} else {
Err(CoreError::ComputationError(
crate::error::ErrorContext::new("Failed to clear cache"),
))
}
}
pub fn cache_status(&self) -> CoreResult<Option<Duration>> {
if let Ok(cache) = self.cached_resources.lock() {
if let Some((_, timestamp)) = cache.as_ref() {
Ok(Some(timestamp.elapsed()))
} else {
Ok(None)
}
} else {
Err(CoreError::ComputationError(
crate::error::ErrorContext::new("Failed to read cache status"),
))
}
}
}
// Process-wide discovery service with default config, initialized lazily on
// first use; backs the free `get_*` convenience functions below.
static GLOBAL_RESOURCE_DISCOVERY: std::sync::LazyLock<ResourceDiscovery> =
std::sync::LazyLock::new(ResourceDiscovery::default);
/// Access the process-wide [`ResourceDiscovery`] instance.
#[allow(dead_code)]
pub fn global_resource_discovery() -> &'static ResourceDiscovery {
&GLOBAL_RESOURCE_DISCOVERY
}
/// Discover system resources via the global service (cached per its config).
#[allow(dead_code)]
pub fn get_system_resources() -> CoreResult<SystemResources> {
global_resource_discovery().discover()
}
/// Recommended worker-thread count from globally discovered resources.
#[allow(dead_code)]
pub fn get_recommended_thread_count() -> CoreResult<usize> {
    get_system_resources().map(|resources| resources.recommended_thread_count())
}
/// Recommended chunk size from globally discovered resources.
#[allow(dead_code)]
pub fn get_recommended_chunk_size() -> CoreResult<usize> {
    get_system_resources().map(|resources| resources.recommended_chunk_size())
}
/// Whether the detected CPU reports usable SIMD support.
#[allow(dead_code)]
pub fn is_simd_supported() -> CoreResult<bool> {
    get_system_resources().map(|resources| resources.supports_simd())
}
/// Whether a GPU was detected on this system.
#[allow(dead_code)]
pub fn is_gpu_available() -> CoreResult<bool> {
    get_system_resources().map(|resources| resources.supports_gpu())
}
/// Total physical memory in bytes, per global discovery.
#[allow(dead_code)]
pub fn get_total_memory() -> CoreResult<usize> {
    get_system_resources().map(|resources| resources.total_memory())
}
/// Currently available memory in bytes, per global discovery.
#[allow(dead_code)]
pub fn get_available_memory() -> CoreResult<usize> {
    get_system_resources().map(|resources| resources.available_memory())
}
/// Overall performance tier of this machine, per global discovery.
#[allow(dead_code)]
pub fn get_performance_tier() -> CoreResult<PerformanceTier> {
    get_system_resources().map(|resources| resources.performance_tier())
}
/// Periodically refreshes optimization parameters by re-running discovery
/// no more often than `monitoring_interval`.
pub struct ResourceMonitor {
// Underlying discovery service (fresh discovery is used, not its cache).
discovery: ResourceDiscovery,
// Minimum time between two fresh discoveries.
monitoring_interval: Duration,
// Instant of the last refresh; guarded so `&self` methods can update it.
last_update: std::sync::Mutex<std::time::Instant>,
// Most recently adopted parameters, served between refreshes.
adaptive_params: std::sync::Mutex<optimization::OptimizationParams>,
}
impl ResourceMonitor {
/// Create a monitor that refreshes optimization parameters at most once
/// per `monitoringinterval`.
pub fn new(config: DiscoveryConfig, monitoringinterval: Duration) -> Self {
    let discovery = ResourceDiscovery::new(config);
    // Backdate the last-update stamp by one interval so the very first
    // `update_optimization_params` call performs a real discovery.
    // Previously the stamp was `Instant::now()`, so early calls returned
    // `OptimizationParams::default()` until a full interval had elapsed
    // (stale-start bug). `checked_sub` can fail near process start on some
    // platforms; fall back to the old behavior in that rare case.
    let initial_stamp = std::time::Instant::now()
        .checked_sub(monitoringinterval)
        .unwrap_or_else(std::time::Instant::now);
    Self {
        discovery,
        monitoring_interval: monitoringinterval,
        last_update: std::sync::Mutex::new(initial_stamp),
        adaptive_params: std::sync::Mutex::new(optimization::OptimizationParams::default()),
    }
}

/// Return current optimization parameters, re-running discovery first if
/// at least `monitoring_interval` has passed since the last refresh.
///
/// A poisoned timestamp lock is treated as "stale" — re-detecting is safer
/// than serving parameters of unknown age.
///
/// # Errors
/// Propagates discovery errors, and fails if the parameter lock is
/// poisoned when serving cached parameters.
pub fn update_optimization_params(&self) -> CoreResult<optimization::OptimizationParams> {
    let due = self
        .last_update
        .lock()
        .map(|stamp| stamp.elapsed() >= self.monitoring_interval)
        .unwrap_or(true);
    if !due {
        return if let Ok(params) = self.adaptive_params.lock() {
            Ok(params.clone())
        } else {
            Err(CoreError::ComputationError(
                crate::error::ErrorContext::new("Failed to read adaptive parameters"),
            ))
        };
    }
    let resources = self.discovery.discover_fresh()?;
    if let Ok(mut params) = self.adaptive_params.lock() {
        *params = resources.optimization_params.clone();
    }
    if let Ok(mut stamp) = self.last_update.lock() {
        *stamp = std::time::Instant::now();
    }
    Ok(resources.optimization_params)
}

/// Return the most recently adopted parameters without triggering a refresh.
///
/// # Errors
/// Fails if the parameter lock is poisoned.
pub fn current_params(&self) -> CoreResult<optimization::OptimizationParams> {
    if let Ok(params) = self.adaptive_params.lock() {
        Ok(params.clone())
    } else {
        Err(CoreError::ComputationError(
            crate::error::ErrorContext::new("Failed to read current parameters"),
        ))
    }
}
}
#[cfg(test)]
mod tests {
use super::*;

/// Builder methods compose and the resulting flags/durations stick.
#[test]
fn test_discovery_config() {
    let config = DiscoveryConfig::new()
        .detect_essential()
        .with_cache_duration(Duration::from_secs(60))
        .with_detailed_detection(true);
    assert!(config.detect_cpu);
    assert!(config.detect_memory);
    assert!(!config.detect_gpu);
    assert_eq!(config.cache_duration, Duration::from_secs(60));
    assert!(config.detailed_detection);
}

/// `PerformanceTier` derives `PartialEq`/`Eq` correctly.
#[test]
fn test_performance_tier() {
    assert_eq!(PerformanceTier::High, PerformanceTier::High);
    assert_ne!(PerformanceTier::High, PerformanceTier::Low);
}

/// Essential-only discovery succeeds on the host running the tests.
#[test]
fn test_resource_discovery() {
    let config = DiscoveryConfig::new().detect_essential();
    let discovery = ResourceDiscovery::new(config);
    let resources = discovery.discover();
    assert!(resources.is_ok());
}

/// Global convenience helpers return usable (non-zero) recommendations.
#[test]
fn test_global_functions() {
    let thread_count = get_recommended_thread_count();
    assert!(thread_count.is_ok());
    assert!(thread_count.expect("thread count discovery failed") > 0);
    let chunk_size = get_recommended_chunk_size();
    assert!(chunk_size.is_ok());
    assert!(chunk_size.expect("chunk size discovery failed") > 0);
}

/// Monitor can refresh and then report its current parameters.
#[test]
fn test_resource_monitor() {
    let config = DiscoveryConfig::new().detect_essential();
    let monitor = ResourceMonitor::new(config, Duration::from_secs(1));
    let params = monitor.update_optimization_params();
    assert!(params.is_ok());
    let current = monitor.current_params();
    assert!(current.is_ok());
}
}