use serde::{Deserialize, Serialize};
/// Aggregated hardware/software capability snapshot of the current host.
///
/// Populated by [`PlatformCapabilities::detect`]; serializable so a probe
/// result can be cached to disk or shipped between processes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlatformCapabilities {
/// CPU core counts, SIMD feature flags, and cache layout.
pub cpu: CpuCapabilities,
/// GPU availability and the list of detected devices.
pub gpu: GpuCapabilities,
/// System memory totals and related properties.
pub memory: MemoryCapabilities,
/// Coarse classification of the machine (desktop, server, ...).
pub platform_type: PlatformType,
/// Detected operating system family.
pub os: OperatingSystem,
/// Detected CPU instruction-set architecture.
pub architecture: Architecture,
}
/// CPU topology, identification, and feature information.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuCapabilities {
/// Number of physical cores.
pub physical_cores: usize,
/// Number of logical cores (hardware threads); >= `physical_cores` when SMT is present.
pub logical_cores: usize,
/// SIMD instruction-set extensions detected on this CPU.
pub simd: SimdCapabilities,
/// Cache hierarchy sizes, where the detector could determine them.
pub cache: CacheInfo,
/// Base clock frequency in MHz, if known.
pub base_clock_mhz: Option<f32>,
/// CPU vendor string (e.g. as reported by the platform detector).
pub vendor: String,
/// Human-readable CPU model name.
pub model_name: String,
}
/// Per-extension SIMD feature flags.
///
/// The x86 flags (`sse*`, `avx*`, `fma`) and the ARM flags (`neon`, `sve`)
/// are mutually exclusive in practice; the detector sets whichever family
/// matches the running architecture.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SimdCapabilities {
/// x86 SSE (128-bit).
pub sse: bool,
/// x86 SSE2 (baseline on x86_64).
pub sse2: bool,
/// x86 SSE3.
pub sse3: bool,
/// x86 SSSE3.
pub ssse3: bool,
/// x86 SSE4.1.
pub sse4_1: bool,
/// x86 SSE4.2.
pub sse4_2: bool,
/// x86 AVX (256-bit).
pub avx: bool,
/// x86 AVX2 (256-bit integer ops).
pub avx2: bool,
/// x86 AVX-512 (512-bit).
pub avx512: bool,
/// x86 fused multiply-add (FMA3).
pub fma: bool,
/// ARM NEON (128-bit).
pub neon: bool,
/// ARM Scalable Vector Extension (variable width).
pub sve: bool,
}
/// Cache hierarchy sizes; `None` when a level could not be detected.
///
/// NOTE(review): units are presumably bytes, but nothing in this file pins
/// them down — confirm against the detector implementation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheInfo {
/// L1 data cache size.
pub l1_data: Option<usize>,
/// L1 instruction cache size.
pub l1_instruction: Option<usize>,
/// Unified L2 cache size.
pub l2: Option<usize>,
/// Shared L3 cache size.
pub l3: Option<usize>,
/// Cache line size.
pub line_size: Option<usize>,
}
/// GPU availability and the set of detected devices.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GpuCapabilities {
/// Whether any GPU backend is usable. Callers that need an actual device
/// should also check that `devices` is non-empty (see `PlatformCapabilities::has_gpu`).
pub available: bool,
/// All detected GPU devices.
pub devices: Vec<GpuDevice>,
/// Index into `devices` of the preferred device, if one was chosen.
pub primary_device: Option<usize>,
}
/// Properties of a single detected GPU.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GpuDevice {
/// Device name as reported by the driver/backend.
pub name: String,
/// Vendor string (e.g. the value reported by the backend).
pub vendor: String,
/// Discrete vs. integrated vs. virtual classification.
pub device_type: GpuType,
/// Device memory in bytes.
pub memory_bytes: usize,
/// Number of compute units (backend-specific notion).
pub compute_units: usize,
/// Maximum workgroup/threadgroup size supported.
pub max_workgroup_size: usize,
/// CUDA core count; `None` for non-NVIDIA devices or when unknown.
pub cuda_cores: Option<usize>,
/// CUDA compute capability as (major, minor); `None` when not applicable.
pub compute_capability: Option<(u32, u32)>,
}
/// Physical classification of a GPU device.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuType {
/// Dedicated card with its own memory.
Discrete,
/// GPU integrated with the CPU, sharing system memory.
Integrated,
/// Virtualized/passthrough device (e.g. in a VM).
Virtual,
/// Classification could not be determined.
Unknown,
}
/// System memory characteristics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryCapabilities {
/// Total physical memory in bytes (compared against a 16 GiB byte count
/// in `PlatformCapabilities::is_suitable_for_large_quantum_sim`).
pub total_memory: usize,
/// Memory currently available, presumably in bytes — confirm against the detector.
pub available_memory: usize,
/// Estimated memory bandwidth in GB/s, if known.
pub bandwidth_gbps: Option<f32>,
/// Number of NUMA nodes (1 on non-NUMA systems).
pub numa_nodes: usize,
/// Whether huge/large pages are supported.
pub hugepage_support: bool,
}
/// Coarse classification of the kind of machine we are running on.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PlatformType {
/// Workstation / desktop machine.
Desktop,
/// Dedicated server hardware.
Server,
/// Phone or tablet class device.
Mobile,
/// Resource-constrained embedded device.
Embedded,
/// Cloud VM / container instance.
Cloud,
/// Could not be classified.
Unknown,
}
/// Detected operating system family.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OperatingSystem {
Linux,
Windows,
MacOS,
FreeBSD,
Android,
/// OS could not be identified (or is not one of the listed families).
Unknown,
}
/// Detected CPU instruction-set architecture.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Architecture {
/// 64-bit x86 (amd64).
X86_64,
/// 64-bit ARM.
Aarch64,
/// 64-bit RISC-V.
Riscv64,
/// 32-bit WebAssembly target.
Wasm32,
/// Architecture could not be identified.
Unknown,
}
impl PlatformCapabilities {
    /// Probes the current host and returns a populated capability snapshot.
    ///
    /// Delegates to the platform detector module for the actual probing.
    pub fn detect() -> Self {
        crate::platform::detector::detect_platform_capabilities()
    }

    /// Returns `true` if any usable SIMD extension was detected
    /// (SSE2/AVX/AVX2/AVX-512 on x86, NEON/SVE on ARM).
    pub const fn has_simd(&self) -> bool {
        // `avx512` is included so that a record reporting width 8 from
        // `optimal_simd_width_f64` can never simultaneously report
        // `has_simd() == false` (previously possible if only `avx512` was set).
        self.cpu.simd.sse2
            || self.cpu.simd.avx
            || self.cpu.simd.avx2
            || self.cpu.simd.avx512
            || self.cpu.simd.neon
            || self.cpu.simd.sve
    }

    /// Backward-compatible alias for [`Self::has_simd`].
    pub const fn simd_available(&self) -> bool {
        self.has_simd()
    }

    /// Returns the raw GPU availability flag.
    ///
    /// Unlike [`Self::has_gpu`], this does not require the device list to be
    /// non-empty.
    pub const fn gpu_available(&self) -> bool {
        self.gpu.available
    }

    /// Number of `f64` lanes in the widest detected SIMD register:
    /// 8 for AVX-512 (512-bit), 4 for AVX/AVX2 (256-bit), 2 for SSE2/NEON
    /// (128-bit), 1 for scalar fallback. SVE is variable-width and is not
    /// given a fixed lane count here.
    pub const fn optimal_simd_width_f64(&self) -> usize {
        if self.cpu.simd.avx512 {
            8
        } else if self.cpu.simd.avx || self.cpu.simd.avx2 {
            4
        } else if self.cpu.simd.sse2 || self.cpu.simd.neon {
            2
        } else {
            1
        }
    }

    /// Returns `true` only when the GPU backend is available AND at least one
    /// device was actually enumerated.
    pub fn has_gpu(&self) -> bool {
        self.gpu.available && !self.gpu.devices.is_empty()
    }

    /// Returns the preferred GPU device, if `primary_device` holds a valid
    /// index into the device list (`None` otherwise — an out-of-range index
    /// is treated as "no primary device" rather than panicking).
    pub fn primary_gpu(&self) -> Option<&GpuDevice> {
        self.gpu
            .primary_device
            .and_then(|idx| self.gpu.devices.get(idx))
    }

    /// Heuristic gate for large quantum-state simulations: requires at least
    /// 16 GiB of total RAM, 8 logical cores, and SIMD support.
    pub const fn is_suitable_for_large_quantum_sim(&self) -> bool {
        self.memory.total_memory >= 16 * 1024 * 1024 * 1024
            && self.cpu.logical_cores >= 8
            && self.has_simd()
    }
}