//! Backend detection for `hive_gpu` (backends/detector.rs): enumerates the
//! compute backends available in this build and selects the best one.

use crate::error::{HiveGpuError, Result};
/// The compute backend a `hive_gpu` index can run on.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GpuBackendType {
    /// Apple Metal (macOS only, behind the `metal-native` feature).
    Metal,
    /// NVIDIA CUDA (behind the `cuda` feature).
    Cuda,
    /// Portable CPU fallback; always available.
    Cpu,
}
18
19impl std::fmt::Display for GpuBackendType {
20 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
21 match self {
22 GpuBackendType::Metal => write!(f, "Metal"),
23 GpuBackendType::Cuda => write!(f, "CUDA"),
24 GpuBackendType::Cpu => write!(f, "CPU"),
25 }
26 }
27}
28
29pub fn detect_available_backends() -> Vec<GpuBackendType> {
31 let mut backends = Vec::new();
32
33 #[cfg(all(target_os = "macos", feature = "metal-native"))]
35 {
36 if is_metal_available() {
37 backends.push(GpuBackendType::Metal);
38 }
39 }
40
41 #[cfg(feature = "cuda")]
43 {
44 if is_cuda_available() {
45 backends.push(GpuBackendType::Cuda);
46 }
47 }
48
49 backends.push(GpuBackendType::Cpu);
51
52 backends
53}
54
55pub fn select_best_backend() -> Result<GpuBackendType> {
57 let available = detect_available_backends();
58
59 if available.contains(&GpuBackendType::Metal) {
61 Ok(GpuBackendType::Metal)
62 } else if available.contains(&GpuBackendType::Cuda) {
63 Ok(GpuBackendType::Cuda)
64 } else if available.contains(&GpuBackendType::Cpu) {
65 Ok(GpuBackendType::Cpu)
66 } else {
67 Err(HiveGpuError::NoDeviceAvailable)
68 }
69}
70
/// True when macOS reports a default Metal device for this process.
#[cfg(all(target_os = "macos", feature = "metal-native"))]
fn is_metal_available() -> bool {
    metal::Device::system_default().is_some()
}
78
/// Heuristic CUDA probe based on the standard environment variables.
///
/// Fix: `CUDA_VISIBLE_DEVICES` set to an empty (or whitespace-only) string
/// is the documented way to hide *every* device, so that case now reports
/// `false`; previously `is_ok()` treated it as available. Otherwise a set
/// `CUDA_VISIBLE_DEVICES`, or a set `CUDA_HOME`, counts as available.
///
/// NOTE(review): `CUDA_VISIBLE_DEVICES="-1"` also hides all devices and is
/// not handled here — confirm whether callers need that case too.
#[cfg(feature = "cuda")]
fn is_cuda_available() -> bool {
    match std::env::var("CUDA_VISIBLE_DEVICES") {
        // Explicitly empty means "no devices", per the CUDA runtime docs.
        Ok(devices) => !devices.trim().is_empty(),
        Err(_) => std::env::var("CUDA_HOME").is_ok(),
    }
}
86
87pub fn get_backend_info(backend: GpuBackendType) -> Result<String> {
89 match backend {
90 GpuBackendType::Metal => {
91 #[cfg(all(target_os = "macos", feature = "metal-native"))]
92 {
93 use metal::Device;
94 if let Some(device) = Device::system_default() {
95 Ok(format!("Metal device: {}", device.name()))
96 } else {
97 Err(HiveGpuError::NoDeviceAvailable)
98 }
99 }
100 #[cfg(not(all(target_os = "macos", feature = "metal-native")))]
101 {
102 Err(HiveGpuError::NoDeviceAvailable)
103 }
104 }
105 GpuBackendType::Cuda => {
106 #[cfg(feature = "cuda")]
107 {
108 Ok("CUDA device available".to_string())
110 }
111 #[cfg(not(feature = "cuda"))]
112 {
113 Err(HiveGpuError::NoDeviceAvailable)
114 }
115 }
116 GpuBackendType::Cpu => Ok("CPU fallback".to_string()),
117 }
118}
119
120pub fn get_backend_performance_info(backend: GpuBackendType) -> BackendPerformanceInfo {
122 match backend {
123 GpuBackendType::Metal => BackendPerformanceInfo {
124 name: "Metal".to_string(),
125 memory_bandwidth_gbps: 400.0, compute_units: 8, memory_size_gb: 16,
128 supports_hnsw: true,
129 supports_batch: true,
130 },
131 GpuBackendType::Cuda => BackendPerformanceInfo {
132 name: "CUDA".to_string(),
133 memory_bandwidth_gbps: 900.0, compute_units: 128, memory_size_gb: 24,
136 supports_hnsw: true,
137 supports_batch: true,
138 },
139 GpuBackendType::Cpu => BackendPerformanceInfo {
140 name: "CPU".to_string(),
141 memory_bandwidth_gbps: 50.0, compute_units: 16, memory_size_gb: 32,
144 supports_hnsw: false,
145 supports_batch: true,
146 },
147 }
148}
149
/// Static performance profile of a compute backend.
#[derive(Debug, Clone)]
pub struct BackendPerformanceInfo {
    /// Display name of the backend ("Metal", "CUDA", "CPU").
    pub name: String,
    /// Approximate memory bandwidth in GB/s.
    pub memory_bandwidth_gbps: f32,
    /// Number of compute units — presumably cores/SMs; TODO confirm units.
    pub compute_units: usize,
    /// Device memory size in GB.
    pub memory_size_gb: usize,
    /// Whether HNSW index search is supported on this backend.
    pub supports_hnsw: bool,
    /// Whether batched operations are supported.
    pub supports_batch: bool,
}