hive_gpu/backends/detector.rs

use crate::error::{Result, HiveGpuError};

/// Compute backends that the crate can run on, listed in order of preference.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GpuBackendType {
    /// Apple Metal (macOS only, behind the `metal-native` feature).
    Metal,
    /// NVIDIA CUDA (behind the `cuda` feature).
    Cuda,
    /// Portable CPU fallback; always available.
    Cpu,
}

impl std::fmt::Display for GpuBackendType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            GpuBackendType::Metal => write!(f, "Metal"),
            GpuBackendType::Cuda => write!(f, "CUDA"),
            GpuBackendType::Cpu => write!(f, "CPU"),
        }
    }
}

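/// Returns every backend usable in this build, probing Metal and CUDA only
/// when the corresponding cargo feature is enabled. The CPU fallback is always
/// appended, so the returned vector is never empty.
///
/// Example (a hedged sketch; the `hive_gpu::backends::detector` path is assumed
/// from the file location and may differ in the actual crate layout):
///
/// ```ignore
/// use hive_gpu::backends::detector::{detect_available_backends, GpuBackendType};
///
/// let backends = detect_available_backends();
/// // The CPU fallback is unconditional, so this always holds.
/// assert!(backends.contains(&GpuBackendType::Cpu));
/// ```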
pub fn detect_available_backends() -> Vec<GpuBackendType> {
    let mut backends = Vec::new();

    // Metal is only probed on macOS builds with the `metal-native` feature.
    #[cfg(all(target_os = "macos", feature = "metal-native"))]
    {
        if is_metal_available() {
            backends.push(GpuBackendType::Metal);
        }
    }

    // CUDA is only probed when the `cuda` feature is enabled.
    #[cfg(feature = "cuda")]
    {
        if is_cuda_available() {
            backends.push(GpuBackendType::Cuda);
        }
    }

    // The CPU fallback is always available.
    backends.push(GpuBackendType::Cpu);

    backends
}

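/// Picks the highest-priority backend reported by [`detect_available_backends`],
/// preferring Metal over CUDA over the CPU fallback.
///
/// Example (a hedged sketch; the module path is assumed, as above):
///
/// ```ignore
/// use hive_gpu::backends::detector::select_best_backend;
///
/// let backend = select_best_backend().expect("no backend available");
/// println!("running on {backend}");
/// ```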
pub fn select_best_backend() -> Result<GpuBackendType> {
    let available = detect_available_backends();

    if available.contains(&GpuBackendType::Metal) {
        Ok(GpuBackendType::Metal)
    } else if available.contains(&GpuBackendType::Cuda) {
        Ok(GpuBackendType::Cuda)
    } else if available.contains(&GpuBackendType::Cpu) {
        Ok(GpuBackendType::Cpu)
    } else {
        // Unreachable in practice: the CPU fallback is always listed.
        Err(HiveGpuError::NoDeviceAvailable)
    }
}

/// Checks whether a default Metal device can be created.
#[cfg(all(target_os = "macos", feature = "metal-native"))]
fn is_metal_available() -> bool {
    use metal::Device;

    Device::system_default().is_some()
}

/// Best-effort CUDA probe: this only inspects well-known environment variables
/// rather than initializing the driver, so it can report CUDA as available on
/// machines that merely have the toolkit configured.
#[cfg(feature = "cuda")]
fn is_cuda_available() -> bool {
    std::env::var("CUDA_VISIBLE_DEVICES").is_ok() ||
        std::env::var("CUDA_HOME").is_ok()
}

/// Returns a short human-readable description of the given backend, or
/// `HiveGpuError::NoDeviceAvailable` if it is not usable in this build.
pub fn get_backend_info(backend: GpuBackendType) -> Result<String> {
    match backend {
        GpuBackendType::Metal => {
            #[cfg(all(target_os = "macos", feature = "metal-native"))]
            {
                use metal::Device;
                if let Some(device) = Device::system_default() {
                    Ok(format!("Metal device: {}", device.name()))
                } else {
                    Err(HiveGpuError::NoDeviceAvailable)
                }
            }
            #[cfg(not(all(target_os = "macos", feature = "metal-native")))]
            {
                Err(HiveGpuError::NoDeviceAvailable)
            }
        }
        GpuBackendType::Cuda => {
            #[cfg(feature = "cuda")]
            {
                Ok("CUDA device available".to_string())
            }
            #[cfg(not(feature = "cuda"))]
            {
                Err(HiveGpuError::NoDeviceAvailable)
            }
        }
        GpuBackendType::Cpu => Ok("CPU fallback".to_string()),
    }
}

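/// Returns nominal performance characteristics for the given backend. The
/// numbers are the fixed values hard-coded below, not figures queried from the
/// actual device.
///
/// Example (a hedged sketch; the module path is assumed, as above):
///
/// ```ignore
/// use hive_gpu::backends::detector::{get_backend_performance_info, GpuBackendType};
///
/// let info = get_backend_performance_info(GpuBackendType::Cpu);
/// assert!(!info.supports_hnsw); // the CPU profile reports no HNSW support
/// println!("{} @ {} GB/s", info.name, info.memory_bandwidth_gbps);
/// ```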
pub fn get_backend_performance_info(backend: GpuBackendType) -> BackendPerformanceInfo {
    // NOTE: these are fixed nominal figures, not values queried from the
    // actual device.
    match backend {
        GpuBackendType::Metal => BackendPerformanceInfo {
            name: "Metal".to_string(),
            memory_bandwidth_gbps: 400.0,
            compute_units: 8,
            memory_size_gb: 16,
            supports_hnsw: true,
            supports_batch: true,
        },
        GpuBackendType::Cuda => BackendPerformanceInfo {
            name: "CUDA".to_string(),
            memory_bandwidth_gbps: 900.0,
            compute_units: 128,
            memory_size_gb: 24,
            supports_hnsw: true,
            supports_batch: true,
        },
        GpuBackendType::Cpu => BackendPerformanceInfo {
            name: "CPU".to_string(),
            memory_bandwidth_gbps: 50.0,
            compute_units: 16,
            memory_size_gb: 32,
            supports_hnsw: false,
            supports_batch: true,
        },
    }
}

/// Static description of a backend's nominal capabilities.
#[derive(Debug, Clone)]
pub struct BackendPerformanceInfo {
    /// Human-readable backend name ("Metal", "CUDA", "CPU").
    pub name: String,
    /// Nominal memory bandwidth in GB/s.
    pub memory_bandwidth_gbps: f32,
    pub compute_units: usize,
    /// Nominal memory capacity in GB.
    pub memory_size_gb: usize,
    /// Whether HNSW index operations are supported on this backend.
    pub supports_hnsw: bool,
    /// Whether batched operations are supported.
    pub supports_batch: bool,
}
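
// A minimal sketch of unit tests for the detection logic. They rely only on
// behavior that holds regardless of enabled features: the CPU fallback is
// always pushed last, selection therefore never fails, and the Display impl
// uses the names "Metal"/"CUDA"/"CPU".
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn cpu_fallback_is_always_listed_last() {
        let backends = detect_available_backends();
        assert_eq!(backends.last(), Some(&GpuBackendType::Cpu));
    }

    #[test]
    fn select_best_backend_never_fails() {
        // The unconditional CPU fallback guarantees at least one backend.
        assert!(select_best_backend().is_ok());
    }

    #[test]
    fn display_names() {
        assert_eq!(GpuBackendType::Metal.to_string(), "Metal");
        assert_eq!(GpuBackendType::Cuda.to_string(), "CUDA");
        assert_eq!(GpuBackendType::Cpu.to_string(), "CPU");
    }
}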