
scirs2_vision/gpu_modules/context.rs

//! GPU context management and backend selection
//!
//! This module provides GPU context initialization, backend selection,
//! and error handling for computer vision operations.
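//!
//! # Example
//!
//! A minimal usage sketch; the import path below is assumed from this file's
//! location in the crate and may differ from the crate's actual re-exports.
//!
//! ```ignore
//! use scirs2_vision::gpu_modules::context::GpuVisionContext;
//!
//! // Falls back to other backends (and finally the CPU) if the preferred
//! // backend cannot be initialized.
//! if let Ok(ctx) = GpuVisionContext::new() {
//!     println!("Selected backend: {}", ctx.backend_name());
//!     println!("GPU acceleration available: {}", ctx.is_gpu_available());
//! }
//! ```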

use crate::error::{Result, VisionError};
use scirs2_core::gpu::{GpuBackend, GpuContext};

/// GPU-accelerated vision context
pub struct GpuVisionContext {
    pub(crate) context: GpuContext,
    backend: GpuBackend,
}

impl GpuVisionContext {
    /// Create a new GPU vision context with the preferred backend,
    /// falling back to other available backends (and finally the CPU) if it fails
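    ///
    /// # Example
    ///
    /// A minimal sketch of the fallback-aware constructor:
    ///
    /// ```ignore
    /// let ctx = GpuVisionContext::new().expect("no compute backend could be initialized");
    /// println!("vision context backend: {}", ctx.backend_name());
    /// ```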
    pub fn new() -> Result<Self> {
        let preferred_backend = GpuBackend::preferred();

        // Try preferred backend first
        match GpuContext::new(preferred_backend) {
            Ok(context) => {
                eprintln!("Successfully created GPU context with backend: {preferred_backend:?}");
                Ok(Self {
                    context,
                    backend: preferred_backend,
                })
            }
            Err(preferred_error) => {
                eprintln!(
                    "Failed to create GPU context with preferred backend {preferred_backend:?}: {preferred_error}"
                );

                // Try fallback backends in order of preference
                let fallback_backends = [
                    GpuBackend::Wgpu,   // Cross-platform
                    GpuBackend::OpenCL, // Widely supported
                    GpuBackend::Cuda,   // NVIDIA specific
                    GpuBackend::Metal,  // Apple specific
                    GpuBackend::Cpu,    // Always available as final fallback
                ];

                for &fallback_backend in &fallback_backends {
                    if fallback_backend == preferred_backend {
                        continue; // Skip already tried backend
                    }

                    match GpuContext::new(fallback_backend) {
                        Ok(context) => {
                            eprintln!(
                                "Successfully created GPU context with fallback backend: {fallback_backend:?}"
                            );
                            return Ok(Self {
                                context,
                                backend: fallback_backend,
                            });
                        }
                        Err(fallback_error) => {
                            eprintln!(
                                "Fallback backend {fallback_backend:?} also failed: {fallback_error}"
                            );
                        }
                    }
                }

                // If all backends fail, return the original error with helpful context
                Err(VisionError::Other(format!(
                    "Failed to create GPU context with any backend. Preferred backend {preferred_backend:?} failed with: {preferred_error}. All fallback backends also failed. Check GPU drivers and compute capabilities."
                )))
            }
        }
    }

    /// Create a new GPU vision context with a specific backend (no fallback is attempted)
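    ///
    /// # Example
    ///
    /// A minimal sketch; the CPU backend is used here because it is expected to
    /// be available on every platform:
    ///
    /// ```ignore
    /// let ctx = GpuVisionContext::with_backend(GpuBackend::Cpu)
    ///     .expect("CPU backend should always be available");
    /// assert_eq!(ctx.backend(), GpuBackend::Cpu);
    /// assert!(!ctx.is_gpu_available());
    /// ```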
    pub fn with_backend(backend: GpuBackend) -> Result<Self> {
        match GpuContext::new(backend) {
            Ok(context) => {
                eprintln!("Successfully created GPU context with requested backend: {backend:?}");
                Ok(Self { context, backend })
            }
            Err(error) => {
                let detailed_error = match backend {
                    GpuBackend::Cuda => {
                        format!(
                            "CUDA backend failed: {error}. Ensure NVIDIA drivers are installed and a CUDA-capable GPU is available."
                        )
                    }
                    GpuBackend::Metal => {
                        format!(
                            "Metal backend failed: {error}. Metal is only available on macOS with compatible hardware."
                        )
                    }
                    GpuBackend::OpenCL => {
                        format!(
                            "OpenCL backend failed: {error}. Check OpenCL runtime installation and driver support."
                        )
                    }
                    GpuBackend::Wgpu => {
                        format!(
                            "WebGPU backend failed: {error}. Check GPU drivers and WebGPU support."
                        )
                    }
                    GpuBackend::Cpu => {
                        format!(
                            "CPU backend failed: {error}. This should not happen, as the CPU backend is expected to always be available."
                        )
                    }
                    GpuBackend::Rocm => {
                        format!(
                            "ROCm backend failed: {error}. Check ROCm installation and AMD GPU drivers."
                        )
                    }
                };

                eprintln!("GPU context creation failed: {detailed_error}");
                Err(VisionError::Other(detailed_error))
            }
        }
    }

    /// Get the backend being used
    pub fn backend(&self) -> GpuBackend {
        self.backend
    }

    /// Get backend name as string
    pub fn backend_name(&self) -> &str {
        self.context.backend_name()
    }

    /// Check if GPU acceleration is available
    pub fn is_gpu_available(&self) -> bool {
        self.backend != GpuBackend::Cpu
    }

    /// Get available GPU memory in bytes, if the backend reports it
    pub fn available_memory(&self) -> Option<usize> {
        self.context.get_available_memory()
    }

    /// Get total GPU memory in bytes, if the backend reports it
    pub fn total_memory(&self) -> Option<usize> {
        self.context.get_total_memory()
    }
}

/// GPU memory usage statistics
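///
/// The fields are related as shown in this illustrative sketch (made-up numbers,
/// not actual measurements):
///
/// ```ignore
/// let stats = GpuMemoryStats {
///     total_memory: 8_000_000_000,     // ~8 GB reported by the device
///     available_memory: 6_000_000_000, // ~6 GB still free
///     used_memory: 2_000_000_000,      // total_memory - available_memory
///     utilization_percent: 25.0,       // used_memory / total_memory * 100
/// };
/// assert_eq!(stats.total_memory - stats.available_memory, stats.used_memory);
/// ```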
pub struct GpuMemoryStats {
    /// Total GPU memory in bytes
    pub total_memory: usize,
    /// Available GPU memory in bytes
    pub available_memory: usize,
    /// Used GPU memory in bytes
    pub used_memory: usize,
    /// GPU memory utilization as percentage (0-100)
    pub utilization_percent: f32,
}

impl GpuVisionContext {
    /// Get current GPU memory statistics
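    ///
    /// Returns `None` if the backend does not report total or available memory.
    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// if let Some(stats) = ctx.memory_stats() {
    ///     println!("GPU memory in use: {:.1}%", stats.utilization_percent);
    /// }
    /// ```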
    pub fn memory_stats(&self) -> Option<GpuMemoryStats> {
        let total = self.total_memory()?;
        let available = self.available_memory()?;
        let used = total.saturating_sub(available);
        let utilization = (used as f32 / total as f32) * 100.0;

        Some(GpuMemoryStats {
            total_memory: total,
            available_memory: available,
            used_memory: used,
            utilization_percent: utilization,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gpu_context_creation() {
        let result = GpuVisionContext::new();
        // Should succeed with at least CPU backend
        assert!(result.is_ok());

        let ctx = result.expect("context creation should succeed with at least the CPU fallback");
        println!("GPU backend: {}", ctx.backend_name());
    }

    #[test]
    fn test_gpu_memory_info() {
        if let Ok(ctx) = GpuVisionContext::new() {
            if let Some(stats) = ctx.memory_stats() {
                println!("GPU Memory Stats:");
                println!("  Total: {} MB", stats.total_memory / (1024 * 1024));
                println!("  Available: {} MB", stats.available_memory / (1024 * 1024));
                println!("  Used: {} MB", stats.used_memory / (1024 * 1024));
                println!("  Utilization: {:.1}%", stats.utilization_percent);
            }
        }
    }

    #[test]
    fn test_backend_selection() {
        // Test CPU backend explicitly
        let cpu_ctx = GpuVisionContext::with_backend(GpuBackend::Cpu);
        assert!(cpu_ctx.is_ok());

        let ctx = cpu_ctx.expect("CPU backend should always be available");
        assert_eq!(ctx.backend(), GpuBackend::Cpu);
        assert!(!ctx.is_gpu_available());
    }
}