Skip to main content

oxigdal_gpu/
lib.rs

1//! GPU-accelerated geospatial operations for OxiGDAL.
2//!
3//! This crate provides GPU acceleration for raster operations using WGPU,
4//! enabling 10-100x speedup for large-scale geospatial data processing.
5//!
6//! # Features
7//!
8//! - **Cross-platform GPU support**: Vulkan, Metal, DX12, DirectML, WebGPU
9//! - **Backend-specific optimizations**: CUDA, Vulkan, Metal, DirectML
10//! - **Multi-GPU support**: Distribute work across multiple GPUs
11//! - **Advanced memory management**: Memory pooling, staging buffers, VRAM budget tracking
12//! - **Element-wise operations**: Add, subtract, multiply, divide, etc.
13//! - **Statistical operations**: Parallel reduction, histogram, min/max, advanced statistics
14//! - **Resampling**: Nearest neighbor, bilinear, bicubic, Lanczos interpolation
15//! - **Convolution**: Gaussian blur, edge detection, FFT-based, custom filters
16//! - **Pipeline API**: Chain operations without CPU transfers
17//! - **Pure Rust**: No C/C++ dependencies
18//! - **Safe**: Comprehensive error handling, no unwrap()
19//!
20//! # Quick Start
21//!
22//! ```rust,no_run
23//! use oxigdal_gpu::*;
24//!
25//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
26//! // Initialize GPU context
27//! let gpu = GpuContext::new().await?;
28//!
29//! // Create compute pipeline
30//! let data: Vec<f32> = vec![1.0; 1024 * 1024];
31//! let result = ComputePipeline::from_data(&gpu, &data, 1024, 1024)?
32//!     .gaussian_blur(2.0)?
33//!     .multiply(1.5)?
34//!     .clamp(0.0, 255.0)?
35//!     .read_blocking()?;
36//! # Ok(())
37//! # }
38//! ```
39//!
40//! # GPU Backend Selection
41//!
42//! ```rust,no_run
43//! use oxigdal_gpu::*;
44//!
45//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
46//! // Auto-select best backend for platform
47//! let gpu = GpuContext::new().await?;
48//!
49//! // Or specify backend explicitly
50//! let config = GpuContextConfig::new()
51//!     .with_backend(BackendPreference::Vulkan)
52//!     .with_power_preference(GpuPowerPreference::HighPerformance);
53//!
54//! let gpu = GpuContext::with_config(config).await?;
55//! # Ok(())
56//! # }
57//! ```
58//!
59//! # NDVI Computation Example
60//!
61//! ```rust,no_run
62//! use oxigdal_gpu::*;
63//!
64//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
65//! let gpu = GpuContext::new().await?;
66//!
67//! // Load multispectral imagery (R, G, B, NIR bands)
68//! let bands_data: Vec<Vec<f32>> = vec![
69//!     vec![0.0; 512 * 512], // Red
70//!     vec![0.0; 512 * 512], // Green
71//!     vec![0.0; 512 * 512], // Blue
72//!     vec![0.0; 512 * 512], // NIR
73//! ];
74//!
75//! // Create GPU raster buffer
76//! let raster = GpuRasterBuffer::from_bands(
77//!     &gpu,
78//!     512,
79//!     512,
80//!     &bands_data,
81//!     wgpu::BufferUsages::STORAGE,
82//! )?;
83//!
84//! // Compute NDVI
85//! let pipeline = MultibandPipeline::new(&gpu, &raster)?;
86//! let ndvi = pipeline.ndvi()?;
87//!
88//! // Apply threshold and export
89//! let vegetation = ndvi
90//!     .threshold(0.3, 1.0, 0.0)?
91//!     .read_blocking()?;
92//! # Ok(())
93//! # }
94//! ```
95//!
96//! # Performance
97//!
98//! GPU acceleration provides significant speedups for large rasters:
99//!
100//! | Operation | CPU (single-thread) | GPU | Speedup |
101//! |-----------|---------------------|-----|---------|
102//! | Element-wise ops | 100 ms | 1 ms | 100x |
103//! | Gaussian blur | 500 ms | 5 ms | 100x |
104//! | Resampling | 200 ms | 10 ms | 20x |
105//! | Statistics | 150 ms | 2 ms | 75x |
106//!
107//! # Error Handling
108//!
109//! All GPU operations return `GpuResult<T>` and handle errors gracefully:
110//!
111//! ```rust,no_run
112//! use oxigdal_gpu::*;
113//!
114//! # async fn example() {
115//! match GpuContext::new().await {
116//!     Ok(gpu) => {
117//!         // Use GPU acceleration
118//!     }
119//!     Err(e) if e.should_fallback_to_cpu() => {
120//!         // Fallback to CPU implementation
121//!         println!("GPU not available, using CPU: {}", e);
122//!     }
123//!     Err(e) => {
124//!         eprintln!("GPU error: {}", e);
125//!     }
126//! }
127//! # }
128//! ```
129
130// Primary warnings/denials first
131#![warn(clippy::all)]
132#![deny(clippy::unwrap_used)]
133#![deny(clippy::panic)]
134// GPU crate is still under development - allow partial documentation
135#![allow(missing_docs)]
136// Allow dead code for internal structures not yet fully utilized
137#![allow(dead_code)]
138// Allow manual div_ceil for compatibility with older Rust versions
139#![allow(clippy::manual_div_ceil)]
140// Allow method name conflicts for builder patterns
141#![allow(clippy::should_implement_trait)]
142// Private type leakage allowed for internal APIs
143#![allow(private_interfaces)]
144// Allow unused_must_use for wgpu buffer creation patterns
145#![allow(unused_must_use)]
146// Allow complex type definitions in GPU interfaces
147#![allow(clippy::type_complexity)]
148// Allow expect() for GPU device invariants
149#![allow(clippy::expect_used)]
150// Allow manual clamp for GPU value normalization
151#![allow(clippy::manual_clamp)]
152// Allow first element access with get(0)
153#![allow(clippy::get_first)]
154// Allow collapsible matches for clarity
155#![allow(clippy::collapsible_match)]
156// Allow redundant closures for explicit code
157#![allow(clippy::redundant_closure)]
158// Allow vec push after creation for GPU buffer building
159#![allow(clippy::vec_init_then_push)]
160// Allow iterating on map values pattern
161#![allow(clippy::iter_kv_map)]
162// Allow needless question mark for explicit error handling
163#![allow(clippy::needless_question_mark)]
164// Allow confusing lifetimes in memory management
165#![allow(clippy::needless_lifetimes)]
166// Allow map iteration patterns
167#![allow(clippy::for_kv_map)]
168// Allow elided lifetime patterns
169#![allow(elided_lifetimes_in_associated_constant)]
170
171pub mod backends;
172pub mod buffer;
173pub mod compute;
174pub mod context;
175pub mod error;
176pub mod kernels;
177pub mod memory;
178pub mod multi_gpu;
179pub mod shaders;
180
181// Re-export commonly used items
182pub use buffer::{GpuBuffer, GpuRasterBuffer};
183pub use compute::{ComputePipeline, MultibandPipeline};
184pub use context::{BackendPreference, GpuContext, GpuContextConfig, GpuPowerPreference};
185pub use error::{GpuError, GpuResult};
186pub use kernels::{
187    convolution::{Filters, gaussian_blur},
188    raster::{ElementWiseOp, RasterKernel, ScalarOp, UnaryOp},
189    resampling::{ResamplingMethod, resize},
190    statistics::{HistogramParams, ReductionOp, Statistics, compute_statistics},
191};
192pub use memory::{MemoryPool, MemoryPoolConfig, StagingBufferManager, VramBudgetManager};
193pub use multi_gpu::{
194    DistributionStrategy, InterGpuTransfer, MultiGpuConfig, MultiGpuManager, WorkDistributor,
195};
196
197/// Library version.
198pub const VERSION: &str = env!("CARGO_PKG_VERSION");
199
200/// Check if GPU is available on the current system.
201///
202/// This is a convenience function that attempts to create a GPU context
203/// and returns whether it succeeded.
204///
205/// # Examples
206///
207/// ```rust,no_run
208/// use oxigdal_gpu::is_gpu_available;
209///
210/// # async fn example() {
211/// if is_gpu_available().await {
212///     println!("GPU acceleration available!");
213/// } else {
214///     println!("GPU not available, falling back to CPU");
215/// }
216/// # }
217/// ```
218pub async fn is_gpu_available() -> bool {
219    GpuContext::new().await.is_ok()
220}
221
222/// Get information about available GPU adapters.
223///
224/// Returns a list of available GPU adapter names and backends.
225///
226/// # Examples
227///
228/// ```rust,no_run
229/// use oxigdal_gpu::get_available_adapters;
230///
231/// # async fn example() {
232/// let adapters = get_available_adapters().await;
233/// for (name, backend) in adapters {
234///     println!("GPU: {} ({:?})", name, backend);
235/// }
236/// # }
237/// ```
238pub async fn get_available_adapters() -> Vec<(String, String)> {
239    use wgpu::{Backends, Instance, InstanceDescriptor, RequestAdapterOptions};
240
241    let _instance = Instance::new(&InstanceDescriptor {
242        backends: Backends::all(),
243        ..Default::default()
244    });
245
246    let mut adapters = Vec::new();
247
248    // Try to enumerate all adapters
249    for backend in &[
250        Backends::VULKAN,
251        Backends::METAL,
252        Backends::DX12,
253        Backends::BROWSER_WEBGPU,
254    ] {
255        let instance = Instance::new(&InstanceDescriptor {
256            backends: *backend,
257            ..Default::default()
258        });
259
260        if let Ok(adapter) = instance
261            .request_adapter(&RequestAdapterOptions::default())
262            .await
263        {
264            let info = adapter.get_info();
265            adapters.push((info.name, format!("{:?}", info.backend)));
266        }
267    }
268
269    adapters
270}
271
272#[cfg(test)]
273mod tests {
274    use super::*;
275
276    #[test]
277    fn test_version() {
278        assert!(!VERSION.is_empty());
279    }
280
281    #[tokio::test]
282    async fn test_gpu_availability() {
283        let available = is_gpu_available().await;
284        println!("GPU available: {}", available);
285    }
286
287    #[tokio::test]
288    async fn test_get_adapters() {
289        let adapters = get_available_adapters().await;
290        println!("Available adapters:");
291        for (name, backend) in adapters {
292            println!("  - {} ({:?})", name, backend);
293        }
294    }
295}