scirs2_series/gpu_acceleration/
mod.rs

1//! GPU acceleration infrastructure for time series operations
2//!
3//! This module provides the foundation for GPU-accelerated time series processing,
4//! including forecasting, decomposition, and feature extraction.
5
6// Module declarations
7pub mod algorithms;
8pub mod array;
9pub mod blas;
10pub mod config;
11pub mod convolution;
12pub mod device_manager;
13pub mod fft;
14pub mod traits;
15pub mod utils;
16
17// Re-export all public items for backward compatibility
18pub use config::{
19    GpuBackend, GpuCapabilities, GpuConfig, GraphOptimizationLevel, MemoryStrategy,
20    TensorCoresConfig, TensorCoresGeneration, TensorDataType,
21};
22
23pub use traits::{
24    DecompositionResult, GpuAccelerated, GpuDecomposition, GpuFeatureExtraction, GpuForecasting,
25};
26
27pub use array::GpuArray;
28
29pub use device_manager::GpuDeviceManager;
30
31// Re-export utility functions
32pub use utils::{
33    estimate_memory_usage, get_recommended_batch_size, is_gpu_supported, optimize_gpu_config,
34};
35
36// Re-export FFT functionality
37pub use fft::GpuFFT;
38
39// Re-export convolution functionality
40pub use convolution::GpuConvolution;
41
42// Re-export BLAS functionality
43pub use blas::{GpuBLAS, TensorCoresBLAS};
44
45// Re-export algorithms functionality
46pub use algorithms::{
47    FeatureConfig, ForecastMethod, GpuFeatureExtractor, GpuTimeSeriesProcessor, WindowStatistic,
48};
49
// NOTE(review): the imports below are private `use` declarations, not re-exports —
// they do not surface anything at the module level, and none of these names appear
// to be used in this file. Confirm whether `pub use` was intended or whether these
// imports can be dropped.
51use scirs2_core::ndarray::{s, Array1};
52use scirs2_core::numeric::Float;
53use std::fmt::Debug;
54
55use crate::error::{Result, TimeSeriesError};
56
57// Re-export commonly used imports for convenience
58pub use scirs2_core::ndarray;
59pub use scirs2_core::numeric;
60
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array1;

    /// `GpuConfig::default()` exposes the documented defaults
    /// (device 0, batch size 1024, memory optimization on).
    #[test]
    fn test_gpu_config_default() {
        let config = GpuConfig::default();
        assert_eq!(config.device_id, 0);
        assert_eq!(config.batch_size, 1024);
        assert!(config.enable_memory_optimization);
    }

    /// A freshly created `GpuArray` wraps its CPU data without
    /// uploading to the device yet.
    #[test]
    fn test_gpu_array_creation() {
        let config = GpuConfig::default();
        let data = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let gpu_array = GpuArray::from_cpu(data, config);
        assert_eq!(gpu_array.len(), 5);
        assert!(!gpu_array.is_on_gpu());
    }

    /// Constructing the device manager must succeed even on machines
    /// without a GPU (it falls back gracefully).
    #[test]
    fn test_device_manager_creation() {
        let device_manager = GpuDeviceManager::new();
        assert!(device_manager.is_ok());
    }

    /// `TensorCoresConfig::default()` enables tensor cores with FP16
    /// data and 16x16x16 tiles.
    #[test]
    fn test_tensor_cores_config_default() {
        let config = TensorCoresConfig::default();
        assert!(config.enabled);
        assert_eq!(config.data_type, TensorDataType::FP16);
        assert_eq!(config.tile_size, (16, 16, 16));
    }

    /// First-generation tensor cores support FP16 and 16x16x16 matrices.
    #[test]
    fn test_tensor_cores_generation_capabilities() {
        let gen_v1 = TensorCoresGeneration::V1;
        let supported_types = gen_v1.supported_data_types();
        assert!(supported_types.contains(&TensorDataType::FP16));

        let dimensions = gen_v1.supported_matrix_dimensions();
        assert!(dimensions.contains(&(16, 16, 16)));
    }

    /// Smoke-tests the utility helpers: they must run without panicking
    /// and return sane (positive) values.
    #[test]
    fn test_utils_functions() {
        // Must not panic regardless of GPU availability. The previous
        // `assert!(x || !x)` was a tautology (always true, asserts
        // nothing — clippy's `overly_complex_bool_expr`); simply calling
        // the function captures the actual intent.
        let _ = utils::is_gpu_supported();

        let batch_size = utils::get_recommended_batch_size(1000, 1024 * 1024);
        assert!(batch_size > 0);

        let memory_usage = utils::estimate_memory_usage(1000, 0.5);
        assert!(memory_usage > 0);

        let config = utils::optimize_gpu_config(10000, 256 * 1024 * 1024);
        assert!(config.batch_size > 0);
    }
}