// oxigdal_ml/lib.rs
//! OxiGDAL ML - Machine Learning for Geospatial Data
//!
//! This crate provides machine learning capabilities for the OxiGDAL ecosystem,
//! enabling geospatial ML workflows with ONNX Runtime integration.
//!
//! # Features
//!
//! - **ONNX Runtime Integration**: Pure Rust interface to ONNX Runtime for model inference
//! - **Image Segmentation**: Semantic, instance, and panoptic segmentation
//! - **Image Classification**: Scene classification, land cover classification, multi-label
//! - **Object Detection**: Bounding box detection with NMS and georeferencing
//! - **Preprocessing**: Normalization, tiling, padding, and augmentation
//! - **Postprocessing**: Tile merging, thresholding, polygon conversion, GeoJSON export
//!
//! # Optional Features
//!
//! - `gpu` - Enable CUDA and TensorRT GPU acceleration
//! - `directml` - Enable DirectML support (Windows)
//! - `coreml` - Enable CoreML support (macOS/iOS)
//!
//! # Example: Image Segmentation
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::models::OnnxModel;
//! use oxigdal_ml::inference::{InferenceEngine, InferenceConfig};
//! use oxigdal_ml::segmentation::probability_to_mask;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Load ONNX model
//! let model = OnnxModel::from_file("segmentation.onnx")?;
//!
//! // Create inference engine
//! let config = InferenceConfig::default();
//! let engine = InferenceEngine::new(model, config);
//!
//! // Load input raster (not shown)
//! # use oxigdal_core::buffer::RasterBuffer;
//! # use oxigdal_core::types::RasterDataType;
//! # let input = RasterBuffer::zeros(256, 256, RasterDataType::Float32);
//!
//! // Run inference
//! let predictions = engine.predict(&input)?;
//!
//! // Convert to segmentation mask
//! let mask = probability_to_mask(&predictions, 2, 0.5)?;
//! # Ok(())
//! # }
//! ```
//!
//! # Example: Object Detection
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::detection::{non_maximum_suppression, NmsConfig};
//! use oxigdal_ml::postprocessing::export_detections_geojson;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Assume we have detections from a model
//! # let detections = vec![];
//!
//! // Apply NMS to filter overlapping detections
//! let config = NmsConfig::default();
//! let filtered = non_maximum_suppression(&detections, &config)?;
//!
//! // Export to GeoJSON
//! # let geo_detections = vec![];
//! # export_detections_geojson(&geo_detections, "detections.geojson")?;
//! # Ok(())
//! # }
//! ```
//!
//! # Example: Batch Processing with Progress Tracking
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::batch::{BatchConfig, BatchProcessor};
//! use oxigdal_ml::models::OnnxModel;
//! # use oxigdal_core::buffer::RasterBuffer;
//! # use oxigdal_core::types::RasterDataType;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Load model
//! let model = OnnxModel::from_file("model.onnx")?;
//!
//! // Auto-tune batch size based on available memory
//! let sample_size = 3 * 256 * 256 * 4; // 3 channels, 256x256, float32
//! let batch_size = BatchConfig::auto_tune_batch_size(sample_size, 0.5);
//!
//! // Create batch processor
//! let config = BatchConfig::builder()
//!     .max_batch_size(batch_size)
//!     .parallel_batches(4)
//!     .build();
//!
//! let processor = BatchProcessor::new(model, config);
//!
//! // Process large batch with progress bar
//! # let inputs: Vec<RasterBuffer> = vec![];
//! let results = processor.infer_batch_with_progress(inputs, true)?;
//! # Ok(())
//! # }
//! ```
//!
//! # Example: Model Optimization Pipeline
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::optimization::{OptimizationPipeline, OptimizationProfile};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create optimization pipeline for edge deployment
//! let pipeline = OptimizationPipeline::from_profile(OptimizationProfile::Size);
//!
//! // Optimize model (applies quantization + pruning)
//! let stats = pipeline.optimize("model.onnx", "model_optimized.onnx")?;
//!
//! println!("Size reduction: {:.1}%", stats.size_reduction_percent());
//! println!("Speedup: {:.2}x", stats.speedup);
//! println!("Accuracy delta: {:.2}%", stats.accuracy_delta);
//! # Ok(())
//! # }
//! ```
//!
//! # Example: Model Zoo - Download Pretrained Models
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::zoo::ModelZoo;
//! use oxigdal_ml::models::OnnxModel;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create model zoo
//! let mut zoo = ModelZoo::new()?;
//!
//! // List available models
//! let models = zoo.list_models();
//! for model in models {
//!     println!("{}: {} - {:.1}% accuracy",
//!         model.name, model.description,
//!         model.accuracy.unwrap_or(0.0));
//! }
//!
//! // Download a model (with progress bar and checksum verification)
//! let model_path = zoo.get_model("unet_buildings")?;
//!
//! // Load and use the model
//! let model = OnnxModel::from_file(model_path)?;
//! # Ok(())
//! # }
//! ```
//!
//! # Example: Transfer Learning
//!
//! ```no_run
//! use oxigdal_ml::*;
//! # #[cfg(feature = "temporal")]
//! # fn example() -> Result<(), Box<dyn std::error::Error>> {
//! // This example requires oxigdal-ml-foundation
//! // Load pretrained feature extractor
//! use oxigdal_ml_foundation::transfer::FeatureExtractor;
//!
//! // Extract features from pretrained model
//! let extractor = FeatureExtractor::from_pretrained("resnet50")?;
//!
//! // Use features for custom classification task
//! // (Training loop implementation would go here)
//! # Ok(())
//! # }
//! ```
//!
//! # Example: GPU Acceleration
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::gpu::{GpuBackend, GpuConfig};
//! use oxigdal_ml::inference::{InferenceEngine, InferenceConfig};
//! use oxigdal_ml::models::OnnxModel;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Detect available GPU backends
//! let backends = GpuBackend::detect_all();
//! for backend in &backends {
//!     println!("Available: {:?}", backend);
//! }
//!
//! // Configure GPU acceleration
//! let gpu_config = GpuConfig::builder()
//!     .preferred_backend(GpuBackend::Cuda)
//!     .device_id(0)
//!     .build();
//!
//! // Create inference engine with GPU support
//! let model = OnnxModel::from_file("model.onnx")?;
//! let mut inference_config = InferenceConfig::default();
//! inference_config.gpu_config = Some(gpu_config);
//!
//! let engine = InferenceEngine::new(model, inference_config);
//! # Ok(())
//! # }
//! ```
//!
//! # Example: Model Monitoring
//!
//! ```ignore
//! use oxigdal_ml::*;
//! use oxigdal_ml::monitoring::{ModelMonitor, MonitoringConfig};
//! use oxigdal_ml::models::OnnxModel;
//! # use oxigdal_core::buffer::RasterBuffer;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let model = OnnxModel::from_file("model.onnx")?;
//!
//! // Create monitor with custom config
//! let config = MonitoringConfig::builder()
//!     .track_latency(true)
//!     .track_memory(true)
//!     .alert_threshold_ms(100.0)
//!     .build();
//!
//! let mut monitor = ModelMonitor::new(model, config);
//!
//! // Run inference with monitoring
//! # let input = RasterBuffer::zeros(256, 256, oxigdal_core::types::RasterDataType::Float32);
//! let result = monitor.predict(&input)?;
//!
//! // Check metrics
//! let metrics = monitor.metrics();
//! println!("Avg latency: {:.2}ms", metrics.avg_latency_ms());
//! println!("Memory usage: {:.1}MB", metrics.peak_memory_mb);
//! # Ok(())
//! # }
//! ```
//!
//! # SciRS2 Integration Status
//!
//! This crate is being migrated to use the Pure Rust SciRS2 ecosystem following
//! COOLJAPAN policy. Current status:
//!
//! - **Random Number Generation**: Completed - using `scirs2-core::random` for RNG
//! - **Statistical Distributions**: Completed - using `NormalDistribution` for Gaussian noise
//! - **Data Augmentation**: Completed - integrated SciRS2-Core RNG
//! - **Linear Algebra**: Partial - `ndarray` still used in some modules, migration ongoing
//! - **Neural Network Training**: In Progress - `oxigdal-ml-foundation` implements custom
//!   Pure Rust backend with SciRS2 integration
//!
//! ## Usage Example with SciRS2
//!
//! ```no_run
//! use oxigdal_ml::augmentation::{add_gaussian_noise, random_crop};
//! # use oxigdal_core::buffer::RasterBuffer;
//! # use oxigdal_core::types::RasterDataType;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create a raster buffer
//! let input = RasterBuffer::zeros(512, 512, RasterDataType::Float32);
//!
//! // Add Gaussian noise using SciRS2-Core
//! let noisy = add_gaussian_noise(&input, 0.01)?;
//!
//! // Random crop using SciRS2-Core RNG
//! let cropped = random_crop(&input, 256, 256)?;
//! # Ok(())
//! # }
//! ```
//!
//! For more information about SciRS2 integration, see the SCIRS2 POLICY in the
//! workspace documentation.

// Crate-level lint configuration. `missing_docs` and `clippy::all` are
// enforced; `unwrap_used` is denied in favor of explicit error handling.
// Each `allow` below documents why the lint is relaxed for this crate.
#![warn(missing_docs)]
#![warn(clippy::all)]
// Pedantic disabled to reduce noise - default clippy::all is sufficient
// #![warn(clippy::pedantic)]
#![deny(clippy::unwrap_used)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::missing_errors_doc)]
#![allow(clippy::missing_panics_doc)]
// Allow unused imports in feature-gated modules
#![allow(unused_imports)]
// Allow dead code for ML model structures
#![allow(dead_code)]
// Allow collapsible match for ML result handling
#![allow(clippy::collapsible_match)]
#![allow(clippy::collapsible_if)]
// Allow expect() for model loading invariants
#![allow(clippy::expect_used)]
// Allow for loop with idx for tensor operations
#![allow(clippy::needless_range_loop)]
// Allow manual div_ceil for batch calculations
#![allow(clippy::manual_div_ceil)]
// Allow field assignment outside initializer for ML configs
#![allow(clippy::field_reassign_with_default)]
// Allow unexpected cfg for temporarily disabled features
#![allow(unexpected_cfgs)]
295
296pub mod augmentation;
297pub mod batch;
298pub mod batch_predict;
299pub mod classification;
300#[cfg(feature = "cloud-removal")]
301pub mod cloud;
302pub mod detection;
303pub mod error;
304pub mod gpu;
305pub mod hot_reload;
306pub mod inference;
307pub mod inference_cache;
308pub mod model_versioning;
309pub mod models;
310pub mod monitoring;
311pub mod optimization;
312pub mod postprocessing;
313pub mod preprocessing;
314pub mod segmentation;
315pub mod serving;
316pub mod superres;
317#[cfg(feature = "temporal")]
318pub mod temporal;
319pub mod zoo;
320
321// Re-export commonly used items
322pub use augmentation::AugmentationConfig;
323pub use batch::{BatchConfig, BatchProcessor};
324pub use error::{MlError, Result};
325pub use gpu::{GpuBackend, GpuConfig, GpuDevice};
326pub use models::{Model, OnnxModel};
327pub use monitoring::{ModelMonitor, MonitoringConfig, PerformanceMetrics};
328pub use optimization::{OptimizationPipeline, OptimizationProfile};
329pub use serving::{DeploymentStrategy, ModelServer, ServerConfig};
330pub use superres::{SuperResConfig, SuperResolution, UpscaleFactor};
331#[cfg(feature = "temporal")]
332pub use temporal::{ForecastConfig, ForecastResult, TemporalForecaster};
333pub use zoo::ModelZoo;
334
335/// Crate version
336pub const VERSION: &str = env!("CARGO_PKG_VERSION");
337
338/// Crate name
339pub const NAME: &str = env!("CARGO_PKG_NAME");
340
#[cfg(test)]
mod tests {
    use super::*;

    /// Sanity-check the compile-time crate metadata constants.
    #[test]
    fn test_version() {
        // VERSION comes from CARGO_PKG_VERSION, which cargo always sets.
        assert!(!VERSION.is_empty());
        // NAME must match the package name declared in Cargo.toml.
        assert_eq!(NAME, "oxigdal-ml");
    }
}