Expand description
OxiGDAL ML - Machine Learning for Geospatial Data
This crate provides machine learning capabilities for the OxiGDAL ecosystem, enabling geospatial ML workflows with ONNX Runtime integration.
§Features
- ONNX Runtime Integration: Pure Rust interface to ONNX Runtime for model inference
- Image Segmentation: Semantic, instance, and panoptic segmentation
- Image Classification: Scene classification, land cover classification, multi-label
- Object Detection: Bounding box detection with NMS and georeferencing
- Preprocessing: Normalization, tiling, padding, and augmentation
- Postprocessing: Tile merging, thresholding, polygon conversion, GeoJSON export
§Optional Features
- gpu — Enable CUDA and TensorRT GPU acceleration
- directml — Enable DirectML support (Windows)
- coreml — Enable CoreML support (macOS/iOS)
§Example: Image Segmentation
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::models::OnnxModel;
use oxigdal_ml::inference::{InferenceEngine, InferenceConfig};
use oxigdal_ml::segmentation::probability_to_mask;
// Load ONNX model
let model = OnnxModel::from_file("segmentation.onnx")?;
// Create inference engine
let config = InferenceConfig::default();
let engine = InferenceEngine::new(model, config);
// Load input raster (not shown)
// Run inference
let predictions = engine.predict(&input)?;
// Convert to segmentation mask
let mask = probability_to_mask(&predictions, 2, 0.5)?;
§Example: Object Detection
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::detection::{non_maximum_suppression, NmsConfig};
use oxigdal_ml::postprocessing::export_detections_geojson;
// Assume we have detections from a model
// Apply NMS to filter overlapping detections
let config = NmsConfig::default();
let filtered = non_maximum_suppression(&detections, &config)?;
// Export to GeoJSON
§Example: Batch Processing with Progress Tracking
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::batch::{BatchConfig, BatchProcessor};
use oxigdal_ml::models::OnnxModel;
// Load model
let model = OnnxModel::from_file("model.onnx")?;
// Auto-tune batch size based on available memory
let sample_size = 3 * 256 * 256 * 4; // 3 channels, 256x256, float32
let batch_size = BatchConfig::auto_tune_batch_size(sample_size, 0.5);
// Create batch processor
let config = BatchConfig::builder()
.max_batch_size(batch_size)
.parallel_batches(4)
.build();
let processor = BatchProcessor::new(model, config);
// Process large batch with progress bar
let results = processor.infer_batch_with_progress(inputs, true)?;
§Example: Model Optimization Pipeline
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::optimization::{OptimizationPipeline, OptimizationProfile};
// Create optimization pipeline for edge deployment
let pipeline = OptimizationPipeline::from_profile(OptimizationProfile::Size);
// Optimize model (applies quantization + pruning)
let stats = pipeline.optimize("model.onnx", "model_optimized.onnx")?;
println!("Size reduction: {:.1}%", stats.size_reduction_percent());
println!("Speedup: {:.2}x", stats.speedup);
println!("Accuracy delta: {:.2}%", stats.accuracy_delta);
§Example: Model Zoo - Download Pretrained Models
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::zoo::ModelZoo;
use oxigdal_ml::models::OnnxModel;
// Create model zoo
let mut zoo = ModelZoo::new()?;
// List available models
let models = zoo.list_models();
for model in models {
println!("{}: {} - {:.1}% accuracy",
model.name, model.description,
model.accuracy.unwrap_or(0.0));
}
// Download a model (with progress bar and checksum verification)
let model_path = zoo.get_model("unet_buildings")?;
// Load and use the model
let model = OnnxModel::from_file(model_path)?;
§Example: Transfer Learning
use oxigdal_ml::*;
// This example requires oxigdal-ml-foundation
// Load pretrained feature extractor
use oxigdal_ml_foundation::transfer::FeatureExtractor;
// Extract features from pretrained model
let extractor = FeatureExtractor::from_pretrained("resnet50")?;
// Use features for custom classification task
// (Training loop implementation would go here)
§Example: GPU Acceleration
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::gpu::{GpuBackend, GpuConfig};
use oxigdal_ml::inference::{InferenceEngine, InferenceConfig};
use oxigdal_ml::models::OnnxModel;
// Detect available GPU backends
let backends = GpuBackend::detect_all();
for backend in &backends {
println!("Available: {:?}", backend);
}
// Configure GPU acceleration
let gpu_config = GpuConfig::builder()
.preferred_backend(GpuBackend::Cuda)
.device_id(0)
.build();
// Create inference engine with GPU support
let model = OnnxModel::from_file("model.onnx")?;
let mut inference_config = InferenceConfig::default();
inference_config.gpu_config = Some(gpu_config);
let engine = InferenceEngine::new(model, inference_config);
§Example: Model Monitoring
ⓘ
use oxigdal_ml::*;
use oxigdal_ml::monitoring::{ModelMonitor, MonitoringConfig};
use oxigdal_ml::models::OnnxModel;
let model = OnnxModel::from_file("model.onnx")?;
// Create monitor with custom config
let config = MonitoringConfig::builder()
.track_latency(true)
.track_memory(true)
.alert_threshold_ms(100.0)
.build();
let mut monitor = ModelMonitor::new(model, config);
// Run inference with monitoring
let result = monitor.predict(&input)?;
// Check metrics
let metrics = monitor.metrics();
println!("Avg latency: {:.2}ms", metrics.avg_latency_ms());
println!("Memory usage: {:.1}MB", metrics.peak_memory_mb);
§SciRS2 Integration Status
This crate is being migrated to use the Pure Rust SciRS2 ecosystem following COOLJAPAN policy. Current status:
- Random Number Generation: Completed - using `scirs2-core::random` for RNG
- Statistical Distributions: Completed - using `NormalDistribution` for Gaussian noise
- Data Augmentation: Completed - integrated SciRS2-Core RNG
- Linear Algebra: Partial - `ndarray` still used in some modules, migration ongoing
- Neural Network Training: In Progress - `oxigdal-ml-foundation` implements custom Pure Rust backend with SciRS2 integration
§Usage Example with SciRS2
use oxigdal_ml::augmentation::{add_gaussian_noise, random_crop};
// Create a raster buffer
let input = RasterBuffer::zeros(512, 512, RasterDataType::Float32);
// Add Gaussian noise using SciRS2-Core
let noisy = add_gaussian_noise(&input, 0.01)?;
// Random crop using SciRS2-Core RNG
let cropped = random_crop(&input, 256, 256)?;
For more information about SciRS2 integration, see the SCIRS2 POLICY in the workspace documentation.
Re-exports§
pub use augmentation::AugmentationConfig;
pub use batch::BatchConfig;
pub use batch::BatchProcessor;
pub use error::MlError;
pub use error::Result;
pub use gpu::GpuBackend;
pub use gpu::GpuConfig;
pub use gpu::GpuDevice;
pub use models::Model;
pub use models::OnnxModel;
pub use monitoring::ModelMonitor;
pub use monitoring::MonitoringConfig;
pub use monitoring::PerformanceMetrics;
pub use optimization::OptimizationPipeline;
pub use optimization::OptimizationProfile;
pub use serving::DeploymentStrategy;
pub use serving::ModelServer;
pub use serving::ServerConfig;
pub use superres::SuperResConfig;
pub use superres::SuperResolution;
pub use superres::UpscaleFactor;
pub use zoo::ModelZoo;
Modules§
- augmentation
- Data augmentation for geospatial imagery
- batch
- Advanced batch inference for efficient model serving
- batch_predict
- Adaptive batch prediction for geospatial ML inference
- classification
- Image classification for geospatial data
- detection
- Object detection for geospatial data
- error
- Error types for OxiGDAL ML operations
- gpu
- GPU acceleration for ML inference
- hot_reload
- ONNX model hot-reload with file watching
- inference
- Inference engine for ML workflows
- inference_cache
- Content-addressed inference result cache
- model_versioning
- Model versioning and A/B testing for geospatial ML
- models
- Model management for OxiGDAL ML
- monitoring
- Model monitoring and performance tracking
- optimization
- Model optimization techniques for efficient inference
- postprocessing
- Postprocessing operations for ML results
- preprocessing
- Data preprocessing for ML workflows
- segmentation
- Image segmentation for geospatial data
- serving
- Model serving and deployment utilities
- superres
- Super-resolution for geospatial imagery
- zoo
- Pre-trained model zoo for geospatial ML