scirs2_metrics/optimization/mod.rs
//! Optimization and performance enhancements for metrics computation
//!
//! This module provides optimized implementations of metrics calculations
//! for improved performance, memory efficiency, and numerical stability.
//!
//! The optimization module contains four main components:
//!
//! 1. `parallel` - Tools for parallel computation of metrics
//! 2. `memory` - Tools for memory-efficient metrics computation
//! 3. `numeric` - Tools for numerically stable metrics computation
//! 4. `quantum_acceleration` - Quantum-inspired algorithms for accelerating large-scale computations
//!
//! # Examples
//!
//! ## Using StableMetrics for numerical stability
//!
//! ```
//! use scirs2_metrics::optimization::numeric::StableMetrics;
//! use scirs2_metrics::error::Result;
//!
//! fn compute_kl_divergence(p: &[f64], q: &[f64]) -> Result<f64> {
//!     let stable = StableMetrics::<f64>::new()
//!         .with_epsilon(1e-10)
//!         .with_clip_values(true);
//!     stable.kl_divergence(p, q)
//! }
//! ```
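//!
//! Here `with_clip_values(true)` together with the `epsilon` floor is
//! intended to guard against zeros in `q`, which would otherwise send the
//! divergence to infinity.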
//!
//! ## Using parallel computation for batch metrics
//!
//! ```
//! use scirs2_metrics::optimization::parallel::{ParallelConfig, compute_metrics_batch};
//! use scirs2_metrics::error::Result;
//! use scirs2_core::ndarray::Array1;
//!
//! fn compute_multiple_metrics(y_true: &Array1<f64>, y_pred: &Array1<f64>) -> Result<Vec<f64>> {
//!     let config = ParallelConfig {
//!         parallel_enabled: true,
//!         min_chunk_size: 1000,
//!         num_threads: None,
//!     };
//!
//!     let metrics: Vec<Box<dyn Fn(&Array1<f64>, &Array1<f64>) -> Result<f64> + Send + Sync>> = vec![
//!         // Define your metric functions here
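//!         // For instance, a mean-squared-error closure (an illustrative
//!         // sketch; any function matching the boxed signature works):
//!         Box::new(|yt, yp| Ok((yt - yp).mapv(|d| d * d).mean().unwrap_or(0.0))),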
//!     ];
//!
//!     compute_metrics_batch(y_true, y_pred, &metrics, &config)
//! }
//! ```
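//!
//! In this configuration, `num_threads: None` is taken to mean the default
//! thread-pool size, while `min_chunk_size` sets a lower bound on how finely
//! the work is split, so that parallelism overhead stays amortized.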
//!
//! ## Using memory-efficient metrics for large datasets
//!
//! ```
//! use scirs2_metrics::optimization::memory::{StreamingMetric, ChunkedMetrics};
//! use scirs2_metrics::error::Result;
//!
//! struct StreamingMeanAbsoluteError;
//!
//! impl StreamingMetric<f64> for StreamingMeanAbsoluteError {
//!     type State = (f64, usize); // Running sum and count
//!
//!     fn init_state(&self) -> Self::State {
//!         (0.0, 0)
//!     }
//!
//!     fn update_state(&self, state: &mut Self::State, batch_true: &[f64], batch_pred: &[f64]) -> Result<()> {
//!         for (y_t, y_p) in batch_true.iter().zip(batch_pred.iter()) {
//!             state.0 += (y_t - y_p).abs();
//!             state.1 += 1;
//!         }
//!         Ok(())
//!     }
//!
//!     fn finalize(&self, state: &Self::State) -> Result<f64> {
//!         if state.1 == 0 {
//!             return Err(scirs2_metrics::error::MetricsError::InvalidInput(
//!                 "No data processed".to_string()
//!             ));
//!         }
//!         Ok(state.0 / state.1 as f64)
//!     }
//! }
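//!
//! // A minimal driver (illustrative sketch): push fixed-size chunks through
//! // the trait methods defined above, then finalize. `ChunkedMetrics`
//! // (imported above) presumably wraps this style of batched processing.
//! fn mae_in_chunks(y_true: &[f64], y_pred: &[f64], chunk_size: usize) -> Result<f64> {
//!     let metric = StreamingMeanAbsoluteError;
//!     let mut state = metric.init_state();
//!     for (t, p) in y_true.chunks(chunk_size).zip(y_pred.chunks(chunk_size)) {
//!         metric.update_state(&mut state, t, p)?;
//!     }
//!     metric.finalize(&state)
//! }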
//! ```
//!
//! ## Using quantum-inspired acceleration for large-scale computations
//!
//! ```
//! use scirs2_metrics::optimization::quantum_acceleration::{QuantumMetricsComputer, QuantumConfig};
//! use scirs2_metrics::error::Result;
//! use scirs2_core::ndarray::Array1;
//!
//! fn compute_quantum_correlation(x: &Array1<f64>, y: &Array1<f64>) -> Result<f64> {
//!     let config = QuantumConfig::default();
//!     let mut quantum_computer = QuantumMetricsComputer::new(config)?;
//!     quantum_computer.quantum_correlation(&x.view(), &y.view())
//! }
//! ```
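//!
//! `QuantumConfig::default()` uses the default settings; the
//! `quantum_acceleration` module also exposes tuning types such as
//! `VqeParameters` (re-exported below) for finer control.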

#![allow(clippy::manual_div_ceil)]
#![allow(clippy::unwrap_or_default)]
#![allow(clippy::arc_with_non_send_sync)]
#![allow(clippy::await_holding_lock)]
#![allow(clippy::type_complexity)]
#![allow(clippy::manual_map)]
#![allow(clippy::too_many_arguments)]
#![allow(dead_code)]

// Re-export submodules
pub mod advanced_memory_optimization;
pub mod distributed;
pub mod distributed_advanced;
pub mod enhanced_gpu_kernels;
pub mod gpu_acceleration;
pub mod gpu_kernels;
pub mod hardware;
pub mod memory;
pub mod numeric;
pub mod parallel;
pub mod quantum_acceleration;
pub mod simd_gpu;

// Re-export common functionality
pub use advanced_memory_optimization::{
    AdvancedMemoryPool, AllocationStrategy, BlockType, MemoryBlock, MemoryPoolConfig, MemoryStats,
    StrategyBenchmark,
};
pub use distributed::{
    DistributedConfig, DistributedMetricsBuilder, DistributedMetricsCoordinator,
};
pub use distributed_advanced::{
    AdvancedClusterConfig, AdvancedDistributedCoordinator, AutoScalingConfig, ClusterState,
    ConsensusAlgorithm, ConsensusConfig, DistributedTask, FaultToleranceConfig, LocalityConfig,
    NodeInfo, NodeRole, NodeStatus, OptimizationConfig, ResourceRequirements, ShardingConfig,
    ShardingStrategy, TaskPriority, TaskType,
};
pub use gpu_acceleration::{BenchmarkResults, GpuAccelConfig, GpuInfo, GpuMetricsComputer};
pub use gpu_kernels::{
    AdvancedGpuComputer, BatchSettings, CudaContext, ErrorHandling, GpuApi, GpuComputeConfig,
    GpuComputeResults, GpuPerformanceStats, KernelMetrics, KernelOptimization, MemoryStrategy,
    OpenClContext, TransferMetrics, VectorizationLevel,
};
pub use hardware::{
    HardwareAccelConfig, HardwareAcceleratedMatrix, HardwareCapabilities, SimdDistanceMetrics,
    SimdStatistics, VectorWidth,
};
pub use memory::{ChunkedMetrics, StreamingMetric};
pub use numeric::{StableMetric, StableMetrics};
pub use parallel::ParallelConfig;
pub use quantum_acceleration::{
    InterferencePatterns, QuantumBenchmarkResults, QuantumConfig, QuantumMetricsComputer,
    QuantumProcessor, SuperpositionManager, VqeParameters,
};
pub use simd_gpu::SimdMetrics;