mathhook_core/core/performance/
strategy.rs

1//! Performance optimization strategy for MathHook library usage
2//!
3//! This module defines the smart integration of SIMD, memoization, and concurrency
4//! optimized for Python/Node.js bindings and interactive usage patterns.
5
6use crate::core::Expression;
7use std::collections::HashMap;
8use std::sync::{Arc, OnceLock, RwLock};
9
/// Host-environment the library is embedded in.
///
/// Each variant maps to a tuned [`PerformanceConfig`] via
/// [`PerformanceConfig::for_binding`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BindingContext {
    /// Native Rust application (maximum performance)
    Native,
    /// Python binding (GIL constraints)
    Python,
    /// Node.js binding (single-threaded model)
    NodeJs,
    /// WebAssembly binding (memory constraints)
    WebAssembly,
    /// Custom binding (user-defined strategy; falls back to `PerformanceConfig::default()`)
    Custom,
}
24
/// Performance configuration for different usage contexts.
///
/// Thresholds are expressed in "operation size" units (number of elements
/// in a bulk operation); see `PerformanceOptimizer::should_use_simd` /
/// `should_use_parallel` for how they are applied.
#[derive(Debug, Clone)]
pub struct PerformanceConfig {
    /// Enable SIMD operations for bulk numeric operations
    pub simd_enabled: bool,
    /// Minimum operation size at which SIMD kicks in (inclusive)
    pub simd_threshold: usize,
    /// Enable memoization for expensive operations
    pub memoization_enabled: bool,
    /// Maximum cache size (number of entries, per cache)
    pub cache_size_limit: usize,
    /// Enable parallel processing for large operations
    pub parallel_enabled: bool,
    /// Minimum operation size at which parallelism kicks in (inclusive)
    pub parallel_threshold: usize,
}
41
42impl Default for PerformanceConfig {
43    fn default() -> Self {
44        Self {
45            simd_enabled: true,
46            simd_threshold: 50, // Based on benchmark results
47            memoization_enabled: true,
48            cache_size_limit: 10000,  // ~10MB for typical expressions
49            parallel_enabled: false,  // Conservative default for bindings
50            parallel_threshold: 1000, // Only for very large operations
51        }
52    }
53}
54
55impl PerformanceConfig {
56    /// Configuration optimized for Python bindings (GIL-aware)
57    pub fn python_optimized() -> Self {
58        Self {
59            simd_enabled: true,
60            simd_threshold: 50,
61            memoization_enabled: true,
62            cache_size_limit: 50000, // Larger cache for interactive sessions
63            parallel_enabled: false, // Avoid GIL contention
64            parallel_threshold: usize::MAX, // Effectively disabled
65        }
66    }
67
68    /// Configuration optimized for Node.js bindings
69    pub fn nodejs_optimized() -> Self {
70        Self {
71            simd_enabled: true,
72            simd_threshold: 50,
73            memoization_enabled: true,
74            cache_size_limit: 20000, // Medium cache for server usage
75            parallel_enabled: true,  // Node can benefit from threads
76            parallel_threshold: 500, // Lower threshold for server workloads
77        }
78    }
79
80    /// Configuration for high-performance native usage
81    pub fn native_optimized() -> Self {
82        Self {
83            simd_enabled: true,
84            simd_threshold: 20, // More aggressive SIMD
85            memoization_enabled: true,
86            cache_size_limit: 100000, // Large cache for native apps
87            parallel_enabled: true,
88            parallel_threshold: 100, // Aggressive parallelization
89        }
90    }
91
92    /// Configuration for WebAssembly bindings (memory-constrained)
93    pub fn wasm_optimized() -> Self {
94        Self {
95            simd_enabled: true,  // WASM supports SIMD
96            simd_threshold: 100, // Higher threshold due to overhead
97            memoization_enabled: true,
98            cache_size_limit: 1000,  // Small cache due to memory limits
99            parallel_enabled: false, // WASM is single-threaded
100            parallel_threshold: usize::MAX,
101        }
102    }
103
104    /// Factory method to create configuration based on binding context
105    pub fn for_binding(context: BindingContext) -> Self {
106        match context {
107            BindingContext::Native => Self::native_optimized(),
108            BindingContext::Python => Self::python_optimized(),
109            BindingContext::NodeJs => Self::nodejs_optimized(),
110            BindingContext::WebAssembly => Self::wasm_optimized(),
111            BindingContext::Custom => Self::default(),
112        }
113    }
114}
115
/// Smart performance optimizer that decides when to use different strategies.
///
/// Threshold decisions are pure reads of `config`; the caches are behind
/// `Arc<RwLock<..>>` so the optimizer can be shared across threads.
/// Cache keys are caller-supplied expression hashes (`u64`).
pub struct PerformanceOptimizer {
    pub config: PerformanceConfig,
    // Memoized simplification results, keyed by expression hash.
    simplify_cache: Arc<RwLock<HashMap<u64, Expression>>>,
    // Memoized derivative results, keyed by expression hash.
    // NOTE(review): in this view of the file only `cache_stats` reads this
    // cache — confirm the accessor methods live elsewhere.
    derivative_cache: Arc<RwLock<HashMap<u64, Expression>>>,
}
122
123impl PerformanceOptimizer {
124    pub fn new(config: PerformanceConfig) -> Self {
125        Self {
126            config,
127            simplify_cache: Arc::new(RwLock::new(HashMap::new())),
128            derivative_cache: Arc::new(RwLock::new(HashMap::new())),
129        }
130    }
131
132    /// Determine if SIMD should be used for bulk operations
133    pub fn should_use_simd(&self, operation_size: usize) -> bool {
134        self.config.simd_enabled && operation_size >= self.config.simd_threshold
135    }
136
137    /// Determine if parallel processing should be used
138    pub fn should_use_parallel(&self, operation_size: usize) -> bool {
139        self.config.parallel_enabled && operation_size >= self.config.parallel_threshold
140    }
141
142    /// Check cache for memoized result
143    pub fn get_cached_simplify(&self, expr_hash: u64) -> Option<Expression> {
144        if !self.config.memoization_enabled {
145            return None;
146        }
147
148        self.simplify_cache.read().ok()?.get(&expr_hash).cloned()
149    }
150
151    /// Cache a simplification result
152    pub fn cache_simplify(&self, expr_hash: u64, result: Expression) {
153        if !self.config.memoization_enabled {
154            return;
155        }
156
157        if let Ok(mut cache) = self.simplify_cache.write() {
158            // Implement LRU eviction if cache is full
159            if cache.len() >= self.config.cache_size_limit {
160                // Simple eviction: remove oldest entry
161                if let Some(oldest_key) = cache.keys().next().copied() {
162                    cache.remove(&oldest_key);
163                }
164            }
165            cache.insert(expr_hash, result);
166        }
167    }
168
169    /// Get cache statistics for monitoring
170    pub fn cache_stats(&self) -> CacheStats {
171        let simplify_size = self.simplify_cache.read().map(|c| c.len()).unwrap_or(0);
172        let derivative_size = self.derivative_cache.read().map(|c| c.len()).unwrap_or(0);
173
174        CacheStats {
175            simplify_cache_size: simplify_size,
176            derivative_cache_size: derivative_size,
177            total_memory_estimate: (simplify_size + derivative_size) * 1024, // Rough estimate
178        }
179    }
180}
181
/// Snapshot of cache occupancy returned by `PerformanceOptimizer::cache_stats`.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Number of entries currently in the simplification cache.
    pub simplify_cache_size: usize,
    /// Number of entries currently in the derivative cache.
    pub derivative_cache_size: usize,
    /// Rough memory footprint (entries * 1024 bytes; a heuristic, not measured).
    pub total_memory_estimate: usize, // Bytes
}
188
/// Global performance optimizer instance (write-once: only the first
/// initialization takes effect).
static GLOBAL_OPTIMIZER: OnceLock<PerformanceOptimizer> = OnceLock::new();
191
192/// Initialize global performance optimizer
193pub fn init_performance_optimizer(config: PerformanceConfig) {
194    let _ = GLOBAL_OPTIMIZER.get_or_init(|| PerformanceOptimizer::new(config));
195}
196
/// Get the global performance optimizer, or `None` if
/// `init_performance_optimizer` has not been called yet.
pub fn get_performance_optimizer() -> Option<&'static PerformanceOptimizer> {
    GLOBAL_OPTIMIZER.get()
}
201
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_performance_config_defaults() {
        // Baseline config: SIMD and memoization on, benchmark-derived threshold.
        let cfg = PerformanceConfig::default();
        assert!(cfg.simd_enabled);
        assert!(cfg.memoization_enabled);
        assert_eq!(cfg.simd_threshold, 50);
    }

    #[test]
    fn test_python_optimized_config() {
        let cfg = PerformanceConfig::python_optimized();
        assert!(cfg.simd_enabled);
        // Parallelism must stay off to avoid GIL contention.
        assert!(!cfg.parallel_enabled);
        assert!(cfg.memoization_enabled);
        assert_eq!(cfg.cache_size_limit, 50000);
    }

    #[test]
    fn test_performance_optimizer_simd_threshold() {
        let optimizer = PerformanceOptimizer::new(PerformanceConfig::default());

        // Default threshold is 50: 10 falls below, 100 clears it.
        assert!(!optimizer.should_use_simd(10));
        assert!(optimizer.should_use_simd(100));
    }

    #[test]
    fn test_memoization_cache() {
        let optimizer = PerformanceOptimizer::new(PerformanceConfig::default());
        let hash = 12345u64;
        let expr = Expression::integer(42);

        // Miss before insertion, hit after.
        assert!(optimizer.get_cached_simplify(hash).is_none());
        optimizer.cache_simplify(hash, expr.clone());
        assert_eq!(optimizer.get_cached_simplify(hash), Some(expr));
    }
}
249}