memscope_rs/export/
adaptive_performance.rs

1//! Adaptive Performance Optimization
2//!
3//! This module implements adaptive performance optimizations for JSON export:
4//! - Adaptive batch size adjustment based on system performance
5//! - Memory usage optimization and intelligent caching
6//! - Dynamic performance tuning based on workload characteristics
7
8use std::collections::HashMap;
9use std::sync::{Arc, Mutex, RwLock};
10use std::time::{Duration, Instant};
11
/// Performance metrics collector for adaptive optimization.
///
/// One instance describes a single processed batch; a rolling history of
/// these drives the batch controller's sizing decisions.
#[derive(Debug, Clone)]
pub struct PerformanceMetrics {
    /// Wall-clock processing time for the batch, in milliseconds
    pub processing_time_ms: u64,
    /// Memory usage observed while processing, in megabytes
    pub memory_usage_mb: u64,
    /// Throughput: allocations processed per second
    pub allocations_per_second: f64,
    /// Type-cache hit ratio in [0.0, 1.0]
    pub cache_hit_ratio: f64,
    /// Ratio of allocations processed to batch capacity
    pub batch_efficiency: f64,
    /// Instant at which these metrics were captured
    pub timestamp: Instant,
}
28
29impl Default for PerformanceMetrics {
30    fn default() -> Self {
31        Self {
32            processing_time_ms: 0,
33            memory_usage_mb: 0,
34            allocations_per_second: 0.0,
35            cache_hit_ratio: 0.0,
36            batch_efficiency: 0.0,
37            timestamp: Instant::now(),
38        }
39    }
40}
41
/// Adaptive batch size controller
///
/// Automatically adjusts batch sizes based on:
/// - System memory pressure
/// - Processing time per batch
/// - Cache hit ratios
/// - Overall throughput
#[derive(Debug)]
pub struct AdaptiveBatchController {
    /// Current batch size; adjusted after every recorded measurement
    current_batch_size: usize,
    /// Lower bound the controller never shrinks below
    min_batch_size: usize,
    /// Upper bound the controller never grows beyond
    max_batch_size: usize,
    /// Target per-batch processing time in milliseconds
    target_processing_time_ms: u64,
    /// Rolling window of recent metrics (trimmed to 50 entries)
    performance_history: Vec<PerformanceMetrics>,
    /// Multiplicative growth factor applied when processing is fast
    adjustment_factor: f64,
}
64
65impl AdaptiveBatchController {
66    /// Create a new adaptive batch controller
67    pub fn new(initial_batch_size: usize) -> Self {
68        Self {
69            current_batch_size: initial_batch_size,
70            min_batch_size: 100,
71            max_batch_size: 10000,
72            target_processing_time_ms: 10, // 10ms target per Requirement 3.2
73            performance_history: Vec::with_capacity(100),
74            adjustment_factor: 1.2,
75        }
76    }
77
78    /// Get the current optimal batch size
79    pub fn get_optimal_batch_size(&self) -> usize {
80        self.current_batch_size
81    }
82
83    /// Record performance metrics and adjust batch size
84    pub fn record_performance(&mut self, metrics: PerformanceMetrics) {
85        tracing::info!(
86            "๐Ÿ“Š Recording performance: {}ms, {} allocs/sec, batch_size: {}",
87            metrics.processing_time_ms,
88            metrics.allocations_per_second as u64,
89            self.current_batch_size
90        );
91
92        self.performance_history.push(metrics.clone());
93
94        // Keep only recent history (last 50 measurements)
95        if self.performance_history.len() > 50 {
96            self.performance_history.remove(0);
97        }
98
99        // Adjust batch size based on performance
100        self.adjust_batch_size(&metrics);
101    }
102
103    /// Adaptive batch size adjustment algorithm
104    fn adjust_batch_size(&mut self, current_metrics: &PerformanceMetrics) {
105        let old_batch_size = self.current_batch_size;
106
107        if current_metrics.processing_time_ms > self.target_processing_time_ms {
108            // Processing is too slow, reduce batch size
109            let reduction_factor = (current_metrics.processing_time_ms as f64
110                / self.target_processing_time_ms as f64)
111                .min(2.0);
112            self.current_batch_size = ((self.current_batch_size as f64 / reduction_factor)
113                as usize)
114                .max(self.min_batch_size);
115
116            tracing::info!(
117                "๐Ÿ”ฝ Reducing batch size: {} -> {} (processing too slow: {}ms)",
118                old_batch_size,
119                self.current_batch_size,
120                current_metrics.processing_time_ms
121            );
122        } else if current_metrics.processing_time_ms < self.target_processing_time_ms / 2 {
123            // Processing is fast, we can increase batch size
124            self.current_batch_size = ((self.current_batch_size as f64 * self.adjustment_factor)
125                as usize)
126                .min(self.max_batch_size);
127
128            tracing::info!(
129                "๐Ÿ”ผ Increasing batch size: {} -> {} (processing fast: {}ms)",
130                old_batch_size,
131                self.current_batch_size,
132                current_metrics.processing_time_ms
133            );
134        }
135
136        // Additional adjustments based on memory pressure
137        if current_metrics.memory_usage_mb > 500 {
138            // High memory usage, reduce batch size
139            self.current_batch_size = (self.current_batch_size * 3 / 4).max(self.min_batch_size);
140            tracing::info!(
141                "๐Ÿ’พ Reducing batch size due to memory pressure: {} -> {} ({}MB)",
142                old_batch_size,
143                self.current_batch_size,
144                current_metrics.memory_usage_mb
145            );
146        }
147    }
148
149    /// Get performance trend analysis
150    pub fn get_performance_trend(&self) -> Option<String> {
151        if self.performance_history.len() < 5 {
152            return None;
153        }
154
155        let recent_avg = self
156            .performance_history
157            .iter()
158            .rev()
159            .take(5)
160            .map(|m| m.processing_time_ms)
161            .sum::<u64>() as f64
162            / 5.0;
163
164        let older_avg = self
165            .performance_history
166            .iter()
167            .rev()
168            .skip(5)
169            .take(5)
170            .map(|m| m.processing_time_ms)
171            .sum::<u64>() as f64
172            / 5.0;
173
174        let trend_ratio = recent_avg / older_avg;
175
176        if trend_ratio > 1.2 {
177            Some("Performance degrading".to_string())
178        } else if trend_ratio < 0.8 {
179            Some("Performance improving".to_string())
180        } else {
181            Some("Performance stable".to_string())
182        }
183    }
184}
185
/// Intelligent type information cache
///
/// Caches frequently accessed type information to reduce computation
/// overhead. Shared across threads via `Arc`; lookups take a read lock,
/// insertions and access-time updates take a write lock.
#[derive(Debug)]
pub struct TypeInfoCache {
    /// Cached entries keyed by type name
    cache: Arc<RwLock<HashMap<String, CachedTypeInfo>>>,
    /// Hit/miss/eviction counters
    cache_stats: Arc<Mutex<CacheStats>>,
    /// Maximum number of entries before LRU eviction kicks in
    max_cache_size: usize,
}
198
/// A single cached entry together with its LRU bookkeeping.
#[derive(Debug, Clone)]
#[allow(dead_code)] // size_hint/complexity_score are recorded but not yet consumed
struct CachedTypeInfo {
    /// Type name (duplicates the map key)
    type_name: String,
    /// Optional size hint; currently always `None` when stored
    size_hint: Option<usize>,
    /// Heuristic complexity score (see `compute_complexity_score`)
    complexity_score: u32,
    /// Number of times this entry has been read
    access_count: u64,
    /// Last access time; the oldest entry is the LRU eviction victim
    last_accessed: Instant,
    /// The precomputed type information returned on cache hits
    computed_info: serde_json::Value,
}
215
/// Aggregate cache counters; all start at zero via `Default`.
#[derive(Debug, Default)]
struct CacheStats {
    /// Number of successful lookups
    hits: u64,
    /// Number of failed lookups
    misses: u64,
    /// Number of entries removed by LRU eviction
    evictions: u64,
}
225
226impl TypeInfoCache {
227    /// Create a new type info cache
228    pub fn new(max_size: usize) -> Self {
229        Self {
230            cache: Arc::new(RwLock::new(HashMap::new())),
231            cache_stats: Arc::new(Mutex::new(CacheStats::default())),
232            max_cache_size: max_size,
233        }
234    }
235
236    /// Get cached type information
237    pub fn get(&self, type_name: &str) -> Option<serde_json::Value> {
238        // First, try to get the cached value
239        let cached_value = {
240            let cache = self.cache.read().ok()?;
241            cache.get(type_name).map(|info| info.computed_info.clone())
242        };
243
244        if cached_value.is_some() {
245            // Update access statistics
246            if let Ok(mut stats) = self.cache_stats.lock() {
247                stats.hits += 1;
248            }
249
250            // Update access time in a separate write lock
251            if let Ok(mut cache) = self.cache.write() {
252                if let Some(info) = cache.get_mut(type_name) {
253                    info.access_count += 1;
254                    info.last_accessed = Instant::now();
255                }
256            }
257
258            cached_value
259        } else {
260            // Cache miss
261            if let Ok(mut stats) = self.cache_stats.lock() {
262                stats.misses += 1;
263            }
264            None
265        }
266    }
267
268    /// Store computed type information in cache
269    pub fn store(&self, type_name: String, computed_info: serde_json::Value) {
270        if let Ok(mut cache) = self.cache.write() {
271            // Check if we need to evict entries
272            if cache.len() >= self.max_cache_size {
273                self.evict_lru(&mut cache);
274            }
275
276            let cached_info = CachedTypeInfo {
277                type_name: type_name.clone(),
278                size_hint: None,
279                complexity_score: self.compute_complexity_score(&computed_info),
280                access_count: 1,
281                last_accessed: Instant::now(),
282                computed_info,
283            };
284
285            cache.insert(type_name, cached_info);
286        }
287    }
288
289    /// Evict least recently used entries
290    fn evict_lru(&self, cache: &mut HashMap<String, CachedTypeInfo>) {
291        if cache.is_empty() {
292            return;
293        }
294
295        // Find the least recently used entry
296        let lru_key = cache
297            .iter()
298            .min_by_key(|(_, info)| info.last_accessed)
299            .map(|(key, _)| key.clone());
300
301        if let Some(key) = lru_key {
302            cache.remove(&key);
303            if let Ok(mut stats) = self.cache_stats.lock() {
304                stats.evictions += 1;
305            }
306            tracing::info!("๐Ÿ—‘๏ธ Evicted LRU cache entry: {}", key);
307        }
308    }
309
310    /// Compute complexity score for caching priority
311    fn compute_complexity_score(&self, info: &serde_json::Value) -> u32 {
312        match info {
313            serde_json::Value::Object(obj) => obj.len() as u32 * 2,
314            serde_json::Value::Array(arr) => arr.len() as u32,
315            serde_json::Value::String(s) => s.len() as u32 / 10,
316            _ => 1,
317        }
318    }
319
320    /// Get cache statistics
321    pub fn get_stats(&self) -> (u64, u64, f64) {
322        if let Ok(stats) = self.cache_stats.lock() {
323            let total_requests = stats.hits + stats.misses;
324            let hit_ratio = if total_requests > 0 {
325                stats.hits as f64 / total_requests as f64
326            } else {
327                0.0
328            };
329            (stats.hits, stats.misses, hit_ratio)
330        } else {
331            (0, 0, 0.0)
332        }
333    }
334
335    /// Clear cache and reset statistics
336    pub fn clear(&self) {
337        if let Ok(mut cache) = self.cache.write() {
338            cache.clear();
339        }
340        if let Ok(mut stats) = self.cache_stats.lock() {
341            *stats = CacheStats::default();
342        }
343        tracing::info!("๐Ÿงน Type info cache cleared");
344    }
345}
346
/// Memory usage monitor for adaptive optimization
#[derive(Debug)]
pub struct MemoryUsageMonitor {
    /// Highest usage ever observed, in MB
    peak_usage_mb: u64,
    /// Most recently reported usage, in MB
    current_usage_mb: u64,
    /// (timestamp, usage_mb) samples, trimmed to the last 100
    usage_history: Vec<(Instant, u64)>,
    /// Usage above this logs a warning (1024 MB)
    warning_threshold_mb: u64,
    /// Usage above this logs a critical alert (2048 MB)
    critical_threshold_mb: u64,
}
356
357impl Default for MemoryUsageMonitor {
358    fn default() -> Self {
359        Self::new()
360    }
361}
362
363impl MemoryUsageMonitor {
364    /// Create a new memory usage monitor
365    pub fn new() -> Self {
366        Self {
367            peak_usage_mb: 0,
368            current_usage_mb: 0,
369            usage_history: Vec::new(),
370            warning_threshold_mb: 1024,  // 1GB warning
371            critical_threshold_mb: 2048, // 2GB critical
372        }
373    }
374
375    /// Update current memory usage
376    pub fn update_usage(&mut self, usage_mb: u64) {
377        self.current_usage_mb = usage_mb;
378        self.peak_usage_mb = self.peak_usage_mb.max(usage_mb);
379
380        self.usage_history.push((Instant::now(), usage_mb));
381
382        // Keep only recent history (last 100 measurements)
383        if self.usage_history.len() > 100 {
384            self.usage_history.remove(0);
385        }
386
387        // Check thresholds
388        if usage_mb > self.critical_threshold_mb {
389            tracing::info!(
390                "๐Ÿšจ CRITICAL: Memory usage {}MB exceeds critical threshold {}MB",
391                usage_mb,
392                self.critical_threshold_mb
393            );
394        } else if usage_mb > self.warning_threshold_mb {
395            tracing::info!(
396                "โš ๏ธ WARNING: Memory usage {}MB exceeds warning threshold {}MB",
397                usage_mb,
398                self.warning_threshold_mb
399            );
400        }
401    }
402
403    /// Get current memory pressure level
404    pub fn get_memory_pressure(&self) -> MemoryPressureLevel {
405        if self.current_usage_mb > self.critical_threshold_mb {
406            MemoryPressureLevel::Critical
407        } else if self.current_usage_mb > self.warning_threshold_mb {
408            MemoryPressureLevel::High
409        } else if self.current_usage_mb > self.warning_threshold_mb / 2 {
410            MemoryPressureLevel::Medium
411        } else {
412            MemoryPressureLevel::Low
413        }
414    }
415
416    /// Get memory usage trend
417    pub fn get_usage_trend(&self) -> Option<MemoryTrend> {
418        if self.usage_history.len() < 10 {
419            return None;
420        }
421
422        let recent_avg = self
423            .usage_history
424            .iter()
425            .rev()
426            .take(5)
427            .map(|(_, usage)| *usage)
428            .sum::<u64>() as f64
429            / 5.0;
430
431        let older_avg = self
432            .usage_history
433            .iter()
434            .rev()
435            .skip(5)
436            .take(5)
437            .map(|(_, usage)| *usage)
438            .sum::<u64>() as f64
439            / 5.0;
440
441        let trend_ratio = recent_avg / older_avg;
442
443        if trend_ratio > 1.1 {
444            Some(MemoryTrend::Increasing)
445        } else if trend_ratio < 0.9 {
446            Some(MemoryTrend::Decreasing)
447        } else {
448            Some(MemoryTrend::Stable)
449        }
450    }
451}
452
/// Memory pressure level derived from the monitor's thresholds.
//
// `Eq` and `Hash` are sound for this fieldless enum and make it usable as
// a map key or in exhaustive equality checks (clippy:
// `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MemoryPressureLevel {
    /// Low memory pressure (at or below half the warning threshold)
    Low,
    /// Medium memory pressure (above half the warning threshold)
    Medium,
    /// High memory pressure (above the warning threshold)
    High,
    /// Critical memory pressure (above the critical threshold)
    Critical,
}
465
/// Memory usage trend over recent samples.
//
// `Eq` and `Hash` are sound for this fieldless enum (clippy:
// `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MemoryTrend {
    /// Increasing memory usage
    Increasing,
    /// Decreasing memory usage
    Decreasing,
    /// Stable memory usage
    Stable,
}
476
/// Adaptive performance optimizer - main coordinator
///
/// Coordinates all adaptive optimization components:
/// - Batch size controller
/// - Type info cache
/// - Memory usage monitor
/// - Performance metrics collection
#[derive(Debug)]
pub struct AdaptivePerformanceOptimizer {
    /// Adjusts batch sizes from recorded per-batch metrics
    batch_controller: AdaptiveBatchController,
    /// Caches computed per-type JSON info
    type_cache: TypeInfoCache,
    /// Tracks memory usage, pressure level and trend
    memory_monitor: MemoryUsageMonitor,
    /// When false, all adaptive behavior is bypassed
    optimization_enabled: bool,
    /// Creation/reset time, used for uptime reporting
    start_time: Instant,
}
492
493impl AdaptivePerformanceOptimizer {
494    /// Create a new adaptive performance optimizer
495    pub fn new(initial_batch_size: usize, cache_size: usize) -> Self {
496        tracing::info!("๐Ÿš€ Initializing Adaptive Performance Optimizer");
497        tracing::info!("   โ€ข Initial batch size: {}", initial_batch_size);
498        tracing::info!("   โ€ข Cache size: {}", cache_size);
499
500        Self {
501            batch_controller: AdaptiveBatchController::new(initial_batch_size),
502            type_cache: TypeInfoCache::new(cache_size),
503            memory_monitor: MemoryUsageMonitor::new(),
504            optimization_enabled: true,
505            start_time: Instant::now(),
506        }
507    }
508
509    /// Get optimal batch size for current conditions
510    pub fn get_optimal_batch_size(&self) -> usize {
511        if !self.optimization_enabled {
512            return 1000; // Default fallback
513        }
514
515        let base_size = self.batch_controller.get_optimal_batch_size();
516
517        // Adjust based on memory pressure
518        match self.memory_monitor.get_memory_pressure() {
519            MemoryPressureLevel::Critical => base_size / 4,
520            MemoryPressureLevel::High => base_size / 2,
521            MemoryPressureLevel::Medium => base_size * 3 / 4,
522            MemoryPressureLevel::Low => base_size,
523        }
524    }
525
526    /// Record processing performance and adapt
527    pub fn record_batch_performance(
528        &mut self,
529        batch_size: usize,
530        processing_time: Duration,
531        memory_usage_mb: u64,
532        allocations_processed: usize,
533    ) {
534        if !self.optimization_enabled {
535            return;
536        }
537
538        let allocations_per_second = if processing_time.as_secs_f64() > 0.0 {
539            allocations_processed as f64 / processing_time.as_secs_f64()
540        } else {
541            allocations_processed as f64 / 0.001
542        };
543
544        let (_cache_hits, _cache_misses, cache_hit_ratio) = self.type_cache.get_stats();
545
546        let batch_efficiency = allocations_processed as f64 / batch_size as f64;
547
548        let metrics = PerformanceMetrics {
549            processing_time_ms: processing_time.as_millis() as u64,
550            memory_usage_mb,
551            allocations_per_second,
552            cache_hit_ratio,
553            batch_efficiency,
554            timestamp: Instant::now(),
555        };
556
557        self.batch_controller.record_performance(metrics);
558        self.memory_monitor.update_usage(memory_usage_mb);
559    }
560
561    /// Get or compute cached type information
562    pub fn get_cached_type_info(&self, type_name: &str) -> Option<serde_json::Value> {
563        if !self.optimization_enabled {
564            return None;
565        }
566
567        self.type_cache.get(type_name)
568    }
569
570    /// Store computed type information in cache
571    pub fn cache_type_info(&self, type_name: String, info: serde_json::Value) {
572        if self.optimization_enabled {
573            self.type_cache.store(type_name, info);
574        }
575    }
576
577    /// Get comprehensive performance report
578    pub fn get_performance_report(&self) -> serde_json::Value {
579        let (cache_hits, cache_misses, cache_hit_ratio) = self.type_cache.get_stats();
580        let memory_pressure = self.memory_monitor.get_memory_pressure();
581        let memory_trend = self.memory_monitor.get_usage_trend();
582        let performance_trend = self.batch_controller.get_performance_trend();
583
584        serde_json::json!({
585            "adaptive_optimization": {
586                "enabled": self.optimization_enabled,
587                "uptime_seconds": self.start_time.elapsed().as_secs(),
588                "current_batch_size": self.get_optimal_batch_size(),
589                "cache_statistics": {
590                    "hits": cache_hits,
591                    "misses": cache_misses,
592                    "hit_ratio": cache_hit_ratio,
593                    "total_requests": cache_hits + cache_misses
594                },
595                "memory_monitoring": {
596                    "current_usage_mb": self.memory_monitor.current_usage_mb,
597                    "peak_usage_mb": self.memory_monitor.peak_usage_mb,
598                    "pressure_level": format!("{:?}", memory_pressure),
599                    "trend": memory_trend.map(|t| format!("{t:?}")).unwrap_or_else(|| "Unknown".to_string())
600                },
601                "performance_trend": performance_trend.unwrap_or_else(|| "Insufficient data".to_string()),
602                "optimization_recommendations": self.get_optimization_recommendations()
603            }
604        })
605    }
606
607    /// Get optimization recommendations based on current metrics
608    fn get_optimization_recommendations(&self) -> Vec<String> {
609        let mut recommendations = Vec::new();
610
611        let (_, _, cache_hit_ratio) = self.type_cache.get_stats();
612        if cache_hit_ratio < 0.7 {
613            recommendations.push("Consider increasing cache size for better hit ratio".to_string());
614        }
615
616        match self.memory_monitor.get_memory_pressure() {
617            MemoryPressureLevel::Critical => {
618                recommendations.push(
619                    "URGENT: Reduce batch sizes and enable streaming to reduce memory pressure"
620                        .to_string(),
621                );
622            }
623            MemoryPressureLevel::High => {
624                recommendations
625                    .push("Consider reducing batch sizes or enabling compression".to_string());
626            }
627            _ => {}
628        }
629
630        if let Some(MemoryTrend::Increasing) = self.memory_monitor.get_usage_trend() {
631            recommendations
632                .push("Memory usage is trending upward - monitor for potential leaks".to_string());
633        }
634
635        if recommendations.is_empty() {
636            recommendations.push("Performance is optimal - no recommendations".to_string());
637        }
638
639        recommendations
640    }
641
642    /// Enable or disable adaptive optimization
643    pub fn set_optimization_enabled(&mut self, enabled: bool) {
644        self.optimization_enabled = enabled;
645        tracing::info!(
646            "๐Ÿ”ง Adaptive optimization {}",
647            if enabled { "enabled" } else { "disabled" }
648        );
649    }
650
651    /// Clear all caches and reset metrics
652    pub fn reset(&mut self) {
653        self.type_cache.clear();
654        self.memory_monitor = MemoryUsageMonitor::new();
655        self.start_time = Instant::now();
656        tracing::info!("๐Ÿ”„ Adaptive performance optimizer reset");
657    }
658}
659
660impl Default for AdaptivePerformanceOptimizer {
661    fn default() -> Self {
662        Self::new(1000, 500) // Default: 1000 batch size, 500 cache entries
663    }
664}
665
666#[cfg(test)]
667mod tests {
668    use super::*;
669    use std::thread;
670    use std::time::Duration;
671
    // Default construction: all counters zero, timestamp freshly captured.
    #[test]
    fn test_performance_metrics_default() {
        let metrics = PerformanceMetrics::default();
        assert_eq!(metrics.processing_time_ms, 0);
        assert_eq!(metrics.memory_usage_mb, 0);
        assert_eq!(metrics.allocations_per_second, 0.0);
        assert_eq!(metrics.cache_hit_ratio, 0.0);
        assert_eq!(metrics.batch_efficiency, 0.0);
        // timestamp should be recent
        assert!(metrics.timestamp.elapsed().as_secs() < 1);
    }
683
    // The constructor must honor an in-range seed batch size.
    #[test]
    fn test_adaptive_batch_controller_creation() {
        let controller = AdaptiveBatchController::new(500);
        assert_eq!(controller.get_optimal_batch_size(), 500);
    }
689
    // Batches faster than half the 10ms target may only grow (never shrink)
    // the batch size.
    #[test]
    fn test_batch_controller_performance_recording() {
        let mut controller = AdaptiveBatchController::new(1000);

        let metrics = PerformanceMetrics {
            processing_time_ms: 5, // Fast processing
            memory_usage_mb: 100,
            allocations_per_second: 1000.0,
            cache_hit_ratio: 0.8,
            batch_efficiency: 0.9,
            timestamp: Instant::now(),
        };

        let initial_size = controller.get_optimal_batch_size();
        controller.record_performance(metrics);

        // Fast processing should potentially increase batch size
        let new_size = controller.get_optimal_batch_size();
        assert!(new_size >= initial_size);
    }
710
    // Batches slower than the 10ms target must shrink the batch size.
    #[test]
    fn test_batch_controller_slow_processing_adjustment() {
        let mut controller = AdaptiveBatchController::new(1000);

        let slow_metrics = PerformanceMetrics {
            processing_time_ms: 50, // Slow processing (target is 10ms)
            memory_usage_mb: 100,
            allocations_per_second: 100.0,
            cache_hit_ratio: 0.5,
            batch_efficiency: 0.7,
            timestamp: Instant::now(),
        };

        let initial_size = controller.get_optimal_batch_size();
        controller.record_performance(slow_metrics);

        // Slow processing should reduce batch size
        let new_size = controller.get_optimal_batch_size();
        assert!(new_size < initial_size);
    }
731
    // Memory usage above the 500MB threshold shrinks the batch size even
    // when processing time alone would have grown it.
    #[test]
    fn test_batch_controller_memory_pressure_adjustment() {
        let mut controller = AdaptiveBatchController::new(1000);

        let high_memory_metrics = PerformanceMetrics {
            processing_time_ms: 5, // Fast processing
            memory_usage_mb: 600,  // High memory usage (>500MB threshold)
            allocations_per_second: 1000.0,
            cache_hit_ratio: 0.8,
            batch_efficiency: 0.9,
            timestamp: Instant::now(),
        };

        let initial_size = controller.get_optimal_batch_size();
        controller.record_performance(high_memory_metrics);

        // High memory usage should reduce batch size despite fast processing
        let new_size = controller.get_optimal_batch_size();
        assert!(new_size < initial_size);
    }
752
    // Trend analysis reports nothing without enough history; with ten
    // samples of rising timings it must classify as degrading or stable.
    #[test]
    fn test_batch_controller_performance_trend() {
        let mut controller = AdaptiveBatchController::new(1000);

        // Add insufficient data first
        assert!(controller.get_performance_trend().is_none());

        // Add enough metrics for trend analysis
        for i in 0..10 {
            let metrics = PerformanceMetrics {
                processing_time_ms: 10 + i, // Gradually increasing processing time
                memory_usage_mb: 100,
                allocations_per_second: 1000.0,
                cache_hit_ratio: 0.8,
                batch_efficiency: 0.9,
                timestamp: Instant::now(),
            };
            controller.record_performance(metrics);
            thread::sleep(Duration::from_millis(1)); // Small delay for timestamp differences
        }

        let trend = controller.get_performance_trend();
        assert!(trend.is_some());
        let trend_str = trend.unwrap();
        assert!(trend_str == "Performance degrading" || trend_str == "Performance stable");
    }
779
    // A fresh cache starts with zeroed statistics.
    #[test]
    fn test_type_info_cache_creation() {
        let cache = TypeInfoCache::new(100);
        let (hits, misses, hit_ratio) = cache.get_stats();
        assert_eq!(hits, 0);
        assert_eq!(misses, 0);
        assert_eq!(hit_ratio, 0.0);
    }
788
    // Round-trip: a miss before storing, a hit (with the stored value and
    // updated stats) afterwards.
    #[test]
    fn test_type_info_cache_store_and_get() {
        let cache = TypeInfoCache::new(100);
        let type_name = "TestType".to_string();
        let test_info = serde_json::json!({"name": "TestType", "size": 64});

        // Initially should be a cache miss
        assert!(cache.get(&type_name).is_none());
        let (_, misses, _) = cache.get_stats();
        assert_eq!(misses, 1);

        // Store the information
        cache.store(type_name.clone(), test_info.clone());

        // Now should be a cache hit
        let retrieved = cache.get(&type_name);
        assert!(retrieved.is_some());
        assert_eq!(retrieved.unwrap(), test_info);

        let (hits, _, hit_ratio) = cache.get_stats();
        assert_eq!(hits, 1);
        assert!(hit_ratio > 0.0);
    }
812
    // clear() must drop both the stored entries and the statistics.
    #[test]
    fn test_type_info_cache_clear() {
        let cache = TypeInfoCache::new(100);
        let test_info = serde_json::json!({"name": "TestType"});

        cache.store("TestType".to_string(), test_info);
        assert!(cache.get("TestType").is_some());

        cache.clear();
        assert!(cache.get("TestType").is_none());

        let (hits, misses, hit_ratio) = cache.get_stats();
        assert_eq!(hits, 0);
        assert_eq!(misses, 1); // The get after clear is a miss
        assert_eq!(hit_ratio, 0.0);
    }
829
    // A fresh monitor reports low pressure and no trend.
    #[test]
    fn test_memory_usage_monitor_creation() {
        let monitor = MemoryUsageMonitor::new();
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Low);
        assert!(monitor.get_usage_trend().is_none()); // Insufficient data
    }
836
    // Pressure levels track the 512/1024/2048 MB threshold bands.
    #[test]
    fn test_memory_usage_monitor_pressure_levels() {
        let mut monitor = MemoryUsageMonitor::new();

        // Test low pressure
        monitor.update_usage(100);
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Low);

        // Test medium pressure
        monitor.update_usage(600); // Between 512 (warning/2) and 1024 (warning)
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Medium);

        // Test high pressure
        monitor.update_usage(1500); // Between 1024 (warning) and 2048 (critical)
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::High);

        // Test critical pressure
        monitor.update_usage(3000); // Above 2048 (critical)
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Critical);
    }
857
    // Trend needs at least ten samples; steadily growing usage must be
    // classified as Increasing.
    #[test]
    fn test_memory_usage_monitor_trend() {
        let mut monitor = MemoryUsageMonitor::new();

        // Add insufficient data first
        for i in 0..5 {
            monitor.update_usage(100 + i * 10);
        }
        assert!(monitor.get_usage_trend().is_none());

        // Add enough data for trend analysis - increasing trend
        for i in 5..15 {
            monitor.update_usage(100 + i * 20); // Increasing usage
            thread::sleep(Duration::from_millis(1));
        }

        let trend = monitor.get_usage_trend();
        assert!(trend.is_some());
        assert_eq!(trend.unwrap(), MemoryTrend::Increasing);
    }
878
    // Sanity check on the derived PartialEq for MemoryPressureLevel.
    #[test]
    fn test_memory_pressure_level_equality() {
        assert_eq!(MemoryPressureLevel::Low, MemoryPressureLevel::Low);
        assert_ne!(MemoryPressureLevel::Low, MemoryPressureLevel::High);
    }
884
    // Sanity check on the derived PartialEq for MemoryTrend.
    #[test]
    fn test_memory_trend_equality() {
        assert_eq!(MemoryTrend::Stable, MemoryTrend::Stable);
        assert_ne!(MemoryTrend::Increasing, MemoryTrend::Decreasing);
    }
890
    // Constructor seeds the batch size and starts with optimization on.
    #[test]
    fn test_adaptive_performance_optimizer_creation() {
        let optimizer = AdaptivePerformanceOptimizer::new(500, 100);
        assert_eq!(optimizer.get_optimal_batch_size(), 500);
        assert!(optimizer.optimization_enabled);
    }
897
898    #[test]
899    fn test_adaptive_performance_optimizer_default() {
900        let optimizer = AdaptivePerformanceOptimizer::default();
901        assert_eq!(optimizer.get_optimal_batch_size(), 1000);
902        assert!(optimizer.optimization_enabled);
903    }
904
905    #[test]
906    fn test_optimizer_batch_performance_recording() {
907        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
908
909        optimizer.record_batch_performance(500, Duration::from_millis(5), 100, 450);
910
911        // Should not panic and should update internal state
912        assert!(optimizer.get_optimal_batch_size() > 0);
913    }
914
915    #[test]
916    fn test_optimizer_memory_pressure_adjustment() {
917        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
918
919        // Simulate high memory pressure
920        optimizer.record_batch_performance(
921            1000,
922            Duration::from_millis(5),
923            1500, // High memory usage
924            900,
925        );
926
927        // Should reduce batch size due to memory pressure
928        let batch_size = optimizer.get_optimal_batch_size();
929        assert!(batch_size < 1000);
930    }
931
932    #[test]
933    fn test_optimizer_type_caching() {
934        let optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
935        let type_name = "TestType";
936        let test_info = serde_json::json!({"name": "TestType", "size": 64});
937
938        // Initially should be cache miss
939        assert!(optimizer.get_cached_type_info(type_name).is_none());
940
941        // Cache the information
942        optimizer.cache_type_info(type_name.to_string(), test_info.clone());
943
944        // Should now be cache hit
945        let cached = optimizer.get_cached_type_info(type_name);
946        assert!(cached.is_some());
947        assert_eq!(cached.unwrap(), test_info);
948    }
949
950    #[test]
951    fn test_optimizer_performance_report() {
952        let optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
953        let report = optimizer.get_performance_report();
954
955        assert!(report.is_object());
956        let adaptive_opt = &report["adaptive_optimization"];
957        assert!(adaptive_opt["enabled"].as_bool().unwrap());
958        assert!(adaptive_opt["current_batch_size"].as_u64().unwrap() > 0);
959        assert!(adaptive_opt["cache_statistics"].is_object());
960        assert!(adaptive_opt["memory_monitoring"].is_object());
961    }
962
963    #[test]
964    fn test_optimizer_enable_disable() {
965        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
966
967        assert!(optimizer.optimization_enabled);
968
969        optimizer.set_optimization_enabled(false);
970        assert!(!optimizer.optimization_enabled);
971
972        // When disabled, should return default batch size
973        assert_eq!(optimizer.get_optimal_batch_size(), 1000);
974
975        // Caching should not work when disabled
976        assert!(optimizer.get_cached_type_info("TestType").is_none());
977
978        optimizer.set_optimization_enabled(true);
979        assert!(optimizer.optimization_enabled);
980    }
981
982    #[test]
983    fn test_optimizer_reset() {
984        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
985
986        // Add some data
987        optimizer.cache_type_info("TestType".to_string(), serde_json::json!({"test": true}));
988        optimizer.record_batch_performance(500, Duration::from_millis(10), 200, 450);
989
990        // Reset should clear everything
991        optimizer.reset();
992
993        // Cache should be empty
994        assert!(optimizer.get_cached_type_info("TestType").is_none());
995
996        // Should still be functional
997        assert!(optimizer.get_optimal_batch_size() > 0);
998    }
999
1000    #[test]
1001    fn test_cache_eviction() {
1002        let cache = TypeInfoCache::new(2); // Very small cache for testing eviction
1003
1004        // Fill cache to capacity
1005        cache.store("Type1".to_string(), serde_json::json!({"id": 1}));
1006        cache.store("Type2".to_string(), serde_json::json!({"id": 2}));
1007
1008        // Both should be retrievable
1009        assert!(cache.get("Type1").is_some());
1010        assert!(cache.get("Type2").is_some());
1011
1012        // Add a third item, should evict the least recently used
1013        cache.store("Type3".to_string(), serde_json::json!({"id": 3}));
1014
1015        // Type3 should be available
1016        assert!(cache.get("Type3").is_some());
1017
1018        // At least one of the original types should still be available
1019        let type1_available = cache.get("Type1").is_some();
1020        let type2_available = cache.get("Type2").is_some();
1021        assert!(type1_available || type2_available);
1022    }
1023
1024    #[test]
1025    fn test_memory_monitor_comprehensive() {
1026        // Test memory monitoring logic without using the actual MemoryMonitor struct
1027        let mut current_usage = 0usize;
1028        let mut peak_usage = 0usize;
1029
1030        // Test initial state
1031        assert_eq!(current_usage, 0);
1032        assert_eq!(peak_usage, 0);
1033
1034        // Test memory allocation tracking
1035        current_usage += 1024;
1036        peak_usage = peak_usage.max(current_usage);
1037        assert_eq!(current_usage, 1024);
1038        assert_eq!(peak_usage, 1024);
1039
1040        current_usage += 2048;
1041        peak_usage = peak_usage.max(current_usage);
1042        assert_eq!(current_usage, 3072);
1043        assert_eq!(peak_usage, 3072);
1044
1045        // Test memory deallocation
1046        current_usage = current_usage.saturating_sub(1024);
1047        assert_eq!(current_usage, 2048);
1048        assert_eq!(peak_usage, 3072); // Peak should remain
1049
1050        // Test large allocation
1051        current_usage += 10240;
1052        peak_usage = peak_usage.max(current_usage);
1053        assert_eq!(current_usage, 12288);
1054        assert_eq!(peak_usage, 12288);
1055
1056        // Test reset
1057        current_usage = 0;
1058        peak_usage = 0;
1059        assert_eq!(current_usage, 0);
1060        assert_eq!(peak_usage, 0);
1061    }
1062
1063    #[test]
1064    fn test_memory_monitor_edge_cases() {
1065        let mut current_usage = 0usize;
1066
1067        // Test zero allocations
1068        current_usage += 0;
1069        assert_eq!(current_usage, 0);
1070
1071        // Test very large allocation
1072        current_usage += usize::MAX / 2;
1073        assert_eq!(current_usage, usize::MAX / 2);
1074
1075        // Test deallocation larger than current usage
1076        current_usage = current_usage.saturating_sub(usize::MAX);
1077        // Should handle gracefully (saturating_sub clamps to 0)
1078        assert_eq!(current_usage, 0);
1079    }
1080
1081    #[test]
1082    fn test_batch_size_calculator_comprehensive() {
1083        // Test batch size calculation logic without using the actual BatchSizeCalculator
1084        let mut current_batch_size = 1000usize;
1085        let min_batch_size = 100usize;
1086
1087        // Test initial state
1088        assert_eq!(current_batch_size, 1000);
1089
1090        // Test performance recording with good performance (fast execution)
1091        let efficiency = 450.0 / 500.0; // 90% efficiency
1092        if efficiency > 0.8 {
1093            current_batch_size = (current_batch_size as f64 * 1.1) as usize; // Increase by 10%
1094        }
1095        assert!(current_batch_size >= 1000); // Should maintain or increase
1096
1097        // Test performance recording with poor performance (slow execution)
1098        let poor_efficiency = 200.0 / 1000.0; // 20% efficiency
1099        if poor_efficiency < 0.5 {
1100            current_batch_size = (current_batch_size as f64 * 0.8) as usize; // Decrease by 20%
1101        }
1102        let poor_perf_size = current_batch_size;
1103        assert!(poor_perf_size <= 1100); // Should decrease from previous
1104
1105        // Test minimum batch size enforcement
1106        for _ in 0..20 {
1107            let very_poor_efficiency = 10.0 / 50.0; // 20% efficiency
1108            if very_poor_efficiency < 0.5 {
1109                current_batch_size = (current_batch_size as f64 * 0.9) as usize;
1110                current_batch_size = current_batch_size.max(min_batch_size);
1111            }
1112        }
1113        assert!(current_batch_size >= min_batch_size); // Should not go below minimum
1114
1115        // Test maximum batch size enforcement
1116        current_batch_size = 1000; // Reset
1117        for _ in 0..20 {
1118            let excellent_efficiency = 1950.0 / 2000.0; // 97.5% efficiency
1119            if excellent_efficiency > 0.9 {
1120                current_batch_size = (current_batch_size as f64 * 1.1) as usize;
1121                current_batch_size = current_batch_size.min(10000); // Cap at reasonable maximum
1122            }
1123        }
1124        // Should not exceed reasonable maximum
1125        assert!(current_batch_size <= 10000);
1126    }
1127
1128    #[test]
1129    fn test_batch_size_calculator_edge_cases() {
1130        let mut current_batch_size = 1000usize;
1131
1132        // Test with zero duration (should handle gracefully)
1133        let _zero_duration_efficiency = if Duration::from_millis(0).as_millis() == 0 {
1134            1.0 // Assume perfect efficiency for zero duration
1135        } else {
1136            450.0 / 500.0
1137        };
1138        assert!(current_batch_size > 0);
1139
1140        // Test with very high memory usage (should decrease batch size)
1141        let high_memory_usage = usize::MAX / 2;
1142        if high_memory_usage > 1024 * 1024 * 100 {
1143            // If > 100MB
1144            current_batch_size = (current_batch_size as f64 * 0.5) as usize; // Halve batch size
1145        }
1146        assert!(current_batch_size <= 1000); // Should decrease due to high memory
1147
1148        // Test with zero processed items (should handle gracefully)
1149        let zero_processed_efficiency = 0.0 / 500.0; // 0% efficiency
1150        if zero_processed_efficiency == 0.0 {
1151            current_batch_size = current_batch_size.max(100); // Maintain minimum
1152        }
1153        assert!(current_batch_size > 0); // Should handle gracefully
1154    }
1155
1156    #[test]
1157    fn test_type_info_cache_comprehensive() {
1158        let cache = TypeInfoCache::new(10);
1159
1160        // Test storing and retrieving various JSON types
1161        cache.store("String".to_string(), serde_json::json!("test"));
1162        cache.store("Number".to_string(), serde_json::json!(42));
1163        cache.store("Boolean".to_string(), serde_json::json!(true));
1164        cache.store("Array".to_string(), serde_json::json!([1, 2, 3]));
1165        cache.store("Object".to_string(), serde_json::json!({"key": "value"}));
1166        cache.store("Null".to_string(), serde_json::json!(null));
1167
1168        // Verify all types are retrievable
1169        assert_eq!(cache.get("String").unwrap().as_str().unwrap(), "test");
1170        assert_eq!(cache.get("Number").unwrap().as_i64().unwrap(), 42);
1171        assert!(cache.get("Boolean").unwrap().as_bool().unwrap());
1172        assert_eq!(cache.get("Array").unwrap().as_array().unwrap().len(), 3);
1173        assert!(cache
1174            .get("Object")
1175            .unwrap()
1176            .as_object()
1177            .unwrap()
1178            .contains_key("key"));
1179        assert!(cache.get("Null").unwrap().is_null());
1180
1181        // Test cache operations (size method doesn't exist, so test differently)
1182        // Verify we can retrieve all stored items
1183        assert!(cache.get("String").is_some());
1184        assert!(cache.get("Number").is_some());
1185        assert!(cache.get("Boolean").is_some());
1186        assert!(cache.get("Array").is_some());
1187        assert!(cache.get("Object").is_some());
1188        assert!(cache.get("Null").is_some());
1189
1190        // Test cache clearing
1191        cache.clear();
1192        assert!(cache.get("String").is_none());
1193        assert!(cache.get("String").is_none());
1194    }
1195
    #[test]
    fn test_adaptive_performance_optimizer_stress() {
        // Hammers the optimizer with 1000 varied batch recordings plus 100
        // cache insertions, then checks it is still internally consistent.
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        // Stress test with many performance recordings
        // NOTE: `i` is pinned to u64 by Duration::from_millis, so batch_size
        // and processed are cast to usize for the call while memory_before is
        // passed through unchanged (presumably a u64 parameter — the casts
        // below are what make inference work; keep them if editing).
        for i in 0..1000 {
            let batch_size = 500 + (i % 500);
            let duration = Duration::from_millis(1 + (i % 100));
            let memory_before = i * 1024;
            // processed is always <= batch_size (at most 49 below it).
            let processed = batch_size - (i % 50);

            optimizer.record_batch_performance(
                batch_size as usize,
                duration,
                memory_before,
                processed as usize,
            );

            // Cache some type info — 100 distinct entries (i = 0, 10, ..., 990),
            // matching the cache capacity passed to new() above.
            if i % 10 == 0 {
                let type_name = format!("Type_{}", i);
                let type_info = serde_json::json!({"size": i, "complexity": i % 5});
                optimizer.cache_type_info(type_name, type_info);
            }
        }

        // Verify optimizer is still functional (very flexible due to stress testing)
        let optimal_size = optimizer.get_optimal_batch_size();
        assert!(
            optimal_size > 0,
            "Optimal size should be positive, got {}",
            optimal_size
        );
        assert!(
            optimal_size <= 20000,
            "Optimal size should be reasonable, got {}",
            optimal_size
        );

        // Verify cache is working: first and last inserted entries survive
        // (all 100 fit within the configured capacity).
        assert!(optimizer.get_cached_type_info("Type_0").is_some());
        assert!(optimizer.get_cached_type_info("Type_990").is_some());

        // Test performance report generation
        let report = optimizer.get_performance_report();
        assert!(report.is_object());
    }
1243
1244    #[test]
1245    fn test_adaptive_performance_optimizer_memory_pressure() {
1246        let mut optimizer = AdaptivePerformanceOptimizer::new(2000, 200);
1247
1248        // Simulate memory pressure scenarios
1249        let scenarios = vec![
1250            (1000, Duration::from_millis(50), 1024 * 1024, 950), // Normal
1251            (1000, Duration::from_millis(100), 10 * 1024 * 1024, 900), // High memory
1252            (1000, Duration::from_millis(200), 100 * 1024 * 1024, 800), // Very high memory
1253            (1000, Duration::from_millis(500), 1024 * 1024 * 1024, 700), // Extreme memory
1254        ];
1255
1256        let mut previous_size = optimizer.get_optimal_batch_size();
1257
1258        for (batch_size, duration, memory, processed) in scenarios {
1259            optimizer.record_batch_performance(batch_size, duration, memory, processed);
1260            let current_size = optimizer.get_optimal_batch_size();
1261
1262            // Under memory pressure, batch size should generally decrease or stay stable
1263            if memory > 50 * 1024 * 1024 {
1264                // If memory usage is very high
1265                assert!(current_size <= previous_size * 2); // Allow some flexibility
1266            }
1267
1268            previous_size = current_size;
1269        }
1270
1271        // Final batch size should be reasonable (allow flexibility due to memory pressure)
1272        let final_size = optimizer.get_optimal_batch_size();
1273        assert!(
1274            final_size > 0,
1275            "Batch size should be positive, got {}",
1276            final_size
1277        );
1278    }
1279
    #[test]
    fn test_performance_metrics_calculation() {
        // Feeds four batches of declining efficiency, then sanity-checks the
        // structure of the generated performance report.
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        // Record various performance scenarios
        // (batch_size, duration_ms, memory, processed)
        let test_cases = vec![
            (500, 10, 1024, 500),  // Perfect efficiency
            (500, 20, 2048, 450),  // Good efficiency
            (500, 50, 4096, 400),  // Moderate efficiency
            (500, 100, 8192, 300), // Poor efficiency
        ];

        for (batch_size, duration_ms, memory, processed) in test_cases {
            optimizer.record_batch_performance(
                batch_size,
                Duration::from_millis(duration_ms),
                memory,
                processed,
            );
        }

        let report = optimizer.get_performance_report();
        let adaptive_opt = &report["adaptive_optimization"];

        // Verify report structure
        assert!(adaptive_opt["enabled"].as_bool().unwrap());
        assert!(adaptive_opt["current_batch_size"].as_u64().unwrap() > 0);
        assert!(adaptive_opt["cache_statistics"].is_object());
        assert!(adaptive_opt["memory_monitoring"].is_object());

        // Check if cache_statistics exists and has expected structure
        // NOTE(review): the `if let Some(_size)` arm below is intentionally a
        // no-op — the report schema is not guaranteed here, so only the
        // capacity field (when present) is actually asserted on.
        if let Some(cache_stats) = adaptive_opt.get("cache_statistics") {
            if cache_stats.is_object() {
                // Only check if the fields exist and are valid
                if let Some(_size) = cache_stats.get("size") {
                    // Size is always non-negative for u64 type
                }
                if let Some(capacity) = cache_stats.get("capacity") {
                    assert!(capacity.as_u64().unwrap_or(1) > 0);
                }
            }
        }

        // Check if memory_monitoring exists and has expected structure
        // NOTE(review): both arms below are presence checks only — no value
        // assertions, since u64 fields are non-negative by construction.
        if let Some(memory_stats) = adaptive_opt.get("memory_monitoring") {
            if memory_stats.is_object() {
                // Only check if the fields exist and are valid
                if let Some(_current) = memory_stats.get("current_usage") {
                    // Current usage is always non-negative for u64 type
                }
                if let Some(_peak) = memory_stats.get("peak_usage") {
                    // Peak usage is always non-negative for u64 type
                }
            }
        }
    }
1336
1337    #[test]
1338    fn test_optimizer_configuration_changes() {
1339        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
1340
1341        // Test changing optimization settings
1342        optimizer.set_optimization_enabled(false);
1343        let disabled_size = optimizer.get_optimal_batch_size();
1344        assert_eq!(disabled_size, 1000); // Should return default
1345
1346        optimizer.set_optimization_enabled(true);
1347
1348        // Record some performance to change optimal size
1349        optimizer.record_batch_performance(750, Duration::from_millis(5), 1024, 750);
1350        let enabled_size = optimizer.get_optimal_batch_size();
1351
1352        // Should be able to adapt when enabled
1353        assert!(enabled_size > 0);
1354
1355        // Test reset functionality
1356        optimizer.reset();
1357        let reset_size = optimizer.get_optimal_batch_size();
1358        assert!(reset_size > 0);
1359    }
1360}