use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

#[derive(Debug, Clone)]
pub struct PerformanceMetrics {
    pub processing_time_ms: u64,
    pub memory_usage_mb: u64,
    pub allocations_per_second: f64,
    pub cache_hit_ratio: f64,
    pub batch_efficiency: f64,
    pub timestamp: Instant,
}

impl Default for PerformanceMetrics {
    fn default() -> Self {
        Self {
            processing_time_ms: 0,
            memory_usage_mb: 0,
            allocations_per_second: 0.0,
            cache_hit_ratio: 0.0,
            batch_efficiency: 0.0,
            timestamp: Instant::now(),
        }
    }
}

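/// Adjusts the batch size from observed processing times: batches shrink when
/// processing overshoots the target latency, grow when it runs well under it,
/// and shrink again under memory pressure (see `adjust_batch_size`).
///
/// A minimal sketch of the intended feedback loop; `process_chunk` and
/// `sample_memory_mb` are hypothetical caller-side helpers, not part of this
/// module:
///
/// ```ignore
/// let mut controller = AdaptiveBatchController::new(1_000);
/// loop {
///     let batch_size = controller.get_optimal_batch_size();
///     let start = Instant::now();
///     let processed = process_chunk(batch_size);
///     controller.record_performance(PerformanceMetrics {
///         processing_time_ms: start.elapsed().as_millis() as u64,
///         memory_usage_mb: sample_memory_mb(),
///         allocations_per_second: processed as f64 / start.elapsed().as_secs_f64(),
///         ..Default::default()
///     });
/// }
/// ```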
#[derive(Debug)]
pub struct AdaptiveBatchController {
    current_batch_size: usize,
    min_batch_size: usize,
    max_batch_size: usize,
    target_processing_time_ms: u64,
    performance_history: Vec<PerformanceMetrics>,
    adjustment_factor: f64,
}

impl AdaptiveBatchController {
    pub fn new(initial_batch_size: usize) -> Self {
        Self {
            current_batch_size: initial_batch_size,
            min_batch_size: 100,
            max_batch_size: 10000,
            target_processing_time_ms: 10,
            performance_history: Vec::with_capacity(100),
            adjustment_factor: 1.2,
        }
    }

    pub fn get_optimal_batch_size(&self) -> usize {
        self.current_batch_size
    }

    pub fn record_performance(&mut self, metrics: PerformanceMetrics) {
        tracing::info!(
            "Recording performance: {}ms, {} allocs/sec, batch_size: {}",
            metrics.processing_time_ms,
            metrics.allocations_per_second as u64,
            self.current_batch_size
        );

        self.performance_history.push(metrics.clone());

        if self.performance_history.len() > 50 {
            self.performance_history.remove(0);
        }

        self.adjust_batch_size(&metrics);
    }

    fn adjust_batch_size(&mut self, current_metrics: &PerformanceMetrics) {
        let old_batch_size = self.current_batch_size;

        if current_metrics.processing_time_ms > self.target_processing_time_ms {
            let reduction_factor = (current_metrics.processing_time_ms as f64
                / self.target_processing_time_ms as f64)
                .min(2.0);
            self.current_batch_size = ((self.current_batch_size as f64 / reduction_factor)
                as usize)
                .max(self.min_batch_size);

            tracing::info!(
                "Reducing batch size: {} -> {} (processing too slow: {}ms)",
                old_batch_size,
                self.current_batch_size,
                current_metrics.processing_time_ms
            );
        } else if current_metrics.processing_time_ms < self.target_processing_time_ms / 2 {
            self.current_batch_size = ((self.current_batch_size as f64 * self.adjustment_factor)
                as usize)
                .min(self.max_batch_size);

            tracing::info!(
                "Increasing batch size: {} -> {} (processing fast: {}ms)",
                old_batch_size,
                self.current_batch_size,
                current_metrics.processing_time_ms
            );
        }

        if current_metrics.memory_usage_mb > 500 {
            self.current_batch_size = (self.current_batch_size * 3 / 4).max(self.min_batch_size);
            tracing::info!(
                "Reducing batch size due to memory pressure: {} -> {} ({}MB)",
                old_batch_size,
                self.current_batch_size,
                current_metrics.memory_usage_mb
            );
        }
    }

    pub fn get_performance_trend(&self) -> Option<String> {
        // Two full five-sample windows are needed for a meaningful comparison;
        // with fewer than 10 samples the older window would be empty or short.
        if self.performance_history.len() < 10 {
            return None;
        }

        let recent_avg = self
            .performance_history
            .iter()
            .rev()
            .take(5)
            .map(|m| m.processing_time_ms)
            .sum::<u64>() as f64
            / 5.0;

        let older_avg = self
            .performance_history
            .iter()
            .rev()
            .skip(5)
            .take(5)
            .map(|m| m.processing_time_ms)
            .sum::<u64>() as f64
            / 5.0;

        let trend_ratio = recent_avg / older_avg;

        if trend_ratio > 1.2 {
            Some("Performance degrading".to_string())
        } else if trend_ratio < 0.8 {
            Some("Performance improving".to_string())
        } else {
            Some("Performance stable".to_string())
        }
    }
}

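/// A thread-safe, LRU-evicting cache for computed per-type metadata.
///
/// A minimal usage sketch:
///
/// ```ignore
/// let cache = TypeInfoCache::new(1_000);
/// cache.store("MyType".to_string(), serde_json::json!({ "size": 64 }));
/// if let Some(info) = cache.get("MyType") {
///     // cache hit: reuse the previously computed info
/// }
/// let (hits, misses, hit_ratio) = cache.get_stats();
/// ```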
#[derive(Debug)]
pub struct TypeInfoCache {
    cache: Arc<RwLock<HashMap<String, CachedTypeInfo>>>,
    cache_stats: Arc<Mutex<CacheStats>>,
    max_cache_size: usize,
}

#[derive(Debug, Clone)]
#[allow(dead_code)]
struct CachedTypeInfo {
    type_name: String,
    size_hint: Option<usize>,
    complexity_score: u32,
    access_count: u64,
    last_accessed: Instant,
    computed_info: serde_json::Value,
}

#[derive(Debug, Default)]
struct CacheStats {
    hits: u64,
    misses: u64,
    evictions: u64,
}

impl TypeInfoCache {
    pub fn new(max_size: usize) -> Self {
        Self {
            cache: Arc::new(RwLock::new(HashMap::new())),
            cache_stats: Arc::new(Mutex::new(CacheStats::default())),
            max_cache_size: max_size,
        }
    }

    pub fn get(&self, type_name: &str) -> Option<serde_json::Value> {
        let cached_value = {
            let cache = self.cache.read().ok()?;
            cache.get(type_name).map(|info| info.computed_info.clone())
        };

        if cached_value.is_some() {
            if let Ok(mut stats) = self.cache_stats.lock() {
                stats.hits += 1;
            }

            if let Ok(mut cache) = self.cache.write() {
                if let Some(info) = cache.get_mut(type_name) {
                    info.access_count += 1;
                    info.last_accessed = Instant::now();
                }
            }

            cached_value
        } else {
            if let Ok(mut stats) = self.cache_stats.lock() {
                stats.misses += 1;
            }
            None
        }
    }

    pub fn store(&self, type_name: String, computed_info: serde_json::Value) {
        if let Ok(mut cache) = self.cache.write() {
            if cache.len() >= self.max_cache_size {
                self.evict_lru(&mut cache);
            }

            let cached_info = CachedTypeInfo {
                type_name: type_name.clone(),
                size_hint: None,
                complexity_score: self.compute_complexity_score(&computed_info),
                access_count: 1,
                last_accessed: Instant::now(),
                computed_info,
            };

            cache.insert(type_name, cached_info);
        }
    }

    fn evict_lru(&self, cache: &mut HashMap<String, CachedTypeInfo>) {
        if cache.is_empty() {
            return;
        }

        let lru_key = cache
            .iter()
            .min_by_key(|(_, info)| info.last_accessed)
            .map(|(key, _)| key.clone());

        if let Some(key) = lru_key {
            cache.remove(&key);
            if let Ok(mut stats) = self.cache_stats.lock() {
                stats.evictions += 1;
            }
            tracing::info!("Evicted LRU cache entry: {}", key);
        }
    }

    fn compute_complexity_score(&self, info: &serde_json::Value) -> u32 {
        match info {
            serde_json::Value::Object(obj) => obj.len() as u32 * 2,
            serde_json::Value::Array(arr) => arr.len() as u32,
            serde_json::Value::String(s) => s.len() as u32 / 10,
            _ => 1,
        }
    }

    pub fn get_stats(&self) -> (u64, u64, f64) {
        if let Ok(stats) = self.cache_stats.lock() {
            let total_requests = stats.hits + stats.misses;
            let hit_ratio = if total_requests > 0 {
                stats.hits as f64 / total_requests as f64
            } else {
                0.0
            };
            (stats.hits, stats.misses, hit_ratio)
        } else {
            (0, 0, 0.0)
        }
    }

    pub fn clear(&self) {
        if let Ok(mut cache) = self.cache.write() {
            cache.clear();
        }
        if let Ok(mut stats) = self.cache_stats.lock() {
            *stats = CacheStats::default();
        }
        tracing::info!("Type info cache cleared");
    }
}

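/// Tracks current and peak memory usage and classifies it into pressure
/// levels against fixed thresholds (warning: 1024 MB, critical: 2048 MB).
///
/// A minimal usage sketch:
///
/// ```ignore
/// let mut monitor = MemoryUsageMonitor::new();
/// monitor.update_usage(1_500); // MB
/// assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::High);
/// ```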
#[derive(Debug)]
pub struct MemoryUsageMonitor {
    peak_usage_mb: u64,
    current_usage_mb: u64,
    usage_history: Vec<(Instant, u64)>,
    warning_threshold_mb: u64,
    critical_threshold_mb: u64,
}

impl Default for MemoryUsageMonitor {
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryUsageMonitor {
    pub fn new() -> Self {
        Self {
            peak_usage_mb: 0,
            current_usage_mb: 0,
            usage_history: Vec::new(),
            warning_threshold_mb: 1024,
            critical_threshold_mb: 2048,
        }
    }

    pub fn update_usage(&mut self, usage_mb: u64) {
        self.current_usage_mb = usage_mb;
        self.peak_usage_mb = self.peak_usage_mb.max(usage_mb);

        self.usage_history.push((Instant::now(), usage_mb));

        if self.usage_history.len() > 100 {
            self.usage_history.remove(0);
        }

        // Log at a severity matching the threshold that was crossed.
        if usage_mb > self.critical_threshold_mb {
            tracing::error!(
                "CRITICAL: Memory usage {}MB exceeds critical threshold {}MB",
                usage_mb,
                self.critical_threshold_mb
            );
        } else if usage_mb > self.warning_threshold_mb {
            tracing::warn!(
                "WARNING: Memory usage {}MB exceeds warning threshold {}MB",
                usage_mb,
                self.warning_threshold_mb
            );
        }
    }

    pub fn get_memory_pressure(&self) -> MemoryPressureLevel {
        if self.current_usage_mb > self.critical_threshold_mb {
            MemoryPressureLevel::Critical
        } else if self.current_usage_mb > self.warning_threshold_mb {
            MemoryPressureLevel::High
        } else if self.current_usage_mb > self.warning_threshold_mb / 2 {
            MemoryPressureLevel::Medium
        } else {
            MemoryPressureLevel::Low
        }
    }

    pub fn get_usage_trend(&self) -> Option<MemoryTrend> {
        if self.usage_history.len() < 10 {
            return None;
        }

        let recent_avg = self
            .usage_history
            .iter()
            .rev()
            .take(5)
            .map(|(_, usage)| *usage)
            .sum::<u64>() as f64
            / 5.0;

        let older_avg = self
            .usage_history
            .iter()
            .rev()
            .skip(5)
            .take(5)
            .map(|(_, usage)| *usage)
            .sum::<u64>() as f64
            / 5.0;

        let trend_ratio = recent_avg / older_avg;

        if trend_ratio > 1.1 {
            Some(MemoryTrend::Increasing)
        } else if trend_ratio < 0.9 {
            Some(MemoryTrend::Decreasing)
        } else {
            Some(MemoryTrend::Stable)
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum MemoryPressureLevel {
    Low,
    Medium,
    High,
    Critical,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum MemoryTrend {
    Increasing,
    Decreasing,
    Stable,
}

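/// Combines adaptive batching, type-info caching, and memory monitoring
/// behind a single facade.
///
/// A minimal end-to-end sketch; `process_chunk` is a hypothetical caller-side
/// helper that returns the number of items processed and the memory used:
///
/// ```ignore
/// let mut optimizer = AdaptivePerformanceOptimizer::new(1_000, 500);
/// let batch_size = optimizer.get_optimal_batch_size();
/// let start = Instant::now();
/// let (processed, memory_mb) = process_chunk(batch_size);
/// optimizer.record_batch_performance(batch_size, start.elapsed(), memory_mb, processed);
/// println!("{}", optimizer.get_performance_report());
/// ```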
#[derive(Debug)]
pub struct AdaptivePerformanceOptimizer {
    batch_controller: AdaptiveBatchController,
    type_cache: TypeInfoCache,
    memory_monitor: MemoryUsageMonitor,
    optimization_enabled: bool,
    start_time: Instant,
}

impl AdaptivePerformanceOptimizer {
    pub fn new(initial_batch_size: usize, cache_size: usize) -> Self {
        tracing::info!("Initializing Adaptive Performance Optimizer");
        tracing::info!("  • Initial batch size: {}", initial_batch_size);
        tracing::info!("  • Cache size: {}", cache_size);

        Self {
            batch_controller: AdaptiveBatchController::new(initial_batch_size),
            type_cache: TypeInfoCache::new(cache_size),
            memory_monitor: MemoryUsageMonitor::new(),
            optimization_enabled: true,
            start_time: Instant::now(),
        }
    }

    pub fn get_optimal_batch_size(&self) -> usize {
        if !self.optimization_enabled {
            // Fixed default when adaptive tuning is off.
            return 1000;
        }

        let base_size = self.batch_controller.get_optimal_batch_size();

        // Scale the learned size down under memory pressure.
        match self.memory_monitor.get_memory_pressure() {
            MemoryPressureLevel::Critical => base_size / 4,
            MemoryPressureLevel::High => base_size / 2,
            MemoryPressureLevel::Medium => base_size * 3 / 4,
            MemoryPressureLevel::Low => base_size,
        }
    }

    pub fn record_batch_performance(
        &mut self,
        batch_size: usize,
        processing_time: Duration,
        memory_usage_mb: u64,
        allocations_processed: usize,
    ) {
        if !self.optimization_enabled {
            return;
        }

        let allocations_per_second = if processing_time.as_secs_f64() > 0.0 {
            allocations_processed as f64 / processing_time.as_secs_f64()
        } else {
            allocations_processed as f64 / 0.001
        };

        let (_cache_hits, _cache_misses, cache_hit_ratio) = self.type_cache.get_stats();

        let batch_efficiency = allocations_processed as f64 / batch_size as f64;

        let metrics = PerformanceMetrics {
            processing_time_ms: processing_time.as_millis() as u64,
            memory_usage_mb,
            allocations_per_second,
            cache_hit_ratio,
            batch_efficiency,
            timestamp: Instant::now(),
        };

        self.batch_controller.record_performance(metrics);
        self.memory_monitor.update_usage(memory_usage_mb);
    }

    pub fn get_cached_type_info(&self, type_name: &str) -> Option<serde_json::Value> {
        if !self.optimization_enabled {
            return None;
        }

        self.type_cache.get(type_name)
    }

    pub fn cache_type_info(&self, type_name: String, info: serde_json::Value) {
        if self.optimization_enabled {
            self.type_cache.store(type_name, info);
        }
    }

    pub fn get_performance_report(&self) -> serde_json::Value {
        let (cache_hits, cache_misses, cache_hit_ratio) = self.type_cache.get_stats();
        let memory_pressure = self.memory_monitor.get_memory_pressure();
        let memory_trend = self.memory_monitor.get_usage_trend();
        let performance_trend = self.batch_controller.get_performance_trend();

        serde_json::json!({
            "adaptive_optimization": {
                "enabled": self.optimization_enabled,
                "uptime_seconds": self.start_time.elapsed().as_secs(),
                "current_batch_size": self.get_optimal_batch_size(),
                "cache_statistics": {
                    "hits": cache_hits,
                    "misses": cache_misses,
                    "hit_ratio": cache_hit_ratio,
                    "total_requests": cache_hits + cache_misses
                },
                "memory_monitoring": {
                    "current_usage_mb": self.memory_monitor.current_usage_mb,
                    "peak_usage_mb": self.memory_monitor.peak_usage_mb,
                    "pressure_level": format!("{:?}", memory_pressure),
                    "trend": memory_trend.map(|t| format!("{t:?}")).unwrap_or_else(|| "Unknown".to_string())
                },
                "performance_trend": performance_trend.unwrap_or_else(|| "Insufficient data".to_string()),
                "optimization_recommendations": self.get_optimization_recommendations()
            }
        })
    }

    fn get_optimization_recommendations(&self) -> Vec<String> {
        let mut recommendations = Vec::new();

        let (_, _, cache_hit_ratio) = self.type_cache.get_stats();
        if cache_hit_ratio < 0.7 {
            recommendations.push("Consider increasing cache size for better hit ratio".to_string());
        }

        match self.memory_monitor.get_memory_pressure() {
            MemoryPressureLevel::Critical => {
                recommendations.push(
                    "URGENT: Reduce batch sizes and enable streaming to reduce memory pressure"
                        .to_string(),
                );
            }
            MemoryPressureLevel::High => {
                recommendations
                    .push("Consider reducing batch sizes or enabling compression".to_string());
            }
            _ => {}
        }

        if let Some(MemoryTrend::Increasing) = self.memory_monitor.get_usage_trend() {
            recommendations
                .push("Memory usage is trending upward - monitor for potential leaks".to_string());
        }

        if recommendations.is_empty() {
            recommendations.push("Performance is optimal - no recommendations".to_string());
        }

        recommendations
    }

    pub fn set_optimization_enabled(&mut self, enabled: bool) {
        self.optimization_enabled = enabled;
        tracing::info!(
            "Adaptive optimization {}",
            if enabled { "enabled" } else { "disabled" }
        );
    }

    pub fn reset(&mut self) {
        self.type_cache.clear();
        self.memory_monitor = MemoryUsageMonitor::new();
        self.start_time = Instant::now();
        // Note: the batch controller keeps its learned batch size across resets.
        tracing::info!("Adaptive performance optimizer reset");
    }
}

impl Default for AdaptivePerformanceOptimizer {
    fn default() -> Self {
        Self::new(1000, 500)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;

    #[test]
    fn test_performance_metrics_default() {
        let metrics = PerformanceMetrics::default();
        assert_eq!(metrics.processing_time_ms, 0);
        assert_eq!(metrics.memory_usage_mb, 0);
        assert_eq!(metrics.allocations_per_second, 0.0);
        assert_eq!(metrics.cache_hit_ratio, 0.0);
        assert_eq!(metrics.batch_efficiency, 0.0);
        assert!(metrics.timestamp.elapsed().as_secs() < 1);
    }

    #[test]
    fn test_adaptive_batch_controller_creation() {
        let controller = AdaptiveBatchController::new(500);
        assert_eq!(controller.get_optimal_batch_size(), 500);
    }

    #[test]
    fn test_batch_controller_performance_recording() {
        let mut controller = AdaptiveBatchController::new(1000);

        let metrics = PerformanceMetrics {
            // 5ms is at most half the 10ms target, so the size must not shrink.
            processing_time_ms: 5,
            memory_usage_mb: 100,
            allocations_per_second: 1000.0,
            cache_hit_ratio: 0.8,
            batch_efficiency: 0.9,
            timestamp: Instant::now(),
        };

        let initial_size = controller.get_optimal_batch_size();
        controller.record_performance(metrics);

        let new_size = controller.get_optimal_batch_size();
        assert!(new_size >= initial_size);
    }

    #[test]
    fn test_batch_controller_slow_processing_adjustment() {
        let mut controller = AdaptiveBatchController::new(1000);

        let slow_metrics = PerformanceMetrics {
            // Well over the 10ms target, so the batch size should shrink.
            processing_time_ms: 50,
            memory_usage_mb: 100,
            allocations_per_second: 100.0,
            cache_hit_ratio: 0.5,
            batch_efficiency: 0.7,
            timestamp: Instant::now(),
        };

        let initial_size = controller.get_optimal_batch_size();
        controller.record_performance(slow_metrics);

        let new_size = controller.get_optimal_batch_size();
        assert!(new_size < initial_size);
    }

    #[test]
    fn test_batch_controller_memory_pressure_adjustment() {
        let mut controller = AdaptiveBatchController::new(1000);

        let high_memory_metrics = PerformanceMetrics {
            processing_time_ms: 5,
            // Above the 500MB memory-pressure cutoff in adjust_batch_size.
            memory_usage_mb: 600,
            allocations_per_second: 1000.0,
            cache_hit_ratio: 0.8,
            batch_efficiency: 0.9,
            timestamp: Instant::now(),
        };

        let initial_size = controller.get_optimal_batch_size();
        controller.record_performance(high_memory_metrics);

        let new_size = controller.get_optimal_batch_size();
        assert!(new_size < initial_size);
    }

    #[test]
    fn test_batch_controller_performance_trend() {
        let mut controller = AdaptiveBatchController::new(1000);

        // No trend with insufficient history.
        assert!(controller.get_performance_trend().is_none());

        // Record steadily increasing processing times.
        for i in 0..10 {
            let metrics = PerformanceMetrics {
                processing_time_ms: 10 + i,
                memory_usage_mb: 100,
                allocations_per_second: 1000.0,
                cache_hit_ratio: 0.8,
                batch_efficiency: 0.9,
                timestamp: Instant::now(),
            };
            controller.record_performance(metrics);
            thread::sleep(Duration::from_millis(1));
        }

        let trend = controller.get_performance_trend();
        assert!(trend.is_some());
        let trend_str = trend.unwrap();
        assert!(trend_str == "Performance degrading" || trend_str == "Performance stable");
    }

    #[test]
    fn test_type_info_cache_creation() {
        let cache = TypeInfoCache::new(100);
        let (hits, misses, hit_ratio) = cache.get_stats();
        assert_eq!(hits, 0);
        assert_eq!(misses, 0);
        assert_eq!(hit_ratio, 0.0);
    }

    #[test]
    fn test_type_info_cache_store_and_get() {
        let cache = TypeInfoCache::new(100);
        let type_name = "TestType".to_string();
        let test_info = serde_json::json!({"name": "TestType", "size": 64});

        // First lookup misses.
        assert!(cache.get(&type_name).is_none());
        let (_, misses, _) = cache.get_stats();
        assert_eq!(misses, 1);

        cache.store(type_name.clone(), test_info.clone());

        // Second lookup hits.
        let retrieved = cache.get(&type_name);
        assert!(retrieved.is_some());
        assert_eq!(retrieved.unwrap(), test_info);

        let (hits, _, hit_ratio) = cache.get_stats();
        assert_eq!(hits, 1);
        assert!(hit_ratio > 0.0);
    }

    #[test]
    fn test_type_info_cache_clear() {
        let cache = TypeInfoCache::new(100);
        let test_info = serde_json::json!({"name": "TestType"});

        cache.store("TestType".to_string(), test_info);
        assert!(cache.get("TestType").is_some());

        cache.clear();
        assert!(cache.get("TestType").is_none());

        let (hits, misses, hit_ratio) = cache.get_stats();
        assert_eq!(hits, 0);
        // The lookup after clear() counts as a single miss.
        assert_eq!(misses, 1);
        assert_eq!(hit_ratio, 0.0);
    }

    #[test]
    fn test_memory_usage_monitor_creation() {
        let monitor = MemoryUsageMonitor::new();
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Low);
        assert!(monitor.get_usage_trend().is_none());
    }

    #[test]
    fn test_memory_usage_monitor_pressure_levels() {
        let mut monitor = MemoryUsageMonitor::new();

        monitor.update_usage(100);
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Low);

        // Above half the 1024MB warning threshold.
        monitor.update_usage(600);
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Medium);

        // Above the warning threshold.
        monitor.update_usage(1500);
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::High);

        // Above the 2048MB critical threshold.
        monitor.update_usage(3000);
        assert_eq!(monitor.get_memory_pressure(), MemoryPressureLevel::Critical);
    }

    #[test]
    fn test_memory_usage_monitor_trend() {
        let mut monitor = MemoryUsageMonitor::new();

        // Fewer than 10 samples: no trend yet.
        for i in 0..5 {
            monitor.update_usage(100 + i * 10);
        }
        assert!(monitor.get_usage_trend().is_none());

        // Steadily rising usage should register as increasing.
        for i in 5..15 {
            monitor.update_usage(100 + i * 20);
            thread::sleep(Duration::from_millis(1));
        }

        let trend = monitor.get_usage_trend();
        assert!(trend.is_some());
        assert_eq!(trend.unwrap(), MemoryTrend::Increasing);
    }

    #[test]
    fn test_memory_pressure_level_equality() {
        assert_eq!(MemoryPressureLevel::Low, MemoryPressureLevel::Low);
        assert_ne!(MemoryPressureLevel::Low, MemoryPressureLevel::High);
    }

    #[test]
    fn test_memory_trend_equality() {
        assert_eq!(MemoryTrend::Stable, MemoryTrend::Stable);
        assert_ne!(MemoryTrend::Increasing, MemoryTrend::Decreasing);
    }

    #[test]
    fn test_adaptive_performance_optimizer_creation() {
        let optimizer = AdaptivePerformanceOptimizer::new(500, 100);
        assert_eq!(optimizer.get_optimal_batch_size(), 500);
        assert!(optimizer.optimization_enabled);
    }

    #[test]
    fn test_adaptive_performance_optimizer_default() {
        let optimizer = AdaptivePerformanceOptimizer::default();
        assert_eq!(optimizer.get_optimal_batch_size(), 1000);
        assert!(optimizer.optimization_enabled);
    }

    #[test]
    fn test_optimizer_batch_performance_recording() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        optimizer.record_batch_performance(500, Duration::from_millis(5), 100, 450);

        assert!(optimizer.get_optimal_batch_size() > 0);
    }

    #[test]
    fn test_optimizer_memory_pressure_adjustment() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        // 1500MB of usage puts the memory monitor into high pressure.
        optimizer.record_batch_performance(
            1000,
            Duration::from_millis(5),
            1500,
            900,
        );

        let batch_size = optimizer.get_optimal_batch_size();
        assert!(batch_size < 1000);
    }

    #[test]
    fn test_optimizer_type_caching() {
        let optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
        let type_name = "TestType";
        let test_info = serde_json::json!({"name": "TestType", "size": 64});

        // Miss before storing.
        assert!(optimizer.get_cached_type_info(type_name).is_none());

        optimizer.cache_type_info(type_name.to_string(), test_info.clone());

        // Hit after storing.
        let cached = optimizer.get_cached_type_info(type_name);
        assert!(cached.is_some());
        assert_eq!(cached.unwrap(), test_info);
    }

    #[test]
    fn test_optimizer_performance_report() {
        let optimizer = AdaptivePerformanceOptimizer::new(1000, 100);
        let report = optimizer.get_performance_report();

        assert!(report.is_object());
        let adaptive_opt = &report["adaptive_optimization"];
        assert!(adaptive_opt["enabled"].as_bool().unwrap());
        assert!(adaptive_opt["current_batch_size"].as_u64().unwrap() > 0);
        assert!(adaptive_opt["cache_statistics"].is_object());
        assert!(adaptive_opt["memory_monitoring"].is_object());
    }

    #[test]
    fn test_optimizer_enable_disable() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        assert!(optimizer.optimization_enabled);

        optimizer.set_optimization_enabled(false);
        assert!(!optimizer.optimization_enabled);

        // With optimization disabled, the batch size falls back to the fixed default
        assert_eq!(optimizer.get_optimal_batch_size(), 1000);

        // ...and the type cache is bypassed.
        assert!(optimizer.get_cached_type_info("TestType").is_none());

        optimizer.set_optimization_enabled(true);
        assert!(optimizer.optimization_enabled);
    }

    #[test]
    fn test_optimizer_reset() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        optimizer.cache_type_info("TestType".to_string(), serde_json::json!({"test": true}));
        optimizer.record_batch_performance(500, Duration::from_millis(10), 200, 450);

        optimizer.reset();

        // The cache is cleared by reset.
        assert!(optimizer.get_cached_type_info("TestType").is_none());

        // The batch size remains usable.
        assert!(optimizer.get_optimal_batch_size() > 0);
    }

    #[test]
    fn test_cache_eviction() {
        // Tiny capacity to force eviction.
        let cache = TypeInfoCache::new(2);
        cache.store("Type1".to_string(), serde_json::json!({"id": 1}));
        cache.store("Type2".to_string(), serde_json::json!({"id": 2}));

        assert!(cache.get("Type1").is_some());
        assert!(cache.get("Type2").is_some());

        // Storing a third entry evicts the least recently used one.
        cache.store("Type3".to_string(), serde_json::json!({"id": 3}));

        assert!(cache.get("Type3").is_some());

        // At least one of the original entries should survive.
        let type1_available = cache.get("Type1").is_some();
        let type2_available = cache.get("Type2").is_some();
        assert!(type1_available || type2_available);
    }

    #[test]
    fn test_memory_monitor_comprehensive() {
        // Models the current/peak accounting that MemoryUsageMonitor performs.
        let mut current_usage = 0usize;
        let mut peak_usage = 0usize;

        assert_eq!(current_usage, 0);
        assert_eq!(peak_usage, 0);

        current_usage += 1024;
        peak_usage = peak_usage.max(current_usage);
        assert_eq!(current_usage, 1024);
        assert_eq!(peak_usage, 1024);

        current_usage += 2048;
        peak_usage = peak_usage.max(current_usage);
        assert_eq!(current_usage, 3072);
        assert_eq!(peak_usage, 3072);

        // Usage can drop while the peak is retained.
        current_usage = current_usage.saturating_sub(1024);
        assert_eq!(current_usage, 2048);
        assert_eq!(peak_usage, 3072);

        current_usage += 10240;
        peak_usage = peak_usage.max(current_usage);
        assert_eq!(current_usage, 12288);
        assert_eq!(peak_usage, 12288);

        current_usage = 0;
        peak_usage = 0;
        assert_eq!(current_usage, 0);
        assert_eq!(peak_usage, 0);
    }

    #[test]
    fn test_memory_monitor_edge_cases() {
        let mut current_usage = 0usize;

        current_usage += 0;
        assert_eq!(current_usage, 0);

        current_usage += usize::MAX / 2;
        assert_eq!(current_usage, usize::MAX / 2);

        // Saturating subtraction clamps at zero instead of underflowing.
        current_usage = current_usage.saturating_sub(usize::MAX);
        assert_eq!(current_usage, 0);
    }

    #[test]
    fn test_batch_size_calculator_comprehensive() {
        let mut current_batch_size = 1000usize;
        let min_batch_size = 100usize;

        assert_eq!(current_batch_size, 1000);

        // Good efficiency grows the batch size.
        let efficiency = 450.0 / 500.0;
        if efficiency > 0.8 {
            current_batch_size = (current_batch_size as f64 * 1.1) as usize;
        }
        assert!(current_batch_size >= 1000);

        // Poor efficiency shrinks it.
        let poor_efficiency = 200.0 / 1000.0;
        if poor_efficiency < 0.5 {
            current_batch_size = (current_batch_size as f64 * 0.8) as usize;
        }
        let poor_perf_size = current_batch_size;
        assert!(poor_perf_size <= 1100);

        // Repeated poor performance never drops below the minimum.
        for _ in 0..20 {
            let very_poor_efficiency = 10.0 / 50.0;
            if very_poor_efficiency < 0.5 {
                current_batch_size = (current_batch_size as f64 * 0.9) as usize;
                current_batch_size = current_batch_size.max(min_batch_size);
            }
        }
        assert!(current_batch_size >= min_batch_size);

        // Repeated excellent performance never exceeds the maximum.
        current_batch_size = 1000;
        for _ in 0..20 {
            let excellent_efficiency = 1950.0 / 2000.0;
            if excellent_efficiency > 0.9 {
                current_batch_size = (current_batch_size as f64 * 1.1) as usize;
                current_batch_size = current_batch_size.min(10000);
            }
        }
        assert!(current_batch_size <= 10000);
    }

    #[test]
    fn test_batch_size_calculator_edge_cases() {
        let mut current_batch_size = 1000usize;

        // Zero duration would make throughput undefined; treat it as perfect efficiency.
        let _zero_duration_efficiency = if Duration::from_millis(0).as_millis() == 0 {
            1.0
        } else {
            450.0 / 500.0
        };
        assert!(current_batch_size > 0);

        // Extreme memory usage halves the batch size.
        let high_memory_usage = usize::MAX / 2;
        if high_memory_usage > 1024 * 1024 * 100 {
            current_batch_size = (current_batch_size as f64 * 0.5) as usize;
        }
        assert!(current_batch_size <= 1000);

        // Zero items processed still keeps the batch size at a usable floor.
        let zero_processed_efficiency = 0.0 / 500.0;
        if zero_processed_efficiency == 0.0 {
            current_batch_size = current_batch_size.max(100);
        }
        assert!(current_batch_size > 0);
    }

    #[test]
    fn test_type_info_cache_comprehensive() {
        let cache = TypeInfoCache::new(10);

        // Store one entry per JSON value kind.
        cache.store("String".to_string(), serde_json::json!("test"));
        cache.store("Number".to_string(), serde_json::json!(42));
        cache.store("Boolean".to_string(), serde_json::json!(true));
        cache.store("Array".to_string(), serde_json::json!([1, 2, 3]));
        cache.store("Object".to_string(), serde_json::json!({"key": "value"}));
        cache.store("Null".to_string(), serde_json::json!(null));

        assert_eq!(cache.get("String").unwrap().as_str().unwrap(), "test");
        assert_eq!(cache.get("Number").unwrap().as_i64().unwrap(), 42);
        assert!(cache.get("Boolean").unwrap().as_bool().unwrap());
        assert_eq!(cache.get("Array").unwrap().as_array().unwrap().len(), 3);
        assert!(cache
            .get("Object")
            .unwrap()
            .as_object()
            .unwrap()
            .contains_key("key"));
        assert!(cache.get("Null").unwrap().is_null());

        // Repeated lookups still hit.
        assert!(cache.get("String").is_some());
        assert!(cache.get("Number").is_some());
        assert!(cache.get("Boolean").is_some());
        assert!(cache.get("Array").is_some());
        assert!(cache.get("Object").is_some());
        assert!(cache.get("Null").is_some());

        cache.clear();
        assert!(cache.get("String").is_none());
    }

    #[test]
    fn test_adaptive_performance_optimizer_stress() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        // Hammer the optimizer with varied batch shapes and cache traffic.
        for i in 0..1000 {
            let batch_size = 500 + (i % 500);
            let duration = Duration::from_millis(1 + (i % 100));
            let memory_before = i * 1024;
            let processed = batch_size - (i % 50);

            optimizer.record_batch_performance(
                batch_size as usize,
                duration,
                memory_before,
                processed as usize,
            );

            // Cache a type entry on every tenth iteration.
            if i % 10 == 0 {
                let type_name = format!("Type_{}", i);
                let type_info = serde_json::json!({"size": i, "complexity": i % 5});
                optimizer.cache_type_info(type_name, type_info);
            }
        }

        let optimal_size = optimizer.get_optimal_batch_size();
        assert!(
            optimal_size > 0,
            "Optimal size should be positive, got {}",
            optimal_size
        );
        assert!(
            optimal_size <= 20000,
            "Optimal size should be reasonable, got {}",
            optimal_size
        );

        assert!(optimizer.get_cached_type_info("Type_0").is_some());
        assert!(optimizer.get_cached_type_info("Type_990").is_some());

        let report = optimizer.get_performance_report();
        assert!(report.is_object());
    }

    #[test]
    fn test_adaptive_performance_optimizer_memory_pressure() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(2000, 200);

        // Progressively slower batches with progressively heavier memory use.
        let scenarios = vec![
            (1000, Duration::from_millis(50), 1024 * 1024, 950),
            (1000, Duration::from_millis(100), 10 * 1024 * 1024, 900),
            (1000, Duration::from_millis(200), 100 * 1024 * 1024, 800),
            (1000, Duration::from_millis(500), 1024 * 1024 * 1024, 700),
        ];

        let mut previous_size = optimizer.get_optimal_batch_size();

        for (batch_size, duration, memory, processed) in scenarios {
            optimizer.record_batch_performance(batch_size, duration, memory, processed);
            let current_size = optimizer.get_optimal_batch_size();

            // Under heavy memory pressure the size should not grow sharply.
            if memory > 50 * 1024 * 1024 {
                assert!(current_size <= previous_size * 2);
            }

            previous_size = current_size;
        }

        let final_size = optimizer.get_optimal_batch_size();
        assert!(
            final_size > 0,
            "Batch size should be positive, got {}",
            final_size
        );
    }

    #[test]
    fn test_performance_metrics_calculation() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        // Same batch size with degrading latency and rising memory.
        let test_cases = vec![
            (500, 10, 1024, 500),
            (500, 20, 2048, 450),
            (500, 50, 4096, 400),
            (500, 100, 8192, 300),
        ];

        for (batch_size, duration_ms, memory, processed) in test_cases {
            optimizer.record_batch_performance(
                batch_size,
                Duration::from_millis(duration_ms),
                memory,
                processed,
            );
        }

        let report = optimizer.get_performance_report();
        let adaptive_opt = &report["adaptive_optimization"];

        assert!(adaptive_opt["enabled"].as_bool().unwrap());
        assert!(adaptive_opt["current_batch_size"].as_u64().unwrap() > 0);
        assert!(adaptive_opt["cache_statistics"].is_object());
        assert!(adaptive_opt["memory_monitoring"].is_object());

        // The keys below are not emitted by the current report format, so the
        // checks only run if a future format adds them.
        if let Some(cache_stats) = adaptive_opt.get("cache_statistics") {
            if cache_stats.is_object() {
                if let Some(_size) = cache_stats.get("size") {
                    // Present only in extended report formats.
                }
                if let Some(capacity) = cache_stats.get("capacity") {
                    assert!(capacity.as_u64().unwrap_or(1) > 0);
                }
            }
        }

        if let Some(memory_stats) = adaptive_opt.get("memory_monitoring") {
            if memory_stats.is_object() {
                if let Some(_current) = memory_stats.get("current_usage") {
                    // Present only in extended report formats.
                }
                if let Some(_peak) = memory_stats.get("peak_usage") {
                    // Present only in extended report formats.
                }
            }
        }
    }

    #[test]
    fn test_optimizer_configuration_changes() {
        let mut optimizer = AdaptivePerformanceOptimizer::new(1000, 100);

        optimizer.set_optimization_enabled(false);
        let disabled_size = optimizer.get_optimal_batch_size();
        // Disabled optimization falls back to the fixed default.
        assert_eq!(disabled_size, 1000);

        optimizer.set_optimization_enabled(true);

        optimizer.record_batch_performance(750, Duration::from_millis(5), 1024, 750);
        let enabled_size = optimizer.get_optimal_batch_size();

        assert!(enabled_size > 0);

        optimizer.reset();
        let reset_size = optimizer.get_optimal_batch_size();
        assert!(reset_size > 0);
    }
}