1use crate::error::StatsResult;
10use scirs2_core::numeric::{Float, NumCast};
11use std::collections::{BTreeMap, HashMap, VecDeque};
12use std::sync::{
13 atomic::{AtomicUsize, Ordering},
14 Arc, Mutex, RwLock,
15};
16use std::thread;
17use std::time::{Duration, Instant};
18
/// Coordinates memory monitoring, smart caching, pooled allocation, and
/// memory-aware algorithm selection behind shared, lock-protected handles.
pub struct EnhancedMemoryOptimizer {
    /// Usage counters and history; shared with the background sampling thread.
    monitor: Arc<RwLock<MemoryMonitor>>,
    /// Cache with access-pattern analysis and prefetch prediction.
    cache_manager: Arc<RwLock<SmartCacheManager>>,
    /// Size-class pool allocator plus large-allocation tracking.
    pool_allocator: Arc<Mutex<PoolAllocator>>,
    /// Chooses algorithm variants from current memory conditions.
    algorithm_selector: Arc<RwLock<MemoryAwareSelector>>,
    /// Limits, thresholds, and feature switches.
    config: MemoryOptimizationConfig,
}
32
/// Tunable limits, feature switches, and thresholds for the optimizer.
#[derive(Debug, Clone)]
pub struct MemoryOptimizationConfig {
    /// Total memory budget in bytes; pressure ratios are computed against this.
    pub memory_limit: usize,
    /// Spawn the background monitoring thread during `initialize`.
    pub enable_monitoring: bool,
    /// Set up the smart cache during `initialize`.
    pub enable_smart_cache: bool,
    /// Set up the pool allocator during `initialize`.
    pub enable_pool_allocation: bool,
    /// Cache capacity in bytes.
    pub cache_limit: usize,
    /// Sleep interval between monitoring samples.
    pub monitoring_interval: Duration,
    /// Prefetch strategy the cache's predictor should use.
    pub prefetch_strategy: PrefetchStrategy,
    /// Usage-ratio cutoffs separating the pressure bands.
    pub pressure_thresholds: MemoryPressureThresholds,
}
53
54impl Default for MemoryOptimizationConfig {
55 fn default() -> Self {
56 Self {
57 memory_limit: 2 * 1024 * 1024 * 1024, enable_monitoring: true,
59 enable_smart_cache: true,
60 enable_pool_allocation: true,
61 cache_limit: 256 * 1024 * 1024, monitoring_interval: Duration::from_millis(100),
63 prefetch_strategy: PrefetchStrategy::Adaptive,
64 pressure_thresholds: MemoryPressureThresholds::default(),
65 }
66 }
67}
68
/// Usage-ratio cutoffs (fractions of `memory_limit`, in `[0, 1]`) that
/// separate the four `MemoryPressure` bands. A ratio at or above a field's
/// value falls into that band or a higher one.
#[derive(Debug, Clone)]
pub struct MemoryPressureThresholds {
    /// Upper edge of comfortable usage.
    pub low: f64,
    /// Entry point of the `Medium` band.
    pub medium: f64,
    /// Entry point of the `High` band.
    pub high: f64,
    /// Entry point of the `Critical` band.
    pub critical: f64,
}
81
82impl Default for MemoryPressureThresholds {
83 fn default() -> Self {
84 Self {
85 low: 0.5, medium: 0.7, high: 0.85, critical: 0.95, }
90 }
91}
92
/// Strategy the cache's prefetch predictor follows.
///
/// `PartialEq`, `Eq`, and `Hash` are derived so a strategy can be used as a
/// key in `PrefetchPredictor::accuracy_scores: HashMap<PrefetchStrategy, f64>`
/// (the map is unusable — `insert`/`get` fail to compile — without them).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PrefetchStrategy {
    /// No prefetching.
    None,
    Sequential,
    Adaptive,
    MLBased,
}
105
/// Discrete pressure band derived from the usage/limit ratio.
///
/// `Hash` is added to the existing derives so the band can key
/// `AlgorithmProfile::performance_by_pressure: HashMap<MemoryPressure, _>`;
/// without it the map's `insert`/`get` methods do not compile.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MemoryPressure {
    Low,
    Medium,
    High,
    Critical,
}
114
/// Live memory-usage counters plus event/usage histories.
///
/// The atomic counters can be read without taking the enclosing `RwLock`
/// write guard; the histories are independently `Mutex`-protected.
#[allow(dead_code)]
struct MemoryMonitor {
    /// Bytes currently attributed to tracked allocations.
    current_usage: AtomicUsize,
    /// High-water mark of `current_usage`.
    peak_usage: AtomicUsize,
    /// Recent allocation/deallocation events.
    allocation_events: Mutex<VecDeque<AllocationEvent>>,
    /// Periodic usage snapshots taken by the monitoring thread.
    usage_history: Mutex<VecDeque<MemorySnapshot>>,
    /// Derived performance metrics.
    performance_metrics: Mutex<PerformanceMetrics>,
    /// When metrics were last refreshed.
    last_update: Mutex<Instant>,
}
131
/// One recorded allocation, deallocation, or reallocation.
#[derive(Debug, Clone)]
struct AllocationEvent {
    /// When the event occurred.
    timestamp: Instant,
    /// Size in bytes involved in the event.
    size: usize,
    /// Which kind of event this was.
    operation: AllocationType,
    /// Free-form description of where the event originated.
    context: String,
}
140
/// Kind of memory event captured in an `AllocationEvent`.
#[derive(Debug, Clone)]
#[allow(dead_code)]
enum AllocationType {
    Allocate,
    Deallocate,
    Reallocate,
}
148
/// Point-in-time sample of memory state, stored in the monitor's history.
#[derive(Debug, Clone)]
struct MemorySnapshot {
    /// When the sample was taken.
    timestamp: Instant,
    /// Bytes in use at sample time.
    usage: usize,
    /// Pressure band at sample time.
    pressure: MemoryPressure,
    /// Observed operation throughput at sample time.
    operations_per_second: f64,
}
157
/// Aggregated performance indicators maintained by the monitor.
#[derive(Debug, Clone)]
#[allow(dead_code)]
struct PerformanceMetrics {
    /// Mean time per allocation.
    avg_allocation_time: f64,
    /// Cache hits / total lookups, in `[0, 1]`.
    cache_hit_ratio: f64,
    /// Estimated fragmentation, in `[0, 1]`.
    fragmentation_ratio: f64,
    /// How often garbage collection runs.
    gc_frequency: f64,
    /// Per-algorithm performance scores, keyed by algorithm name.
    algorithm_scores: HashMap<String, f64>,
}
173
/// Cache with access-pattern analysis, prefetch prediction, and statistics.
struct SmartCacheManager {
    /// Cached entries keyed by string identifier (ordered for range scans).
    cache: BTreeMap<String, CacheEntry>,
    /// Tracks sequential/temporal/frequency access patterns.
    access_analyzer: AccessPatternAnalyzer,
    /// Predicts which keys to prefetch next.
    prefetch_predictor: PrefetchPredictor,
    /// Hit/miss/eviction counters.
    stats: CacheStatistics,
    /// Owned copy of the optimizer configuration.
    config: MemoryOptimizationConfig,
}
187
/// One cached value plus the bookkeeping used for eviction decisions.
#[derive(Debug, Clone)]
struct CacheEntry {
    /// Serialized payload bytes.
    data: Vec<u8>,
    /// Last access time (recency signal).
    last_accessed: Instant,
    /// Number of accesses (frequency signal).
    access_count: usize,
    /// Size of the entry in bytes.
    size: usize,
    /// Eviction priority; higher-priority entries are kept longer.
    priority: CachePriority,
}
197
/// Eviction priority of a cache entry; the explicit discriminants give a
/// total order (`Low` < `Medium` < `High` < `Critical`) via `Ord`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum CachePriority {
    Low = 1,
    Medium = 2,
    High = 3,
    Critical = 4,
}
205
/// Records cache-access patterns to inform prefetching.
struct AccessPatternAnalyzer {
    /// For each key, the keys observed to follow it.
    sequential_patterns: HashMap<String, Vec<String>>,
    /// For each key, the times it was accessed.
    temporal_patterns: HashMap<String, Vec<Instant>>,
    /// Per-key access counts.
    frequency_map: HashMap<String, usize>,
}
215
/// Chooses and scores prefetch strategies, queueing predicted accesses.
///
/// NOTE(review): `PrefetchStrategy` must derive `PartialEq + Eq + Hash` for
/// `accuracy_scores` to be usable as a map — confirm the enum's derives.
struct PrefetchPredictor {
    /// Historical accuracy per strategy.
    accuracy_scores: HashMap<PrefetchStrategy, f64>,
    /// Strategy currently in effect.
    current_strategy: PrefetchStrategy,
    /// Pending predictions awaiting confirmation.
    prediction_queue: VecDeque<PrefetchPrediction>,
}
225
/// A single "this key will be accessed soon" prediction.
#[derive(Debug, Clone)]
struct PrefetchPrediction {
    /// Cache key expected to be requested.
    key: String,
    /// Confidence of the prediction.
    confidence: f64,
    /// When the access is expected.
    predicted_access_time: Instant,
    /// Strategy that produced this prediction.
    strategy_used: PrefetchStrategy,
}
233
/// Lock-free cache counters; updated atomically so readers need no lock.
struct CacheStatistics {
    /// Lookups that found an entry.
    hits: AtomicUsize,
    /// Lookups that missed.
    misses: AtomicUsize,
    /// Entries removed to make room.
    evictions: AtomicUsize,
    /// Prefetched entries that were subsequently used.
    prefetch_hits: AtomicUsize,
    /// Prefetched entries that went unused.
    prefetch_misses: AtomicUsize,
}
242
/// Size-class pool allocator; oversized requests are tracked separately.
struct PoolAllocator {
    /// Pools keyed by block size in bytes.
    pools: HashMap<usize, MemoryPool>,
    /// Allocations too large for any pool.
    large_allocations: Vec<LargeAllocation>,
    /// Aggregate allocator counters.
    pool_stats: PoolStatistics,
}
252
/// Fixed-block-size memory pool.
///
/// NOTE(review): `available_blocks` stores raw `*mut u8` pointers, which
/// makes this type (and everything holding it, including the
/// `Arc<Mutex<PoolAllocator>>` in the optimizer) `!Send`/`!Sync` — confirm
/// this is intended given the background monitoring thread elsewhere.
struct MemoryPool {
    /// Size in bytes of every block in this pool.
    blocksize: usize,
    /// Free blocks ready to hand out.
    available_blocks: VecDeque<*mut u8>,
    /// Total number of blocks the pool has created.
    total_blocks: usize,
    /// Maximum number of blocks the pool may hold.
    capacity: usize,
    /// Per-pool usage counters.
    usage_stats: PoolUsageStats,
}
266
/// An allocation too large for the size-class pools, tracked individually.
/// NOTE(review): holds a raw pointer with no `Drop` impl visible here —
/// confirm who owns and frees this memory.
struct LargeAllocation {
    /// Start of the allocation.
    ptr: *mut u8,
    /// Size in bytes.
    size: usize,
    /// When the allocation was made (used for age-based cleanup).
    timestamp: Instant,
}
273
/// Allocator-wide counters; atomics so they can be bumped without a lock.
struct PoolStatistics {
    /// Total allocation requests served.
    total_allocations: AtomicUsize,
    /// Total deallocations processed.
    total_deallocations: AtomicUsize,
    /// Requests satisfied from a pool.
    pool_hits: AtomicUsize,
    /// Requests that fell through to a direct allocation.
    pool_misses: AtomicUsize,
}
281
/// Per-pool usage counters (guarded by the allocator's `Mutex`, so plain
/// integers suffice here).
struct PoolUsageStats {
    /// Blocks handed out by this pool.
    allocations: usize,
    /// Blocks returned to this pool.
    deallocations: usize,
    /// High-water mark of `current_usage`.
    peak_usage: usize,
    /// Blocks currently outstanding.
    current_usage: usize,
}
289
/// Picks algorithm variants based on current memory conditions.
struct MemoryAwareSelector {
    /// Known algorithm profiles, keyed by algorithm name.
    algorithm_profiles: HashMap<String, AlgorithmProfile>,
    /// Most recently assessed memory conditions.
    current_conditions: MemoryConditions,
    /// Past selections and their outcomes.
    selection_history: Vec<SelectionEvent>,
}
299
/// Performance/memory profile of one algorithm variant.
///
/// NOTE(review): `performance_by_pressure` requires `MemoryPressure` to
/// derive `Hash` — confirm the enum's derives include it.
#[derive(Debug, Clone)]
struct AlgorithmProfile {
    /// Algorithm name.
    name: String,
    /// How the algorithm's memory use scales with input size.
    memory_usage: MemoryUsageProfile,
    /// Measured scores under each pressure band.
    performance_by_pressure: HashMap<MemoryPressure, PerformanceScore>,
    /// Data-size ranges (min, max) where this algorithm performs best.
    optimaldatasizes: Vec<(usize, usize)>,
}
312
/// Model of an algorithm's memory consumption.
#[derive(Debug, Clone)]
struct MemoryUsageProfile {
    /// Fixed overhead in bytes, independent of input size.
    base_memory: usize,
    /// Bytes per input element (linear scaling term).
    scaling_factor: f64,
    /// Peak usage as a multiple of steady-state usage.
    peak_multiplier: f64,
    /// Dominant memory-access pattern of the algorithm.
    access_pattern: AccessPattern,
}
325
/// Dominant memory-access pattern of an algorithm.
#[derive(Debug, Clone)]
enum AccessPattern {
    Sequential,
    Random,
    /// Regular stride of the given number of elements.
    Strided(usize),
    Temporal,
}
333
/// Composite performance rating (higher is better on each axis).
///
/// NOTE(review): the struct is `pub` but all fields are private, so callers
/// outside this module receive a score they cannot inspect — confirm whether
/// the fields should be `pub` or accessors are planned.
#[derive(Debug, Clone)]
pub struct PerformanceScore {
    /// Execution-time rating.
    time_score: f64,
    /// Memory-footprint rating.
    memory_score: f64,
    /// Cache-behavior rating.
    cache_score: f64,
    /// Overall rating.
    overall_score: f64,
}
346
/// Snapshot of the memory environment used for algorithm selection.
#[derive(Debug, Clone)]
struct MemoryConditions {
    /// Bytes still available under the configured limit.
    available_memory: usize,
    /// Current pressure band.
    pressure: MemoryPressure,
    /// Cache hits / total lookups, in `[0, 1]`.
    cache_hit_ratio: f64,
    /// Estimated memory-bandwidth utilization, in `[0, 1]`.
    bandwidth_utilization: f64,
}
359
/// Record of one algorithm-selection decision and its outcome.
struct SelectionEvent {
    /// When the selection was made.
    timestamp: Instant,
    /// Name of the chosen algorithm variant.
    algorithm: String,
    /// Input size the selection was made for.
    datasize: usize,
    /// Conditions at selection time.
    memory_conditions: MemoryConditions,
    /// Measured performance of the chosen algorithm.
    performance_result: PerformanceScore,
}
368
369impl EnhancedMemoryOptimizer {
370 pub fn new(config: MemoryOptimizationConfig) -> Self {
372 let monitor = Arc::new(RwLock::new(MemoryMonitor::new()));
373 let cache_manager = Arc::new(RwLock::new(SmartCacheManager::new(&config)));
374 let pool_allocator = Arc::new(Mutex::new(PoolAllocator::new()));
375 let algorithm_selector = Arc::new(RwLock::new(MemoryAwareSelector::new()));
376
377 Self {
378 monitor,
379 cache_manager,
380 pool_allocator,
381 algorithm_selector,
382 config,
383 }
384 }
385
386 pub fn initialize(&self) -> StatsResult<()> {
388 if self.config.enable_monitoring {
389 self.start_memory_monitoring()?;
390 }
391
392 if self.config.enable_smart_cache {
393 self.initialize_smart_cache()?;
394 }
395
396 if self.config.enable_pool_allocation {
397 self.initialize_memory_pools()?;
398 }
399
400 Ok(())
401 }
402
403 pub fn get_memory_stats(&self) -> MemoryStatistics {
405 let monitor = self.monitor.read().unwrap();
406 let current_usage = monitor.current_usage.load(Ordering::Relaxed);
407 let peak_usage = monitor.peak_usage.load(Ordering::Relaxed);
408
409 let pressure = self.calculate_memory_pressure(current_usage);
410
411 MemoryStatistics {
412 current_usage,
413 peak_usage,
414 pressure,
415 available_memory: self.config.memory_limit.saturating_sub(current_usage),
416 fragmentation_ratio: self.calculate_fragmentation_ratio(),
417 cache_hit_ratio: self.get_cache_hit_ratio(),
418 allocation_efficiency: self.calculate_allocation_efficiency(),
419 }
420 }
421
422 pub fn optimize_for_computation<F>(
424 &self,
425 datasize: usize,
426 operation: &str,
427 ) -> OptimizationRecommendation
428 where
429 F: Float + NumCast + std::fmt::Display,
430 {
431 let current_conditions = self.assess_memory_conditions();
432 let algorithm_selector = self.algorithm_selector.read().unwrap();
433
434 let recommended_algorithm =
436 algorithm_selector.select_algorithm(operation, datasize, ¤t_conditions);
437
438 let memory_layout = self.determine_optimal_layout(datasize, ¤t_conditions);
440
441 let cache_strategy = self.recommend_cache_strategy(datasize, operation);
443
444 OptimizationRecommendation {
445 algorithm: recommended_algorithm,
446 memory_layout,
447 cache_strategy,
448 expected_performance: self.predict_performance(datasize, operation),
449 memory_requirements: self.estimate_memory_requirements(datasize, operation),
450 }
451 }
452
453 pub fn garbage_collect(&self) -> StatsResult<GarbageCollectionResult> {
455 let start_time = Instant::now();
456 let initial_usage = self.get_current_memory_usage();
457
458 let cache_freed = self.cleanup_cache()?;
460
461 let pool_freed = self.consolidate_memory_pools()?;
463
464 let large_freed = self.cleanup_large_allocations()?;
466
467 let final_usage = self.get_current_memory_usage();
468 let total_freed = initial_usage.saturating_sub(final_usage);
469 let duration = start_time.elapsed();
470
471 Ok(GarbageCollectionResult {
472 total_freed,
473 cache_freed,
474 pool_freed,
475 large_freed,
476 duration,
477 fragmentation_improved: self.calculate_fragmentation_improvement(),
478 })
479 }
480
481 pub fn select_algorithm<F>(&self, operation: &str, datasize: usize) -> String
483 where
484 F: Float + NumCast + std::fmt::Display,
485 {
486 let conditions = self.assess_memory_conditions();
487 let selector = self.algorithm_selector.read().unwrap();
488 selector.select_algorithm(operation, datasize, &conditions)
489 }
490
491 fn start_memory_monitoring(&self) -> StatsResult<()> {
494 let monitor = Arc::clone(&self.monitor);
495 let interval = self.config.monitoring_interval;
496
497 thread::spawn(move || loop {
498 thread::sleep(interval);
499
500 let mut monitor = monitor.write().unwrap();
501 monitor.update_memory_metrics();
502 monitor.analyze_trends();
503 monitor.update_performance_metrics();
504 });
505
506 Ok(())
507 }
508
509 fn initialize_smart_cache(&self) -> StatsResult<()> {
510 let _cache_manager = self.cache_manager.write().unwrap();
511 Ok(())
513 }
514
515 fn initialize_memory_pools(&self) -> StatsResult<()> {
516 let mut allocator = self.pool_allocator.lock().unwrap();
517 allocator.initialize_pools();
518 Ok(())
519 }
520
521 fn calculate_memory_pressure(&self, current_usage: usize) -> MemoryPressure {
522 let usage_ratio = current_usage as f64 / self.config.memory_limit as f64;
523 let thresholds = &self.config.pressure_thresholds;
524
525 if usage_ratio >= thresholds.critical {
526 MemoryPressure::Critical
527 } else if usage_ratio >= thresholds.high {
528 MemoryPressure::High
529 } else if usage_ratio >= thresholds.medium {
530 MemoryPressure::Medium
531 } else {
532 MemoryPressure::Low
533 }
534 }
535
536 fn calculate_fragmentation_ratio(&self) -> f64 {
537 0.1 }
540
541 fn get_cache_hit_ratio(&self) -> f64 {
542 let cache_manager = self.cache_manager.read().unwrap();
543 cache_manager.get_hit_ratio()
544 }
545
546 fn calculate_allocation_efficiency(&self) -> f64 {
547 let allocator = self.pool_allocator.lock().unwrap();
548 allocator.calculate_efficiency()
549 }
550
551 fn assess_memory_conditions(&self) -> MemoryConditions {
552 let current_usage = self.get_current_memory_usage();
553 MemoryConditions {
554 available_memory: self.config.memory_limit.saturating_sub(current_usage),
555 pressure: self.calculate_memory_pressure(current_usage),
556 cache_hit_ratio: self.get_cache_hit_ratio(),
557 bandwidth_utilization: self.estimate_bandwidth_utilization(),
558 }
559 }
560
561 fn determine_optimal_layout(
562 &self,
563 datasize: usize,
564 conditions: &MemoryConditions,
565 ) -> MemoryLayout {
566 match conditions.pressure {
567 MemoryPressure::Low => MemoryLayout::Contiguous,
568 MemoryPressure::Medium => MemoryLayout::Chunked(self.optimal_chunksize(datasize)),
569 MemoryPressure::High => MemoryLayout::Streaming,
570 MemoryPressure::Critical => MemoryLayout::MemoryMapped,
571 }
572 }
573
574 fn recommend_cache_strategy(&self, datasize: usize, operation: &str) -> CacheStrategy {
575 if datasize < 1024 * 1024 {
576 CacheStrategy::Aggressive
578 } else if datasize < 100 * 1024 * 1024 {
579 CacheStrategy::Selective
581 } else {
582 CacheStrategy::Minimal
583 }
584 }
585
586 fn predict_performance(&self, size: usize, operation: &str) -> PerformanceScore {
587 PerformanceScore {
589 time_score: 85.0,
590 memory_score: 78.0,
591 cache_score: 92.0,
592 overall_score: 85.0,
593 }
594 }
595
596 fn estimate_memory_requirements(&self, datasize: usize, operation: &str) -> MemoryRequirements {
597 let base_memory = datasize * std::mem::size_of::<f64>();
598 let overhead_multiplier = match operation {
599 "mean" => 1.1,
600 "variance" => 1.3,
601 "correlation" => 2.0,
602 "regression" => 2.5,
603 _ => 1.5,
604 };
605
606 MemoryRequirements {
607 minimum: base_memory,
608 recommended: (base_memory as f64 * overhead_multiplier) as usize,
609 peak: (base_memory as f64 * overhead_multiplier * 1.5) as usize,
610 }
611 }
612
613 fn get_current_memory_usage(&self) -> usize {
614 self.monitor
615 .read()
616 .unwrap()
617 .current_usage
618 .load(Ordering::Relaxed)
619 }
620
621 fn cleanup_cache(&self) -> StatsResult<usize> {
622 let mut cache_manager = self.cache_manager.write().unwrap();
623 Ok(cache_manager.cleanup_expired_entries())
624 }
625
626 fn consolidate_memory_pools(&self) -> StatsResult<usize> {
627 let mut allocator = self.pool_allocator.lock().unwrap();
628 Ok(allocator.consolidate_pools())
629 }
630
631 fn cleanup_large_allocations(&self) -> StatsResult<usize> {
632 let mut allocator = self.pool_allocator.lock().unwrap();
633 Ok(allocator.cleanup_large_allocations())
634 }
635
636 fn calculate_fragmentation_improvement(&self) -> f64 {
637 0.15 }
640
641 fn optimal_chunksize(&self, datasize: usize) -> usize {
642 (32 * 1024).min(datasize / 4) }
645
646 fn estimate_bandwidth_utilization(&self) -> f64 {
647 0.65 }
650}
651
/// Snapshot returned by `EnhancedMemoryOptimizer::get_memory_stats`.
#[derive(Debug, Clone)]
pub struct MemoryStatistics {
    /// Bytes currently in use.
    pub current_usage: usize,
    /// High-water mark of usage.
    pub peak_usage: usize,
    /// Pressure band derived from `current_usage`.
    pub pressure: MemoryPressure,
    /// Bytes remaining under the configured limit.
    pub available_memory: usize,
    /// Estimated fragmentation, in `[0, 1]`.
    pub fragmentation_ratio: f64,
    /// Cache hits / total lookups, in `[0, 1]`.
    pub cache_hit_ratio: f64,
    /// Pool-allocation efficiency estimate, in `[0, 1]`.
    pub allocation_efficiency: f64,
}
665
/// Recommendation produced by `optimize_for_computation`.
#[derive(Debug, Clone)]
pub struct OptimizationRecommendation {
    /// Name of the suggested algorithm variant.
    pub algorithm: String,
    /// Suggested data layout.
    pub memory_layout: MemoryLayout,
    /// Suggested caching aggressiveness.
    pub cache_strategy: CacheStrategy,
    /// Predicted performance of the recommendation.
    pub expected_performance: PerformanceScore,
    /// Estimated memory needs of the computation.
    pub memory_requirements: MemoryRequirements,
}
675
/// Recommended data layout, ordered from cheapest to most memory-frugal.
#[derive(Debug, Clone)]
pub enum MemoryLayout {
    /// Single contiguous buffer.
    Contiguous,
    /// Process in chunks of the given size.
    Chunked(usize),
    /// Stream the data rather than holding it all.
    Streaming,
    /// Back the data with a memory-mapped file.
    MemoryMapped,
}
684
/// How aggressively intermediate results should be cached.
#[derive(Debug, Clone)]
pub enum CacheStrategy {
    Aggressive,
    Selective,
    Minimal,
}
692
/// Estimated memory needs of a computation, in bytes.
#[derive(Debug, Clone)]
pub struct MemoryRequirements {
    /// Bare minimum to hold the data.
    pub minimum: usize,
    /// Comfortable working amount (minimum plus operation overhead).
    pub recommended: usize,
    /// Expected transient peak.
    pub peak: usize,
}
700
/// Outcome of a `garbage_collect` run; all byte counts are best-effort.
#[derive(Debug, Clone)]
pub struct GarbageCollectionResult {
    /// Net change in tracked usage across the whole run.
    pub total_freed: usize,
    /// Bytes reclaimed from the cache.
    pub cache_freed: usize,
    /// Bytes reclaimed by pool consolidation.
    pub pool_freed: usize,
    /// Bytes reclaimed from large allocations.
    pub large_freed: usize,
    /// Wall-clock time the collection took.
    pub duration: Duration,
    /// Estimated fragmentation improvement, in `[0, 1]`.
    pub fragmentation_improved: f64,
}
711
712impl MemoryMonitor {
714 fn new() -> Self {
715 Self {
716 current_usage: AtomicUsize::new(0),
717 peak_usage: AtomicUsize::new(0),
718 allocation_events: Mutex::new(VecDeque::new()),
719 usage_history: Mutex::new(VecDeque::new()),
720 performance_metrics: Mutex::new(PerformanceMetrics::default()),
721 last_update: Mutex::new(Instant::now()),
722 }
723 }
724
725 fn update_memory_metrics(&mut self) {
726 }
728
729 fn analyze_trends(&self) {
730 }
732
733 fn update_performance_metrics(&self) {
734 }
736}
737
738impl SmartCacheManager {
739 fn new(config: &MemoryOptimizationConfig) -> Self {
740 Self {
741 cache: BTreeMap::new(),
742 access_analyzer: AccessPatternAnalyzer::new(),
743 prefetch_predictor: PrefetchPredictor::new(),
744 stats: CacheStatistics::new(),
745 config: config.clone(),
746 }
747 }
748
749 fn get_hit_ratio(&self) -> f64 {
750 let hits = self.stats.hits.load(Ordering::Relaxed);
751 let total = hits + self.stats.misses.load(Ordering::Relaxed);
752 if total > 0 {
753 hits as f64 / total as f64
754 } else {
755 0.0
756 }
757 }
758
759 fn cleanup_expired_entries(&mut self) -> usize {
760 0
762 }
763}
764
765impl PoolAllocator {
766 fn new() -> Self {
767 Self {
768 pools: HashMap::new(),
769 large_allocations: Vec::new(),
770 pool_stats: PoolStatistics::new(),
771 }
772 }
773
774 fn initialize_pools(&mut self) {
775 }
777
778 fn calculate_efficiency(&self) -> f64 {
779 0.85
781 }
782
783 fn consolidate_pools(&mut self) -> usize {
784 0
786 }
787
788 fn cleanup_large_allocations(&mut self) -> usize {
789 0
791 }
792}
793
794impl MemoryAwareSelector {
795 fn new() -> Self {
796 Self {
797 algorithm_profiles: HashMap::new(),
798 current_conditions: MemoryConditions::default(),
799 selection_history: Vec::new(),
800 }
801 }
802
803 fn select_algorithm(
804 &self,
805 operation: &str,
806 datasize: usize,
807 conditions: &MemoryConditions,
808 ) -> String {
809 match conditions.pressure {
811 MemoryPressure::Low => format!("{}_full", operation),
812 MemoryPressure::Medium => format!("{}_optimized", operation),
813 MemoryPressure::High => format!("{}_streaming", operation),
814 MemoryPressure::Critical => format!("{}_minimal", operation),
815 }
816 }
817}
818
819impl Default for PerformanceMetrics {
821 fn default() -> Self {
822 Self {
823 avg_allocation_time: 0.0,
824 cache_hit_ratio: 0.0,
825 fragmentation_ratio: 0.0,
826 gc_frequency: 0.0,
827 algorithm_scores: HashMap::new(),
828 }
829 }
830}
831
832impl AccessPatternAnalyzer {
833 fn new() -> Self {
834 Self {
835 sequential_patterns: HashMap::new(),
836 temporal_patterns: HashMap::new(),
837 frequency_map: HashMap::new(),
838 }
839 }
840}
841
842impl PrefetchPredictor {
843 fn new() -> Self {
844 Self {
845 accuracy_scores: HashMap::new(),
846 current_strategy: PrefetchStrategy::Adaptive,
847 prediction_queue: VecDeque::new(),
848 }
849 }
850}
851
852impl CacheStatistics {
853 fn new() -> Self {
854 Self {
855 hits: AtomicUsize::new(0),
856 misses: AtomicUsize::new(0),
857 evictions: AtomicUsize::new(0),
858 prefetch_hits: AtomicUsize::new(0),
859 prefetch_misses: AtomicUsize::new(0),
860 }
861 }
862}
863
864impl PoolStatistics {
865 fn new() -> Self {
866 Self {
867 total_allocations: AtomicUsize::new(0),
868 total_deallocations: AtomicUsize::new(0),
869 pool_hits: AtomicUsize::new(0),
870 pool_misses: AtomicUsize::new(0),
871 }
872 }
873}
874
875impl Default for MemoryConditions {
876 fn default() -> Self {
877 Self {
878 available_memory: 1024 * 1024 * 1024, pressure: MemoryPressure::Low,
880 cache_hit_ratio: 0.8,
881 bandwidth_utilization: 0.5,
882 }
883 }
884}
885
886#[allow(dead_code)]
888pub fn create_enhanced_memory_optimizer() -> EnhancedMemoryOptimizer {
889 EnhancedMemoryOptimizer::new(MemoryOptimizationConfig::default())
890}
891
892#[allow(dead_code)]
894pub fn create_configured_memory_optimizer(
895 config: MemoryOptimizationConfig,
896) -> EnhancedMemoryOptimizer {
897 EnhancedMemoryOptimizer::new(config)
898}