Skip to main content

torsh_profiler/
memory_optimization.rs

1//! Advanced Memory Optimization Features
2//!
3//! This module provides sophisticated memory optimization techniques including
4//! adaptive memory management, smart garbage collection triggers, memory pool
5//! optimization, and predictive memory allocation strategies.
6
7use crate::memory::{MemoryEvent, MemoryEventType, MemoryProfiler, MemoryStats};
8use parking_lot::RwLock;
9use serde::{Deserialize, Serialize};
10use std::collections::{BTreeMap, HashMap, VecDeque};
11use std::sync::{
12    atomic::{AtomicU64, AtomicUsize, Ordering},
13    Arc, Mutex,
14};
15use std::thread;
16use std::time::{Duration, Instant, SystemTime};
17
/// Advanced memory optimizer with adaptive strategies.
///
/// All heavy state (strategies, history, stats, predictor, pools) lives
/// behind `Arc`s, so `Clone` produces a cheap handle sharing the same
/// underlying state (see the manual `Clone` impl below).
#[derive(Debug)]
pub struct AdvancedMemoryOptimizer {
    /// Current memory strategies (toggled at runtime behind an RwLock).
    strategies: Arc<RwLock<MemoryStrategies>>,
    /// Memory usage history for pattern analysis; bounded ring of snapshots
    /// (capped at `config.history_window`, oldest dropped first).
    usage_history: Arc<Mutex<VecDeque<MemorySnapshot>>>,
    /// Statistics and metrics (atomic counters, shared across clones).
    stats: Arc<MemoryOptimizationStats>,
    /// Configuration (plain value; deep-copied on clone).
    config: MemoryOptimizationConfig,
    /// Predictive model (simple linear regression over usage history).
    predictor: Arc<Mutex<MemoryUsagePredictor>>,
    /// Memory pool manager keyed by power-of-two size classes.
    pool_manager: Arc<AdaptivePoolManager>,
}
34
/// Memory optimization strategies — independent on/off switches consulted
/// each optimization cycle (see `perform_optimizations`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStrategies {
    /// Enable predictive allocation (pre-warm pools from regression forecast).
    pub predictive_allocation: bool,
    /// Enable adaptive garbage collection suggestions under high pressure.
    pub adaptive_gc: bool,
    /// Enable memory compaction when fragmentation exceeds 0.5.
    pub memory_compaction: bool,
    /// Enable pool rebalancing (expand/shrink per-size-class pools).
    pub pool_optimization: bool,
    /// Memory pressure threshold (0.0 - 1.0).
    pub pressure_threshold: f64,
    /// Allocation batch size optimization.
    /// NOTE(review): currently never read by the optimizer — confirm intent.
    pub batch_optimization: bool,
}
51
52impl Default for MemoryStrategies {
53    fn default() -> Self {
54        Self {
55            predictive_allocation: true,
56            adaptive_gc: true,
57            memory_compaction: true,
58            pool_optimization: true,
59            pressure_threshold: 0.8,
60            batch_optimization: true,
61        }
62    }
63}
64
/// Memory snapshot for pattern analysis — one point-in-time observation
/// appended to the optimizer's usage history each cycle.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemorySnapshot {
    /// Wall-clock time the snapshot was taken.
    pub timestamp: SystemTime,
    /// Currently allocated bytes (from `MemoryStats.allocated`).
    pub total_allocated: usize,
    /// Peak allocation observed (from `MemoryStats.peak`).
    pub peak_usage: usize,
    /// `1 - allocated/peak`, in [0, 1]; 0 when peak is 0.
    pub fragmentation_ratio: f64,
    /// Allocation delta per second vs the previous snapshot (bytes/sec).
    pub allocation_rate: f64,
    /// Deallocation rate — currently always 0.0 (placeholder calculation).
    pub deallocation_rate: f64,
    /// `allocated / peak` pressure proxy, in [0, 1].
    pub gc_pressure: f64,
    /// Free-form label for the operation that produced this snapshot.
    pub operation_context: String,
}
77
/// Memory optimization configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryOptimizationConfig {
    /// History window size for pattern analysis (max retained snapshots).
    pub history_window: usize,
    /// Minimum samples before optimization kicks in.
    pub min_samples: usize,
    /// Interval between background optimization cycles.
    pub check_interval: Duration,
    /// Memory pressure warning threshold (fraction, 0.0 - 1.0).
    pub warning_threshold: f64,
    /// Memory pressure critical threshold (fraction, 0.0 - 1.0);
    /// crossing it can trigger a GC suggestion.
    pub critical_threshold: f64,
    /// Enable machine learning predictions (linear-regression forecasting).
    pub ml_predictions: bool,
    /// Pool size optimization parameters.
    pub pool_params: PoolOptimizationParams,
}
96
97impl Default for MemoryOptimizationConfig {
98    fn default() -> Self {
99        Self {
100            history_window: 1000,
101            min_samples: 100,
102            check_interval: Duration::from_secs(5),
103            warning_threshold: 0.7,
104            critical_threshold: 0.9,
105            ml_predictions: true,
106            pool_params: PoolOptimizationParams::default(),
107        }
108    }
109}
110
/// Pool optimization parameters.
///
/// NOTE(review): `min_pool_size`/`max_pool_size` are expressed in bytes,
/// but `MemoryPool` compares them against block counts — confirm units.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolOptimizationParams {
    /// Initial pool size in bytes.
    pub initial_pool_size: usize,
    /// Multiplicative growth factor applied when a pool expands (> 1.0).
    pub growth_factor: f64,
    /// Utilization fraction below which a pool is eligible to shrink.
    pub shrink_threshold: f64,
    /// Lower bound on pool size (bytes).
    pub min_pool_size: usize,
    /// Upper bound on pool size (bytes); also caps the size-class lattice.
    pub max_pool_size: usize,
    /// Minimum time between pool rebalances.
    /// NOTE(review): currently not consulted by `rebalance_pools` — verify.
    pub rebalance_interval: Duration,
}
121
122impl Default for PoolOptimizationParams {
123    fn default() -> Self {
124        Self {
125            initial_pool_size: 1024 * 1024, // 1MB
126            growth_factor: 1.5,
127            shrink_threshold: 0.3,
128            min_pool_size: 64 * 1024,         // 64KB
129            max_pool_size: 128 * 1024 * 1024, // 128MB
130            rebalance_interval: Duration::from_secs(30),
131        }
132    }
133}
134
/// Memory optimization statistics.
///
/// Counters are atomics so clones of the optimizer can update them
/// concurrently without locking.
#[derive(Debug)]
pub struct MemoryOptimizationStats {
    /// Number of optimization passes performed.
    pub optimizations_performed: AtomicU64,
    /// Bytes saved by optimizations.
    /// NOTE(review): never incremented anywhere in this module — confirm.
    pub memory_saved: AtomicUsize,
    /// GC suggestions issued (named "avoided" — verify the intended meaning).
    pub gc_triggers_avoided: AtomicU64,
    /// Count of compaction passes performed.
    pub fragmentation_reduced: AtomicU64,
    /// Number of allocation-size predictions acted upon.
    pub allocation_predictions: AtomicU64,
    /// Prediction accuracy, stored as percentage * 100 (e.g. 8500 = 85%).
    pub prediction_accuracy: AtomicUsize, // as percentage * 100
}
145
146impl Default for MemoryOptimizationStats {
147    fn default() -> Self {
148        Self {
149            optimizations_performed: AtomicU64::new(0),
150            memory_saved: AtomicUsize::new(0),
151            gc_triggers_avoided: AtomicU64::new(0),
152            fragmentation_reduced: AtomicU64::new(0),
153            allocation_predictions: AtomicU64::new(0),
154            prediction_accuracy: AtomicUsize::new(8500), // Start at 85%
155        }
156    }
157}
158
/// Memory usage predictor using simple machine learning
/// (ordinary least-squares linear regression over (time, usage) points).
#[derive(Debug)]
pub struct MemoryUsagePredictor {
    /// Historical data points (bounded to the 1000 most recent).
    data_points: Vec<DataPoint>,
    /// Simple linear regression parameters: usage ≈ slope * t + intercept.
    slope: f64,
    intercept: f64,
    /// Mean usage per operation context ("seasonal" profile).
    patterns: HashMap<String, f64>,
    /// Prediction confidence in [0, 1]; predictions below 0.3 are suppressed.
    confidence: f64,
}
172
/// One regression observation fed to [`MemoryUsagePredictor`].
#[derive(Debug, Clone)]
pub struct DataPoint {
    /// Seconds since the Unix epoch at observation time.
    pub timestamp: f64,
    /// Allocated bytes at that instant, as f64 for the regression.
    pub memory_usage: f64,
    /// Operation context label the observation was taken under.
    pub context: String,
}
179
/// Adaptive memory pool manager.
///
/// Pools are keyed by power-of-two size class; `BTreeMap` keeps the
/// classes ordered for deterministic iteration.
#[derive(Debug)]
pub struct AdaptivePoolManager {
    /// Size class (bytes) → pool state.
    pools: Arc<Mutex<BTreeMap<usize, MemoryPool>>>,
    /// Growth/shrink tuning parameters.
    config: PoolOptimizationParams,
    /// Expansion/contraction counters.
    stats: PoolManagerStats,
}
187
/// State of a single size-class pool.
///
/// NOTE(review): `allocated_blocks`/`free_blocks`/`total_capacity` are
/// counted in blocks, but they are compared against byte-denominated
/// config limits in `should_shrink`/`shrink` — confirm intended units.
#[derive(Debug)]
pub struct MemoryPool {
    /// Block size in bytes (power of two, ≥ 64).
    pub size_class: usize,
    /// Blocks currently handed out.
    pub allocated_blocks: usize,
    /// Blocks available for immediate reuse.
    pub free_blocks: usize,
    /// allocated_blocks + free_blocks.
    pub total_capacity: usize,
    /// Fraction of requests served from the pool.
    /// NOTE(review): initialized to 1.0 and never updated here — verify.
    pub hit_rate: f64,
    /// Last time this pool was rebalanced.
    pub last_rebalance: SystemTime,
}
197
/// Atomic counters for pool-manager activity.
#[derive(Debug, Default)]
pub struct PoolManagerStats {
    /// Requests satisfied from an existing pool.
    pub pool_hits: AtomicU64,
    /// Requests that missed every pool.
    pub pool_misses: AtomicU64,
    /// Number of pool expansions performed.
    pub pool_expansions: AtomicU64,
    /// Number of pool contractions performed.
    pub pool_contractions: AtomicU64,
    /// Blocks moved between pools.
    /// NOTE(review): never incremented in this module — confirm usage.
    pub cross_pool_transfers: AtomicU64,
}
206
207impl AdvancedMemoryOptimizer {
208    /// Create a new advanced memory optimizer
209    pub fn new() -> Self {
210        Self::with_config(MemoryOptimizationConfig::default())
211    }
212
213    /// Create with custom configuration
214    pub fn with_config(config: MemoryOptimizationConfig) -> Self {
215        Self {
216            strategies: Arc::new(RwLock::new(MemoryStrategies::default())),
217            usage_history: Arc::new(Mutex::new(VecDeque::with_capacity(config.history_window))),
218            stats: Arc::new(MemoryOptimizationStats::default()),
219            predictor: Arc::new(Mutex::new(MemoryUsagePredictor::new())),
220            pool_manager: Arc::new(AdaptivePoolManager::new(config.pool_params.clone())),
221            config,
222        }
223    }
224
225    /// Start the optimization engine
226    pub fn start_optimization(&self, memory_profiler: Arc<Mutex<MemoryProfiler>>) {
227        let optimizer = Arc::new(self.clone());
228        let profiler = Arc::clone(&memory_profiler);
229
230        thread::spawn(move || loop {
231            thread::sleep(optimizer.config.check_interval);
232            optimizer.optimization_cycle(&profiler);
233        });
234    }
235
236    /// Perform one optimization cycle
237    fn optimization_cycle(&self, memory_profiler: &Arc<Mutex<MemoryProfiler>>) {
238        // Collect current memory state
239        let snapshot = self.collect_memory_snapshot(memory_profiler);
240
241        // Add to history
242        self.add_snapshot(snapshot.clone());
243
244        // Analyze patterns and predict future usage
245        if self.should_perform_optimization() {
246            self.perform_optimizations(&snapshot, memory_profiler);
247        }
248
249        // Update predictive models
250        self.update_predictions(&snapshot);
251
252        // Optimize memory pools
253        self.optimize_pools();
254    }
255
256    /// Collect current memory snapshot
257    fn collect_memory_snapshot(
258        &self,
259        memory_profiler: &Arc<Mutex<MemoryProfiler>>,
260    ) -> MemorySnapshot {
261        let profiler = memory_profiler.lock().expect("lock should not be poisoned");
262        let stats_result = profiler.get_stats();
263
264        let stats = match stats_result {
265            Ok(s) => s,
266            Err(_) => MemoryStats::default(), // Use default if error
267        };
268
269        MemorySnapshot {
270            timestamp: SystemTime::now(),
271            total_allocated: stats.allocated,
272            peak_usage: stats.peak,
273            fragmentation_ratio: self.calculate_fragmentation_ratio(&stats),
274            allocation_rate: self.calculate_allocation_rate(&stats),
275            deallocation_rate: self.calculate_deallocation_rate(&stats),
276            gc_pressure: self.calculate_gc_pressure(&stats),
277            operation_context: "background_optimization".to_string(),
278        }
279    }
280
281    /// Add snapshot to history with size management
282    fn add_snapshot(&self, snapshot: MemorySnapshot) {
283        let mut history = self
284            .usage_history
285            .lock()
286            .expect("lock should not be poisoned");
287
288        if history.len() >= self.config.history_window {
289            history.pop_front();
290        }
291
292        history.push_back(snapshot);
293    }
294
295    /// Determine if optimization should be performed
296    fn should_perform_optimization(&self) -> bool {
297        let history = self
298            .usage_history
299            .lock()
300            .expect("lock should not be poisoned");
301
302        if history.len() < self.config.min_samples {
303            return false;
304        }
305
306        // Check if memory pressure is increasing
307        let recent_snapshots: Vec<_> = history.iter().rev().take(10).collect();
308        let pressure_trend = self.calculate_pressure_trend(&recent_snapshots);
309
310        pressure_trend > self.config.warning_threshold
311    }
312
313    /// Perform various optimization strategies
314    fn perform_optimizations(
315        &self,
316        snapshot: &MemorySnapshot,
317        memory_profiler: &Arc<Mutex<MemoryProfiler>>,
318    ) {
319        let strategies = self.strategies.read();
320
321        if strategies.adaptive_gc && snapshot.gc_pressure > self.config.critical_threshold {
322            self.suggest_garbage_collection(memory_profiler);
323        }
324
325        if strategies.memory_compaction && snapshot.fragmentation_ratio > 0.5 {
326            self.perform_memory_compaction(memory_profiler);
327        }
328
329        if strategies.predictive_allocation {
330            self.optimize_future_allocations(snapshot);
331        }
332
333        if strategies.pool_optimization {
334            self.rebalance_pools();
335        }
336
337        self.stats
338            .optimizations_performed
339            .fetch_add(1, Ordering::Relaxed);
340    }
341
342    /// Suggest garbage collection to the system
343    fn suggest_garbage_collection(&self, memory_profiler: &Arc<Mutex<MemoryProfiler>>) {
344        // In a real implementation, this would trigger GC in the runtime
345        println!("Memory optimizer suggests garbage collection");
346        self.stats
347            .gc_triggers_avoided
348            .fetch_add(1, Ordering::Relaxed);
349    }
350
351    /// Perform memory compaction
352    fn perform_memory_compaction(&self, memory_profiler: &Arc<Mutex<MemoryProfiler>>) {
353        // Simulate memory compaction
354        println!("Performing memory compaction to reduce fragmentation");
355        self.stats
356            .fragmentation_reduced
357            .fetch_add(1, Ordering::Relaxed);
358    }
359
360    /// Optimize future allocations based on predictions
361    fn optimize_future_allocations(&self, snapshot: &MemorySnapshot) {
362        let predictor = self.predictor.lock().expect("lock should not be poisoned");
363
364        if let Some(prediction) = predictor.predict_next_allocation() {
365            // Pre-allocate memory pools based on prediction
366            self.pool_manager.prepare_for_allocation(prediction);
367            self.stats
368                .allocation_predictions
369                .fetch_add(1, Ordering::Relaxed);
370        }
371    }
372
373    /// Rebalance memory pools
374    fn rebalance_pools(&self) {
375        self.pool_manager.rebalance_pools();
376    }
377
378    /// Optimize memory pools based on usage patterns
379    fn optimize_pools(&self) {
380        self.pool_manager.optimize_based_on_usage();
381    }
382
383    /// Update predictive models with new data
384    fn update_predictions(&self, snapshot: &MemorySnapshot) {
385        if !self.config.ml_predictions {
386            return;
387        }
388
389        let mut predictor = self.predictor.lock().expect("lock should not be poisoned");
390        predictor.add_data_point(DataPoint {
391            timestamp: snapshot
392                .timestamp
393                .duration_since(SystemTime::UNIX_EPOCH)
394                .unwrap_or_default()
395                .as_secs_f64(),
396            memory_usage: snapshot.total_allocated as f64,
397            context: snapshot.operation_context.clone(),
398        });
399
400        predictor.update_model();
401    }
402
403    // Helper calculation methods
404    fn calculate_fragmentation_ratio(&self, stats: &MemoryStats) -> f64 {
405        if stats.peak == 0 {
406            return 0.0;
407        }
408
409        1.0 - (stats.allocated as f64 / stats.peak as f64)
410    }
411
412    fn calculate_allocation_rate(&self, _stats: &MemoryStats) -> f64 {
413        // Calculate based on recent history
414        let history = self
415            .usage_history
416            .lock()
417            .expect("lock should not be poisoned");
418        if history.len() < 2 {
419            return 0.0;
420        }
421
422        let recent = &history[history.len() - 1];
423        let previous = &history[history.len() - 2];
424
425        let time_delta = recent
426            .timestamp
427            .duration_since(previous.timestamp)
428            .unwrap_or_default()
429            .as_secs_f64();
430
431        if time_delta > 0.0 {
432            (recent.total_allocated as f64 - previous.total_allocated as f64) / time_delta
433        } else {
434            0.0
435        }
436    }
437
438    fn calculate_deallocation_rate(&self, _stats: &MemoryStats) -> f64 {
439        // Similar to allocation rate but for deallocations
440        0.0 // Placeholder
441    }
442
443    fn calculate_gc_pressure(&self, stats: &MemoryStats) -> f64 {
444        // Calculate pressure based on allocation rate and available memory
445        stats.allocated as f64 / stats.peak.max(1) as f64
446    }
447
448    fn calculate_pressure_trend(&self, snapshots: &[&MemorySnapshot]) -> f64 {
449        if snapshots.len() < 2 {
450            return 0.0;
451        }
452
453        // Simple trend calculation
454        let first_pressure = snapshots
455            .first()
456            .expect("snapshots should not be empty after length check")
457            .gc_pressure;
458        let last_pressure = snapshots
459            .last()
460            .expect("snapshots should not be empty after length check")
461            .gc_pressure;
462
463        (last_pressure - first_pressure) / snapshots.len() as f64
464    }
465
466    /// Get optimization statistics
467    pub fn get_stats(&self) -> MemoryOptimizationStats {
468        MemoryOptimizationStats {
469            optimizations_performed: AtomicU64::new(
470                self.stats.optimizations_performed.load(Ordering::Relaxed),
471            ),
472            memory_saved: AtomicUsize::new(self.stats.memory_saved.load(Ordering::Relaxed)),
473            gc_triggers_avoided: AtomicU64::new(
474                self.stats.gc_triggers_avoided.load(Ordering::Relaxed),
475            ),
476            fragmentation_reduced: AtomicU64::new(
477                self.stats.fragmentation_reduced.load(Ordering::Relaxed),
478            ),
479            allocation_predictions: AtomicU64::new(
480                self.stats.allocation_predictions.load(Ordering::Relaxed),
481            ),
482            prediction_accuracy: AtomicUsize::new(
483                self.stats.prediction_accuracy.load(Ordering::Relaxed),
484            ),
485        }
486    }
487
488    /// Export optimization data
489    pub fn export_optimization_data(&self, path: &str) -> Result<(), Box<dyn std::error::Error>> {
490        let data = OptimizationExportData {
491            config: self.config.clone(),
492            strategies: self.strategies.read().clone(),
493            history: self
494                .usage_history
495                .lock()
496                .expect("lock should not be poisoned")
497                .clone()
498                .into(),
499            stats: self.get_optimization_stats_summary(),
500            timestamp: SystemTime::now(),
501        };
502
503        let json = serde_json::to_string_pretty(&data)?;
504        std::fs::write(path, json)?;
505        Ok(())
506    }
507
508    fn get_optimization_stats_summary(&self) -> OptimizationStatsSummary {
509        OptimizationStatsSummary {
510            total_optimizations: self.stats.optimizations_performed.load(Ordering::Relaxed),
511            memory_saved_bytes: self.stats.memory_saved.load(Ordering::Relaxed),
512            gc_triggers_avoided: self.stats.gc_triggers_avoided.load(Ordering::Relaxed),
513            fragmentation_events_reduced: self.stats.fragmentation_reduced.load(Ordering::Relaxed),
514            prediction_accuracy_percent: self.stats.prediction_accuracy.load(Ordering::Relaxed)
515                as f64
516                / 100.0,
517        }
518    }
519}
520
521impl Clone for AdvancedMemoryOptimizer {
522    fn clone(&self) -> Self {
523        Self {
524            strategies: Arc::clone(&self.strategies),
525            usage_history: Arc::clone(&self.usage_history),
526            stats: Arc::clone(&self.stats),
527            config: self.config.clone(),
528            predictor: Arc::clone(&self.predictor),
529            pool_manager: Arc::clone(&self.pool_manager),
530        }
531    }
532}
533
534impl MemoryUsagePredictor {
535    fn new() -> Self {
536        Self {
537            data_points: Vec::new(),
538            slope: 0.0,
539            intercept: 0.0,
540            patterns: HashMap::new(),
541            confidence: 0.0,
542        }
543    }
544
545    fn add_data_point(&mut self, point: DataPoint) {
546        self.data_points.push(point);
547
548        // Keep only recent data points
549        if self.data_points.len() > 1000 {
550            self.data_points.remove(0);
551        }
552    }
553
554    fn update_model(&mut self) {
555        if self.data_points.len() < 10 {
556            return;
557        }
558
559        // Simple linear regression
560        let n = self.data_points.len() as f64;
561        let sum_x: f64 = self.data_points.iter().map(|p| p.timestamp).sum();
562        let sum_y: f64 = self.data_points.iter().map(|p| p.memory_usage).sum();
563        let sum_xy: f64 = self
564            .data_points
565            .iter()
566            .map(|p| p.timestamp * p.memory_usage)
567            .sum();
568        let sum_x2: f64 = self
569            .data_points
570            .iter()
571            .map(|p| p.timestamp * p.timestamp)
572            .sum();
573
574        let denom = n * sum_x2 - sum_x * sum_x;
575        if denom.abs() > f64::EPSILON {
576            self.slope = (n * sum_xy - sum_x * sum_y) / denom;
577            self.intercept = (sum_y - self.slope * sum_x) / n;
578        }
579
580        self.update_patterns();
581        self.calculate_confidence();
582    }
583
584    fn update_patterns(&mut self) {
585        // Detect seasonal patterns based on context
586        let mut context_averages: HashMap<String, Vec<f64>> = HashMap::new();
587
588        for point in &self.data_points {
589            context_averages
590                .entry(point.context.clone())
591                .or_default()
592                .push(point.memory_usage);
593        }
594
595        for (context, usages) in context_averages {
596            let average = usages.iter().sum::<f64>() / usages.len() as f64;
597            self.patterns.insert(context, average);
598        }
599    }
600
601    fn calculate_confidence(&mut self) {
602        // Simple confidence calculation based on prediction accuracy
603        self.confidence = if self.data_points.len() >= 50 {
604            0.8 // High confidence with enough data
605        } else {
606            0.5 // Medium confidence with limited data
607        };
608    }
609
610    fn predict_next_allocation(&self) -> Option<f64> {
611        if self.confidence < 0.3 {
612            return None;
613        }
614
615        // Predict based on linear trend
616        let current_time = SystemTime::now()
617            .duration_since(SystemTime::UNIX_EPOCH)
618            .unwrap_or_default()
619            .as_secs_f64();
620
621        let predicted = self.slope * current_time + self.intercept;
622
623        if predicted > 0.0 {
624            Some(predicted)
625        } else {
626            None
627        }
628    }
629}
630
631impl AdaptivePoolManager {
632    fn new(config: PoolOptimizationParams) -> Self {
633        Self {
634            pools: Arc::new(Mutex::new(BTreeMap::new())),
635            config,
636            stats: PoolManagerStats::default(),
637        }
638    }
639
640    fn prepare_for_allocation(&self, predicted_size: f64) {
641        let size_class = self.calculate_size_class(predicted_size as usize);
642        let mut pools = self.pools.lock().expect("lock should not be poisoned");
643
644        pools
645            .entry(size_class)
646            .or_insert_with(|| MemoryPool::new(size_class))
647            .prepare_for_demand();
648    }
649
650    fn calculate_size_class(&self, size: usize) -> usize {
651        // Round up to nearest power of 2
652        let mut class = 64; // Minimum size class
653        while class < size {
654            class *= 2;
655        }
656        class.min(self.config.max_pool_size)
657    }
658
659    fn rebalance_pools(&self) {
660        let mut pools = self.pools.lock().expect("lock should not be poisoned");
661
662        for pool in pools.values_mut() {
663            if pool.should_expand() {
664                pool.expand(&self.config);
665                self.stats.pool_expansions.fetch_add(1, Ordering::Relaxed);
666            } else if pool.should_shrink(&self.config) {
667                pool.shrink(&self.config);
668                self.stats.pool_contractions.fetch_add(1, Ordering::Relaxed);
669            }
670        }
671    }
672
673    fn optimize_based_on_usage(&self) {
674        // Analyze usage patterns and optimize pool sizes
675        let pools = self.pools.lock().expect("lock should not be poisoned");
676
677        for pool in pools.values() {
678            if pool.hit_rate < 0.5 {
679                // Consider reducing this pool size
680                println!(
681                    "Pool size class {} has low hit rate: {:.2}",
682                    pool.size_class, pool.hit_rate
683                );
684            }
685        }
686    }
687}
688
impl MemoryPool {
    /// Fresh pool for one size class, seeded with 8 free blocks and a
    /// perfect (1.0) hit rate.
    fn new(size_class: usize) -> Self {
        Self {
            size_class,
            allocated_blocks: 0,
            free_blocks: 8, // Start with some free blocks
            total_capacity: 8,
            hit_rate: 1.0,
            last_rebalance: SystemTime::now(),
        }
    }

    /// Top up the free list (by 4 blocks) when it runs low, ahead of a
    /// predicted allocation burst.
    fn prepare_for_demand(&mut self) {
        if self.free_blocks < 2 {
            self.free_blocks += 4;
            self.total_capacity += 4;
        }
    }

    /// Expand only when the pool is exhausted AND frequently useful.
    fn should_expand(&self) -> bool {
        self.free_blocks == 0 && self.hit_rate > 0.8
    }

    /// Shrink when utilization drops below the threshold.
    /// NOTE(review): `total_capacity` counts blocks while
    /// `config.min_pool_size` is in bytes (default 64 KB), so with the
    /// default 8-block capacity this comparison is effectively never
    /// true — confirm whether capacities should be byte-denominated.
    fn should_shrink(&self, config: &PoolOptimizationParams) -> bool {
        let utilization = self.allocated_blocks as f64 / self.total_capacity as f64;
        utilization < config.shrink_threshold && self.total_capacity > config.min_pool_size
    }

    /// Grow capacity by (growth_factor - 1)x, clamping to the configured
    /// maximum; excess is removed from the free list first.
    fn expand(&mut self, config: &PoolOptimizationParams) {
        let growth = (self.total_capacity as f64 * (config.growth_factor - 1.0)) as usize;
        self.free_blocks += growth;
        self.total_capacity += growth;

        if self.total_capacity > config.max_pool_size {
            let excess = self.total_capacity - config.max_pool_size;
            self.free_blocks = self.free_blocks.saturating_sub(excess);
            self.total_capacity = config.max_pool_size;
        }
    }

    /// Shed free capacity down toward the configured minimum.
    /// NOTE(review): same block-vs-byte unit concern as `should_shrink`;
    /// also note the reduction comes from `1.0 - shrink_threshold`, not
    /// from current utilization — verify that is intended.
    fn shrink(&mut self, config: &PoolOptimizationParams) {
        let reduction = (self.total_capacity as f64 * (1.0 - config.shrink_threshold)) as usize;
        self.free_blocks = self.free_blocks.saturating_sub(reduction);
        self.total_capacity = (self.total_capacity - reduction).max(config.min_pool_size);
    }
}
735
/// Export data structure — full serializable dump of optimizer state,
/// written as JSON by `export_optimization_data`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationExportData {
    /// Active configuration at export time.
    pub config: MemoryOptimizationConfig,
    /// Strategy toggles at export time.
    pub strategies: MemoryStrategies,
    /// Snapshot history (oldest first, as recorded).
    pub history: Vec<MemorySnapshot>,
    /// Flattened counter summary.
    pub stats: OptimizationStatsSummary,
    /// When the export was produced.
    pub timestamp: SystemTime,
}
745
/// Plain (non-atomic) summary of optimization counters, suitable for
/// serialization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationStatsSummary {
    /// Total optimization cycles that performed work.
    pub total_optimizations: u64,
    /// Bytes reported saved.
    pub memory_saved_bytes: usize,
    /// GC suggestion count.
    pub gc_triggers_avoided: u64,
    /// Compaction pass count.
    pub fragmentation_events_reduced: u64,
    /// Prediction accuracy as a percentage (internal value / 100).
    pub prediction_accuracy_percent: f64,
}
754
755/// Convenient functions for creating optimizers
756pub fn create_memory_optimizer() -> AdvancedMemoryOptimizer {
757    AdvancedMemoryOptimizer::new()
758}
759
760pub fn create_memory_optimizer_with_aggressive_settings() -> AdvancedMemoryOptimizer {
761    let mut config = MemoryOptimizationConfig::default();
762    config.warning_threshold = 0.6;
763    config.critical_threshold = 0.8;
764    config.check_interval = Duration::from_secs(1);
765
766    AdvancedMemoryOptimizer::with_config(config)
767}
768
769pub fn create_memory_optimizer_for_low_memory() -> AdvancedMemoryOptimizer {
770    let mut config = MemoryOptimizationConfig::default();
771    config.warning_threshold = 0.5;
772    config.critical_threshold = 0.7;
773    config.pool_params.initial_pool_size = 256 * 1024; // 256KB
774    config.pool_params.max_pool_size = 16 * 1024 * 1024; // 16MB
775
776    AdvancedMemoryOptimizer::with_config(config)
777}
778
#[cfg(test)]
mod tests {
    // Removed unused `use std::thread;` and `use std::time::Duration;` —
    // no test in this module spawns threads or sleeps.
    use super::*;

    #[test]
    fn test_memory_optimizer_creation() {
        let optimizer = create_memory_optimizer();
        assert!(optimizer.config.ml_predictions);
    }

    #[test]
    fn test_memory_snapshot() {
        let snapshot = MemorySnapshot {
            timestamp: SystemTime::now(),
            total_allocated: 1024,
            peak_usage: 2048,
            fragmentation_ratio: 0.5,
            allocation_rate: 100.0,
            deallocation_rate: 50.0,
            gc_pressure: 0.6,
            operation_context: "test".to_string(),
        };

        assert_eq!(snapshot.total_allocated, 1024);
        assert_eq!(snapshot.peak_usage, 2048);
    }

    #[test]
    fn test_memory_pool_expansion() {
        let config = PoolOptimizationParams::default();
        let mut pool = MemoryPool::new(1024);

        let initial_capacity = pool.total_capacity;
        pool.expand(&config);

        assert!(pool.total_capacity > initial_capacity);
    }

    #[test]
    fn test_predictor_data_points() {
        let mut predictor = MemoryUsagePredictor::new();

        predictor.add_data_point(DataPoint {
            timestamp: 1.0,
            memory_usage: 1024.0,
            context: "test".to_string(),
        });

        assert_eq!(predictor.data_points.len(), 1);
    }

    #[test]
    fn test_pool_manager_size_class_calculation() {
        let config = PoolOptimizationParams::default();
        let manager = AdaptivePoolManager::new(config);

        // Size classes round up to the next power of two.
        assert_eq!(manager.calculate_size_class(100), 128);
        assert_eq!(manager.calculate_size_class(1000), 1024);
        assert_eq!(manager.calculate_size_class(2000), 2048);
    }

    #[test]
    fn test_optimization_stats() {
        let optimizer = create_memory_optimizer();
        let stats = optimizer.get_stats();

        assert_eq!(stats.optimizations_performed.load(Ordering::Relaxed), 0);
        // Prediction accuracy is seeded with a non-zero prior (85%).
        assert!(stats.prediction_accuracy.load(Ordering::Relaxed) > 0);
    }

    #[test]
    fn test_export_optimization_data() {
        let optimizer = create_memory_optimizer();
        let temp_path = std::env::temp_dir().join("test_optimization_export.json");

        let result = optimizer.export_optimization_data(temp_path.to_str().unwrap());
        assert!(result.is_ok());

        // Verify file exists
        assert!(temp_path.exists());

        // Clean up
        let _ = std::fs::remove_file(temp_path);
    }

    #[test]
    fn test_aggressive_optimizer_settings() {
        let optimizer = create_memory_optimizer_with_aggressive_settings();
        assert!(optimizer.config.warning_threshold < 0.7);
        assert!(optimizer.config.critical_threshold < 0.9);
    }

    #[test]
    fn test_low_memory_optimizer_settings() {
        let optimizer = create_memory_optimizer_for_low_memory();
        assert!(optimizer.config.pool_params.max_pool_size < 128 * 1024 * 1024);
    }
}