use super::{MemoryMetrics, PerformanceMetrics};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tokio::sync::{RwLock, Semaphore};

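/// Configuration for the memory optimizer: pressure and fragmentation thresholds,
/// per-cache size limits, GC and pool tuning, and the set of enabled strategies.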
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryOptimizerConfig {
    pub enabled: bool,
    pub pressure_threshold: f64,
    pub fragmentation_threshold: f64,
    pub cache_limits: CacheLimits,
    pub gc_settings: GcSettings,
    pub pool_settings: PoolSettings,
    pub monitoring_interval: Duration,
    pub enabled_strategies: Vec<OptimizationStrategy>,
}

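/// Size limits, TTL, and eviction policy for the model, audio, and embedding caches.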
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheLimits {
    pub max_model_cache_bytes: u64,
    pub max_audio_cache_bytes: u64,
    pub max_embedding_cache_bytes: u64,
    pub cache_ttl_seconds: u64,
    pub enable_lru_eviction: bool,
}

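/// Settings controlling when and how aggressively the optimizer runs its
/// garbage-collection passes.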
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GcSettings {
    pub auto_gc_enabled: bool,
    pub gc_pressure_threshold: f64,
    pub min_gc_interval: Duration,
    pub force_gc_after_allocations: usize,
    pub gc_target_heap_percent: f64,
}

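/// Configuration for pre-allocated memory pools, keyed by buffer size in bytes.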
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolSettings {
    pub enabled: bool,
    pub pool_sizes: HashMap<usize, usize>,
    pub preallocation_size: usize,
    pub cleanup_interval: Duration,
    pub max_pool_memory: u64,
}

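/// The optimization strategies the optimizer can recommend and apply.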
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum OptimizationStrategy {
    AggressiveCacheCleanup,
    MemoryPooling,
    LazyModelLoading,
    AudioBufferOptimization,
    EmbeddingCompression,
    HeapCompaction,
    PreallocationOptimization,
    MemoryMappedFiles,
    ZeroCopyOptimization,
}

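/// Outcome of a single optimization run, recorded in the optimization history.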
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationResult {
    pub strategy: OptimizationStrategy,
    pub memory_freed_bytes: u64,
    pub optimization_time_ms: u64,
    pub success: bool,
    pub error_message: Option<String>,
    pub performance_impact: f64,
}

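/// Running counters, a size histogram, and per-source statistics for tracked allocations.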
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationTracker {
    pub total_allocations: u64,
    pub total_deallocations: u64,
    pub active_allocations: u64,
    pub peak_allocations: u64,
    pub size_histogram: HashMap<usize, u64>,
    pub source_tracking: HashMap<String, AllocationSource>,
    pub recent_patterns: VecDeque<AllocationPattern>,
}

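/// Aggregate allocation statistics attributed to a single named source.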
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationSource {
    pub name: String,
    pub total_bytes: u64,
    pub allocation_count: u64,
    pub average_size: f64,
    pub peak_allocation: u64,
    pub last_allocation: u64,
}

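/// A recently observed allocation event, classified by pattern type.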
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationPattern {
    pub timestamp: u64,
    pub size: usize,
    pub source: String,
    pub pattern_type: PatternType,
    pub duration_ms: u64,
}

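/// Classification of allocation behavior used when recommending strategies.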
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum PatternType {
    Burst,
    Steady,
    Large,
    Fragmented,
    Cyclic,
}

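/// Coordinates allocation tracking, memory pools, cache managers, and the
/// background monitoring and optimization tasks.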
pub struct MemoryOptimizer {
    config: MemoryOptimizerConfig,
    allocation_tracker: Arc<RwLock<AllocationTracker>>,
    memory_pools: Arc<RwLock<HashMap<usize, Vec<Vec<u8>>>>>,
    cache_managers: Arc<RwLock<HashMap<String, CacheManager>>>,
    optimization_history: Arc<RwLock<VecDeque<OptimizationResult>>>,
    last_optimization: Arc<RwLock<Instant>>,
    pressure_semaphore: Arc<Semaphore>,
    is_running: Arc<RwLock<bool>>,
}

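/// Internal bookkeeping for a single cache: current/max size, hit rate, and LRU key order.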
#[derive(Debug)]
struct CacheManager {
    name: String,
    current_size: u64,
    max_size: u64,
    entry_count: usize,
    last_cleanup: Instant,
    hit_rate: f64,
    lru_keys: VecDeque<String>,
}

impl MemoryOptimizer {
    pub fn new(config: MemoryOptimizerConfig) -> Self {
        // Derive the number of pressure permits from the threshold: the higher
        // the threshold, the fewer permits are made available.
        let pressure_permits = if config.pressure_threshold > 0.0 {
            ((100.0 - config.pressure_threshold) * 10.0) as usize
        } else {
            1000
        };

        Self {
            config,
            allocation_tracker: Arc::new(RwLock::new(AllocationTracker::new())),
            memory_pools: Arc::new(RwLock::new(HashMap::new())),
            cache_managers: Arc::new(RwLock::new(HashMap::new())),
            optimization_history: Arc::new(RwLock::new(VecDeque::new())),
            last_optimization: Arc::new(RwLock::new(Instant::now())),
            pressure_semaphore: Arc::new(Semaphore::new(pressure_permits)),
            is_running: Arc::new(RwLock::new(false)),
        }
    }

    pub async fn start(&self) -> Result<(), Box<dyn std::error::Error>> {
        let mut is_running = self.is_running.write().await;
        if *is_running {
            return Ok(());
        }
        *is_running = true;
        drop(is_running);

        tracing::info!("Starting memory optimizer");

        // Pre-allocate pools and cache managers, then spawn the background tasks.
        self.initialize_memory_pools().await;

        self.initialize_cache_managers().await;

        self.start_monitoring_task().await;

        self.start_optimization_task().await;

        Ok(())
    }

    pub async fn stop(&self) -> Result<(), Box<dyn std::error::Error>> {
        let mut is_running = self.is_running.write().await;
        if !*is_running {
            return Ok(());
        }
        *is_running = false;

        tracing::info!("Stopped memory optimizer");
        Ok(())
    }

    pub async fn analyze_memory_usage(
        &self,
        metrics: &MemoryMetrics,
    ) -> Vec<OptimizationRecommendation> {
        let mut recommendations = Vec::new();

        let memory_pressure = self.calculate_memory_pressure(metrics).await;
        if memory_pressure > self.config.pressure_threshold {
            recommendations.push(OptimizationRecommendation {
                strategy: OptimizationStrategy::AggressiveCacheCleanup,
                priority: 9,
                description: format!("High memory pressure detected: {:.1}%", memory_pressure),
                expected_savings_mb: self.estimate_cache_cleanup_savings().await,
                implementation_effort: ImplementationEffort::Low,
                performance_impact: -0.1,
            });
        }

        if metrics.fragmentation_percent > self.config.fragmentation_threshold {
            recommendations.push(OptimizationRecommendation {
                strategy: OptimizationStrategy::HeapCompaction,
                priority: 7,
                description: format!(
                    "Memory fragmentation detected: {:.1}%",
                    metrics.fragmentation_percent
                ),
                expected_savings_mb: (metrics.heap_used as f64 * metrics.fragmentation_percent
                    / 100.0
                    / 1_000_000.0) as u32,
                implementation_effort: ImplementationEffort::Medium,
                performance_impact: -0.2,
            });
        }

        if metrics.cache_hit_rate < 70.0 {
            recommendations.push(OptimizationRecommendation {
                strategy: OptimizationStrategy::EmbeddingCompression,
                priority: 6,
                description: format!("Low cache hit rate: {:.1}%", metrics.cache_hit_rate),
                expected_savings_mb: self.estimate_compression_savings().await,
                implementation_effort: ImplementationEffort::Medium,
                performance_impact: 0.15,
            });
        }

        let patterns = self.analyze_allocation_patterns().await;
        for pattern in patterns {
            if pattern.pattern_type == PatternType::Burst {
                recommendations.push(OptimizationRecommendation {
                    strategy: OptimizationStrategy::MemoryPooling,
                    priority: 8,
                    description: "Burst allocation pattern detected - memory pooling recommended"
                        .to_string(),
                    expected_savings_mb: self.estimate_pooling_savings().await,
                    implementation_effort: ImplementationEffort::High,
                    performance_impact: 0.25,
                });
                break;
            }
        }

        let tracker = self.allocation_tracker.read().await;
        if let Some(&large_allocs) = tracker
            .size_histogram
            .keys()
            .find(|&&size| size > 100_000_000)
        {
            drop(tracker);
            recommendations.push(OptimizationRecommendation {
                strategy: OptimizationStrategy::MemoryMappedFiles,
                priority: 5,
                description: "Large allocations detected - memory mapping recommended".to_string(),
                expected_savings_mb: (large_allocs / 1_000_000) as u32,
                implementation_effort: ImplementationEffort::High,
                performance_impact: 0.1,
            });
        }

        recommendations.sort_by(|a, b| b.priority.cmp(&a.priority));
        recommendations
    }

    pub async fn apply_optimization(&self, strategy: OptimizationStrategy) -> OptimizationResult {
        let start_time = Instant::now();

        // Each handler returns (memory_freed_bytes, success, error_message, performance_impact).
        let result = match strategy {
            OptimizationStrategy::AggressiveCacheCleanup => self.perform_cache_cleanup().await,
            OptimizationStrategy::MemoryPooling => self.optimize_memory_pools().await,
            OptimizationStrategy::LazyModelLoading => self.implement_lazy_loading().await,
            OptimizationStrategy::AudioBufferOptimization => self.optimize_audio_buffers().await,
            OptimizationStrategy::EmbeddingCompression => self.compress_embeddings().await,
            OptimizationStrategy::HeapCompaction => self.perform_heap_compaction().await,
            OptimizationStrategy::PreallocationOptimization => self.optimize_preallocation().await,
            OptimizationStrategy::MemoryMappedFiles => self.implement_memory_mapping().await,
            OptimizationStrategy::ZeroCopyOptimization => self.implement_zero_copy().await,
        };

        let optimization_time_ms = start_time.elapsed().as_millis() as u64;

        let final_result = OptimizationResult {
            strategy,
            memory_freed_bytes: result.0,
            optimization_time_ms,
            success: result.1,
            error_message: result.2,
            performance_impact: result.3,
        };

        // Keep only the most recent 100 results in the history.
        let mut history = self.optimization_history.write().await;
        history.push_back(final_result.clone());
        if history.len() > 100 {
            history.pop_front();
        }

        final_result
    }

    pub async fn get_optimization_stats(&self) -> MemoryOptimizationStats {
        let tracker = self.allocation_tracker.read().await;
        let history = self.optimization_history.read().await;

        let total_optimizations = history.len();
        let successful_optimizations = history.iter().filter(|r| r.success).count();
        let total_memory_freed: u64 = history.iter().map(|r| r.memory_freed_bytes).sum();

        let average_optimization_time = if !history.is_empty() {
            history.iter().map(|r| r.optimization_time_ms).sum::<u64>() / history.len() as u64
        } else {
            0
        };

        MemoryOptimizationStats {
            total_optimizations,
            successful_optimizations,
            success_rate: if total_optimizations > 0 {
                (successful_optimizations as f64 / total_optimizations as f64) * 100.0
            } else {
                0.0
            },
            total_memory_freed_gb: total_memory_freed as f64 / 1_000_000_000.0,
            average_optimization_time_ms: average_optimization_time,
            current_allocation_count: tracker.active_allocations,
            peak_allocation_count: tracker.peak_allocations,
            fragmentation_events: self.count_fragmentation_events().await,
            cache_efficiency: self.calculate_overall_cache_efficiency().await,
        }
    }

    async fn initialize_memory_pools(&self) {
        if !self.config.pool_settings.enabled {
            return;
        }

        let mut pools = self.memory_pools.write().await;

        for (&size, &count) in &self.config.pool_settings.pool_sizes {
            let mut pool = Vec::with_capacity(count);
            for _ in 0..self.config.pool_settings.preallocation_size.min(count) {
                pool.push(vec![0u8; size]);
            }
            pools.insert(size, pool);
        }

        tracing::info!("Initialized {} memory pools", pools.len());
    }

    async fn initialize_cache_managers(&self) {
        let mut managers = self.cache_managers.write().await;

        managers.insert(
            "models".to_string(),
            CacheManager {
                name: "models".to_string(),
                current_size: 0,
                max_size: self.config.cache_limits.max_model_cache_bytes,
                entry_count: 0,
                last_cleanup: Instant::now(),
                hit_rate: 0.0,
                lru_keys: VecDeque::new(),
            },
        );

        managers.insert(
            "audio".to_string(),
            CacheManager {
                name: "audio".to_string(),
                current_size: 0,
                max_size: self.config.cache_limits.max_audio_cache_bytes,
                entry_count: 0,
                last_cleanup: Instant::now(),
                hit_rate: 0.0,
                lru_keys: VecDeque::new(),
            },
        );

        managers.insert(
            "embeddings".to_string(),
            CacheManager {
                name: "embeddings".to_string(),
                current_size: 0,
                max_size: self.config.cache_limits.max_embedding_cache_bytes,
                entry_count: 0,
                last_cleanup: Instant::now(),
                hit_rate: 0.0,
                lru_keys: VecDeque::new(),
            },
        );

        tracing::info!("Initialized {} cache managers", managers.len());
    }

    async fn start_monitoring_task(&self) {
        let is_running = self.is_running.clone();
        let interval = self.config.monitoring_interval;
        let allocation_tracker = self.allocation_tracker.clone();

        tokio::spawn(async move {
            let mut interval_timer = tokio::time::interval(interval);

            loop {
                interval_timer.tick().await;

                let running = is_running.read().await;
                if !*running {
                    break;
                }
                drop(running);

                tracing::debug!("Memory monitoring tick");
            }
        });
    }

    async fn start_optimization_task(&self) {
        let is_running = self.is_running.clone();
        let config = self.config.clone();
        let optimizer = self.clone();

        tokio::spawn(async move {
            let mut interval_timer = tokio::time::interval(Duration::from_secs(60));

            loop {
                interval_timer.tick().await;

                let running = is_running.read().await;
                if !*running {
                    break;
                }
                drop(running);

                if let Some(strategy) = optimizer.determine_needed_optimization().await {
                    let result = optimizer.apply_optimization(strategy).await;
                    if result.success {
                        tracing::info!(
                            "Applied optimization {:?}, freed {} MB",
                            result.strategy,
                            result.memory_freed_bytes / 1_000_000
                        );
                    } else {
                        tracing::warn!(
                            "Failed to apply optimization {:?}: {:?}",
                            result.strategy,
                            result.error_message
                        );
                    }
                }
            }
        });
    }

    async fn calculate_memory_pressure(&self, metrics: &MemoryMetrics) -> f64 {
        // Blend heap usage (relative to ~1 GB of headroom), fragmentation, and
        // allocation rate into a rough 0-100 pressure score.
        let heap_pressure =
            (metrics.heap_used as f64 / (metrics.heap_used + 1_000_000_000) as f64) * 100.0;
        let fragmentation_pressure = metrics.fragmentation_percent;
        let allocation_pressure = metrics.allocations_per_sec / 1000.0;

        (heap_pressure + fragmentation_pressure + allocation_pressure) / 3.0
    }

    async fn determine_needed_optimization(&self) -> Option<OptimizationStrategy> {
        let allocation_tracker = self.allocation_tracker.read().await;

        // Trigger aggressive cleanup once active allocations reach 80% of the recorded peak.
        if allocation_tracker.active_allocations > allocation_tracker.peak_allocations * 80 / 100 {
            return Some(OptimizationStrategy::AggressiveCacheCleanup);
        }

        if allocation_tracker
            .recent_patterns
            .iter()
            .any(|p| p.pattern_type == PatternType::Burst)
        {
            return Some(OptimizationStrategy::MemoryPooling);
        }

        None
    }

    // Strategy handlers: each currently returns a fixed
    // (memory_freed_bytes, success, error_message, performance_impact) tuple.

    async fn perform_cache_cleanup(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Performing aggressive cache cleanup");
        (50_000_000, true, None, -0.05)
    }

    async fn optimize_memory_pools(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Optimizing memory pools");
        (30_000_000, true, None, 0.15)
    }

    async fn implement_lazy_loading(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Implementing lazy model loading");
        (100_000_000, true, None, 0.1)
    }

    async fn optimize_audio_buffers(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Optimizing audio buffers");
        (20_000_000, true, None, 0.05)
    }

    async fn compress_embeddings(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Compressing embeddings");
        (75_000_000, true, None, 0.08)
    }

    async fn perform_heap_compaction(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Performing heap compaction");
        (40_000_000, true, None, -0.1)
    }

    async fn optimize_preallocation(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Optimizing preallocation");
        (25_000_000, true, None, 0.12)
    }

    async fn implement_memory_mapping(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Implementing memory mapping");
        (150_000_000, true, None, 0.2)
    }

    async fn implement_zero_copy(&self) -> (u64, bool, Option<String>, f64) {
        tracing::info!("Implementing zero-copy optimizations");
        (60_000_000, true, None, 0.18)
    }

    // Rough expected savings in MB used when building recommendations.

    async fn estimate_cache_cleanup_savings(&self) -> u32 {
        50
    }

    async fn estimate_compression_savings(&self) -> u32 {
        30
    }

    async fn estimate_pooling_savings(&self) -> u32 {
        25
    }

    async fn analyze_allocation_patterns(&self) -> Vec<AllocationPattern> {
        let tracker = self.allocation_tracker.read().await;
        tracker.recent_patterns.iter().cloned().collect()
    }

    async fn count_fragmentation_events(&self) -> u64 {
        0
    }

    async fn calculate_overall_cache_efficiency(&self) -> f64 {
        let managers = self.cache_managers.read().await;
        if managers.is_empty() {
            return 0.0;
        }

        let total_hit_rate: f64 = managers.values().map(|m| m.hit_rate).sum();
        total_hit_rate / managers.len() as f64
    }
}

impl Clone for MemoryOptimizer {
    fn clone(&self) -> Self {
        Self {
            config: self.config.clone(),
            allocation_tracker: self.allocation_tracker.clone(),
            memory_pools: self.memory_pools.clone(),
            cache_managers: self.cache_managers.clone(),
            optimization_history: self.optimization_history.clone(),
            last_optimization: self.last_optimization.clone(),
            pressure_semaphore: self.pressure_semaphore.clone(),
            is_running: self.is_running.clone(),
        }
    }
}

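/// A prioritized suggestion produced by `analyze_memory_usage`.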
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendation {
    pub strategy: OptimizationStrategy,
    pub priority: u8,
    pub description: String,
    pub expected_savings_mb: u32,
    pub implementation_effort: ImplementationEffort,
    pub performance_impact: f64,
}

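/// Relative effort required to implement a recommendation.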
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ImplementationEffort {
    Low,
    Medium,
    High,
}

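/// Summary statistics over the optimization history and allocation tracker.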
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryOptimizationStats {
    pub total_optimizations: usize,
    pub successful_optimizations: usize,
    pub success_rate: f64,
    pub total_memory_freed_gb: f64,
    pub average_optimization_time_ms: u64,
    pub current_allocation_count: u64,
    pub peak_allocation_count: u64,
    pub fragmentation_events: u64,
    pub cache_efficiency: f64,
}

impl AllocationTracker {
    fn new() -> Self {
        Self {
            total_allocations: 0,
            total_deallocations: 0,
            active_allocations: 0,
            peak_allocations: 0,
            size_histogram: HashMap::new(),
            source_tracking: HashMap::new(),
            recent_patterns: VecDeque::new(),
        }
    }
}

impl Default for MemoryOptimizerConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            pressure_threshold: 80.0,
            fragmentation_threshold: 15.0,
            cache_limits: CacheLimits::default(),
            gc_settings: GcSettings::default(),
            pool_settings: PoolSettings::default(),
            monitoring_interval: Duration::from_secs(30),
            enabled_strategies: vec![
                OptimizationStrategy::AggressiveCacheCleanup,
                OptimizationStrategy::MemoryPooling,
                OptimizationStrategy::LazyModelLoading,
                OptimizationStrategy::AudioBufferOptimization,
            ],
        }
    }
}

impl Default for CacheLimits {
    fn default() -> Self {
        Self {
            max_model_cache_bytes: 2_000_000_000,   // 2 GB
            max_audio_cache_bytes: 1_000_000_000,   // 1 GB
            max_embedding_cache_bytes: 500_000_000, // 500 MB
            cache_ttl_seconds: 3600,                // 1 hour
            enable_lru_eviction: true,
        }
    }
}

impl Default for GcSettings {
    fn default() -> Self {
        Self {
            auto_gc_enabled: true,
            gc_pressure_threshold: 85.0,
            min_gc_interval: Duration::from_secs(300), // 5 minutes
            force_gc_after_allocations: 10000,
            gc_target_heap_percent: 70.0,
        }
    }
}

impl Default for PoolSettings {
    fn default() -> Self {
        let mut pool_sizes = HashMap::new();
        pool_sizes.insert(1024, 100); // 1 KB buffers
        pool_sizes.insert(4096, 50); // 4 KB buffers
        pool_sizes.insert(16384, 25); // 16 KB buffers
        pool_sizes.insert(65536, 10); // 64 KB buffers
        pool_sizes.insert(262144, 5); // 256 KB buffers

        Self {
            enabled: true,
            pool_sizes,
            preallocation_size: 10,
            cleanup_interval: Duration::from_secs(600), // 10 minutes
            max_pool_memory: 100_000_000,               // 100 MB
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_memory_optimizer_creation() {
        let config = MemoryOptimizerConfig::default();
        let optimizer = MemoryOptimizer::new(config);

        assert!(!*optimizer.is_running.read().await);
    }

    #[tokio::test]
    async fn test_memory_pressure_calculation() {
        let config = MemoryOptimizerConfig::default();
        let optimizer = MemoryOptimizer::new(config);

        let metrics = MemoryMetrics {
            heap_used: 800_000_000, // 800 MB
            fragmentation_percent: 20.0,
            allocations_per_sec: 500.0,
            ..Default::default()
        };

        let pressure = optimizer.calculate_memory_pressure(&metrics).await;
        assert!(pressure > 0.0);
    }

    #[tokio::test]
    async fn test_optimization_recommendations() {
        let config = MemoryOptimizerConfig::default();
        let optimizer = MemoryOptimizer::new(config);

        let metrics = MemoryMetrics {
            heap_used: 900_000_000,      // 900 MB
            fragmentation_percent: 85.0, // well above the default threshold
            cache_hit_rate: 50.0,        // below the 70% target
            allocations_per_sec: 150_000.0,
            ..Default::default()
        };

        let recommendations = optimizer.analyze_memory_usage(&metrics).await;
        assert!(!recommendations.is_empty());

        assert!(recommendations
            .iter()
            .any(|r| r.strategy == OptimizationStrategy::AggressiveCacheCleanup));
    }

    #[tokio::test]
    async fn test_cache_cleanup_optimization() {
        let config = MemoryOptimizerConfig::default();
        let optimizer = MemoryOptimizer::new(config);

        let result = optimizer
            .apply_optimization(OptimizationStrategy::AggressiveCacheCleanup)
            .await;

        assert!(result.success);
        assert!(result.memory_freed_bytes > 0);
    }

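    // Usage sketch added for illustration (not part of the original test suite):
    // exercises the start -> get_optimization_stats -> stop lifecycle using only
    // the public API defined above.
    #[tokio::test]
    async fn test_optimizer_lifecycle_sketch() {
        let optimizer = MemoryOptimizer::new(MemoryOptimizerConfig::default());

        optimizer.start().await.expect("optimizer should start");
        // Starting twice is a no-op because `start` checks the running flag first.
        optimizer.start().await.expect("second start is a no-op");

        // No optimizations have been applied yet, so the history is empty.
        let stats = optimizer.get_optimization_stats().await;
        assert_eq!(stats.total_optimizations, 0);
        assert_eq!(stats.current_allocation_count, 0);

        optimizer.stop().await.expect("optimizer should stop");
        assert!(!*optimizer.is_running.read().await);
    }
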
    #[test]
    fn test_config_defaults() {
        let config = MemoryOptimizerConfig::default();

        assert!(config.enabled);
        assert_eq!(config.pressure_threshold, 80.0);
        assert_eq!(config.fragmentation_threshold, 15.0);
        assert!(!config.enabled_strategies.is_empty());
    }

    #[test]
    fn test_allocation_tracker() {
        let tracker = AllocationTracker::new();

        assert_eq!(tracker.total_allocations, 0);
        assert_eq!(tracker.active_allocations, 0);
        assert!(tracker.size_histogram.is_empty());
    }
}