//! Adaptive performance optimization for memory tracking: analyzes
//! allocation patterns and runtime metrics, then tunes the sampling
//! configuration of the underlying tracker.

use crate::core::types::{MemoryStats, TrackingResult};
use crate::core::ultra_fast_tracker::{SamplingStats, UltraFastSamplingConfig, UltraFastTracker};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

/// Observed allocation behavior over a sliding window.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationPattern {
    /// Average allocation size in bytes.
    pub avg_allocation_size: f64,
    /// Standard deviation of allocation sizes in bytes.
    pub size_std_dev: f64,
    /// Allocations per second over the current window.
    pub allocation_frequency: f64,
    /// Allocation counts per log2 size bucket.
    pub size_distribution: HashMap<u32, u64>,
    /// Estimated peak allocation rate, in allocations per second.
    pub peak_allocation_rate: f64,
    /// Memory pressure score in 0.0..=1.0.
    pub memory_pressure: f64,
    /// Thread contention score in 0.0..=1.0.
    pub thread_contention: f64,
}

impl Default for AllocationPattern {
    fn default() -> Self {
        Self {
            avg_allocation_size: 0.0,
            size_std_dev: 0.0,
            allocation_frequency: 0.0,
            size_distribution: HashMap::new(),
            peak_allocation_rate: 0.0,
            memory_pressure: 0.0,
            thread_contention: 0.0,
        }
    }
}

/// Configuration changes suggested by the optimization engine.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendations {
    /// Suggested sampling configuration.
    pub recommended_config: UltraFastSamplingConfig,
    /// Expected performance improvement, in percent.
    pub expected_improvement: f64,
    /// Confidence in the recommendations (0.0..=1.0).
    pub confidence: f64,
    /// Concrete actions to apply.
    pub actions: Vec<OptimizationAction>,
    /// Expected memory overhead reduction, in percent.
    pub memory_overhead_reduction: f64,
    /// Expected CPU overhead reduction, in percent.
    pub cpu_overhead_reduction: f64,
}

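/// A single optimization action, carrying its parameters and a
/// human-readable reason. A construction sketch (the values below are
/// illustrative, not recommended defaults):
///
/// ```ignore
/// let action = OptimizationAction::AdjustSampling {
///     size_threshold: 512,
///     new_rate: 0.001,
///     reason: "mostly small allocations".to_string(),
/// };
/// ```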
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum OptimizationAction {
    /// Change the sampling rate for allocations around a size threshold.
    AdjustSampling {
        size_threshold: usize,
        new_rate: f32,
        reason: String,
    },
    /// Enable or disable an optional feature such as "simd".
    ToggleFeature {
        feature: String,
        enable: bool,
        reason: String,
    },
    /// Resize the per-thread record buffers.
    AdjustBuffers { new_size: usize, reason: String },
    /// Switch to a different tracking algorithm (currently a no-op).
    SwitchAlgorithm { algorithm: String, reason: String },
}

/// Snapshot of tracker performance.
#[derive(Debug, Clone)]
pub struct PerformanceMetrics {
    /// CPU overhead as a percentage of total CPU time.
    pub cpu_overhead_percent: f64,
    /// Memory overhead in bytes.
    pub memory_overhead_bytes: u64,
    /// Average tracking latency in nanoseconds.
    pub tracking_latency_ns: u64,
    /// Throughput in operations per second.
    pub throughput_ops_per_sec: f64,
    /// Data quality score (0.0..=1.0).
    pub data_quality_score: f64,
    /// Overall system health score (0.0..=1.0).
    pub system_health_score: f64,
}

/// Adaptive optimizer that tunes the tracking configuration based on
/// observed allocation patterns and runtime performance metrics.
pub struct PerformanceOptimizer {
    /// Underlying high-performance tracker.
    tracker: Arc<UltraFastTracker>,
    /// Sliding-window analyzer of allocation behavior.
    pattern_analyzer: Arc<RwLock<PatternAnalyzer>>,
    /// Collector for runtime performance metrics.
    metrics_collector: Arc<MetricsCollector>,
    /// Engine that turns patterns and metrics into recommendations.
    optimization_engine: Arc<OptimizationEngine>,
    /// Currently active sampling configuration.
    current_config: Arc<RwLock<UltraFastSamplingConfig>>,
    /// Whether automatic self-optimization is enabled.
    optimization_enabled: AtomicBool,
}

impl PerformanceOptimizer {
    /// Creates an optimizer with the default sampling configuration.
    pub fn new() -> Self {
        let config = UltraFastSamplingConfig::default();
        let tracker = Arc::new(UltraFastTracker::with_config(config.clone()));

        Self {
            tracker,
            pattern_analyzer: Arc::new(RwLock::new(PatternAnalyzer::new())),
            metrics_collector: Arc::new(MetricsCollector::new()),
            optimization_engine: Arc::new(OptimizationEngine::new()),
            current_config: Arc::new(RwLock::new(config)),
            optimization_enabled: AtomicBool::new(true),
        }
    }

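    /// Records an allocation: updates the pattern analyzer, forwards to the
    /// underlying tracker, and periodically triggers self-optimization.
    ///
    /// A usage sketch (the pointer value and type name are illustrative):
    ///
    /// ```ignore
    /// let optimizer = PerformanceOptimizer::new();
    /// optimizer.track_allocation(0x1000, 1024, "Vec<i32>")?;
    /// optimizer.track_deallocation(0x1000)?;
    /// ```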
    pub fn track_allocation(&self, ptr: usize, size: usize, type_name: &str) -> TrackingResult<()> {
        let start_time = Instant::now();

        // Feed the pattern analyzer first so recommendations see this allocation.
        if let Ok(mut analyzer) = self.pattern_analyzer.write() {
            analyzer.record_allocation(size, type_name);
        }

        let result = self.tracker.track_allocation(ptr, size, type_name);

        let duration = start_time.elapsed();
        self.metrics_collector.record_operation_latency(duration);

        // Periodically check whether the configuration should be re-tuned.
        if self.optimization_enabled.load(Ordering::Relaxed) {
            self.consider_optimization();
        }

        result
    }

    pub fn track_deallocation(&self, ptr: usize) -> TrackingResult<()> {
        let start_time = Instant::now();

        let result = self.tracker.track_deallocation(ptr);

        let duration = start_time.elapsed();
        self.metrics_collector.record_operation_latency(duration);

        result
    }

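    /// Returns a snapshot of collected performance metrics.
    ///
    /// A reading sketch (field values depend on what has been tracked so far):
    ///
    /// ```ignore
    /// let metrics = optimizer.get_performance_metrics();
    /// println!("avg tracking latency: {} ns", metrics.tracking_latency_ns);
    /// println!("throughput: {:.0} ops/s", metrics.throughput_ops_per_sec);
    /// ```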
    pub fn get_performance_metrics(&self) -> PerformanceMetrics {
        self.metrics_collector.get_current_metrics()
    }

    /// Returns the allocation pattern observed over the current window,
    /// or a default pattern if the analyzer lock is poisoned.
    pub fn get_allocation_patterns(&self) -> AllocationPattern {
        self.pattern_analyzer
            .read()
            .map(|analyzer| analyzer.get_current_pattern())
            .unwrap_or_default()
    }

    /// Builds optimization recommendations from current patterns and metrics.
    pub fn get_optimization_recommendations(&self) -> OptimizationRecommendations {
        let patterns = self.get_allocation_patterns();
        let metrics = self.get_performance_metrics();
        // Fall back to the default config rather than panicking on a poisoned lock.
        let current_config = self
            .current_config
            .read()
            .map(|config| config.clone())
            .unwrap_or_default();

        self.optimization_engine
            .generate_recommendations(patterns, metrics, current_config)
    }

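    /// Applies a set of recommendations: stores the recommended configuration
    /// and executes each action.
    ///
    /// A minimal sketch of the recommend-then-apply flow (the 0.7 threshold
    /// here is illustrative, not prescribed by this API):
    ///
    /// ```ignore
    /// let recommendations = optimizer.get_optimization_recommendations();
    /// if recommendations.confidence > 0.7 {
    ///     optimizer.apply_optimizations(&recommendations)?;
    /// }
    /// ```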
    pub fn apply_optimizations(
        &self,
        recommendations: &OptimizationRecommendations,
    ) -> TrackingResult<()> {
        // Store the recommended configuration for subsequent tracking.
        if let Ok(mut config) = self.current_config.write() {
            *config = recommendations.recommended_config.clone();
        }

        for action in &recommendations.actions {
            self.apply_optimization_action(action)?;
        }

        self.metrics_collector.record_optimization_event();

        Ok(())
    }

    pub fn set_optimization_enabled(&self, enabled: bool) {
        self.optimization_enabled.store(enabled, Ordering::Relaxed);
    }

    pub fn get_stats(&self) -> TrackingResult<MemoryStats> {
        self.tracker.get_stats()
    }

    pub fn get_sampling_stats(&self) -> SamplingStats {
        self.tracker.get_sampling_stats()
    }

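    /// Generates recommendations and applies them immediately when the engine
    /// is at least moderately confident (confidence above 0.5).
    ///
    /// A usage sketch:
    ///
    /// ```ignore
    /// let optimizer = PerformanceOptimizer::new();
    /// optimizer.force_optimization()?;
    /// ```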
    pub fn force_optimization(&self) -> TrackingResult<()> {
        let recommendations = self.get_optimization_recommendations();
        if recommendations.confidence > 0.5 {
            self.apply_optimizations(&recommendations)?;
        }
        Ok(())
    }

    fn consider_optimization(&self) {
        // Re-evaluate only every 10,000 operations to keep the hot path cheap.
        if self.metrics_collector.get_operation_count() % 10_000 == 0 {
            // Recommendation generation must never take down the tracked
            // program, so any panic inside it is swallowed here.
            if let Ok(recommendations) = std::panic::catch_unwind(
                std::panic::AssertUnwindSafe(|| self.get_optimization_recommendations()),
            ) {
                if recommendations.confidence > 0.7 {
                    let _ = self.apply_optimizations(&recommendations);
                }
            }
        }
    }

    fn apply_optimization_action(&self, action: &OptimizationAction) -> TrackingResult<()> {
        match action {
            OptimizationAction::AdjustSampling {
                size_threshold,
                new_rate,
                ..
            } => {
                if let Ok(mut config) = self.current_config.write() {
                    // Thresholds of 1 KiB and above target the medium size class.
                    if *size_threshold >= 1024 {
                        config.medium_sample_rate = *new_rate;
                    } else {
                        config.small_sample_rate = *new_rate;
                    }
                }
            }
            OptimizationAction::AdjustBuffers { new_size, .. } => {
                if let Ok(mut config) = self.current_config.write() {
                    config.max_records_per_thread = *new_size;
                }
            }
            OptimizationAction::ToggleFeature {
                feature, enable, ..
            } => {
                if feature == "simd" {
                    if let Ok(mut config) = self.current_config.write() {
                        config.enable_simd = *enable;
                    }
                }
            }
            OptimizationAction::SwitchAlgorithm { .. } => {
                // Algorithm switching is not implemented yet; accepted as a no-op.
            }
        }
        Ok(())
    }
}

/// Sliding-window analyzer of allocation behavior.
struct PatternAnalyzer {
    /// Most recent allocation sizes, newest last.
    recent_sizes: VecDeque<usize>,
    /// Allocation counts per log2 size bucket.
    size_buckets: HashMap<u32, u64>,
    /// Timestamps of recent allocations, in microseconds since the Unix epoch.
    allocation_times: VecDeque<u64>,
    /// Allocation counts per type name.
    type_frequency: HashMap<String, u64>,
    /// Maximum number of samples kept in each window.
    window_size: usize,
}

impl PatternAnalyzer {
    fn new() -> Self {
        Self {
            recent_sizes: VecDeque::with_capacity(10000),
            size_buckets: HashMap::new(),
            allocation_times: VecDeque::with_capacity(10000),
            type_frequency: HashMap::new(),
            window_size: 10000,
        }
    }

    fn record_allocation(&mut self, size: usize, type_name: &str) {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;

        // Keep the size window bounded.
        self.recent_sizes.push_back(size);
        if self.recent_sizes.len() > self.window_size {
            self.recent_sizes.pop_front();
        }

        // Keep the timestamp window bounded.
        self.allocation_times.push_back(now);
        if self.allocation_times.len() > self.window_size {
            self.allocation_times.pop_front();
        }

        // Update the size histogram and per-type counts.
        let bucket = Self::size_to_bucket(size);
        *self.size_buckets.entry(bucket).or_insert(0) += 1;

        *self
            .type_frequency
            .entry(type_name.to_string())
            .or_insert(0) += 1;
    }

    fn get_current_pattern(&self) -> AllocationPattern {
        let avg_size = if !self.recent_sizes.is_empty() {
            self.recent_sizes.iter().sum::<usize>() as f64 / self.recent_sizes.len() as f64
        } else {
            0.0
        };

        // Sample standard deviation of the recent allocation sizes.
        let size_std_dev = if self.recent_sizes.len() > 1 {
            let mean = avg_size;
            let variance = self
                .recent_sizes
                .iter()
                .map(|&size| {
                    let diff = size as f64 - mean;
                    diff * diff
                })
                .sum::<f64>()
                / (self.recent_sizes.len() - 1) as f64;
            variance.sqrt()
        } else {
            0.0
        };

        // Allocations per second, derived from the microsecond timestamp window.
        let frequency = if self.allocation_times.len() >= 2 {
            let time_span =
                self.allocation_times.back().unwrap() - self.allocation_times.front().unwrap();
            if time_span > 0 {
                (self.allocation_times.len() as f64 * 1_000_000.0) / time_span as f64
            } else {
                0.0
            }
        } else {
            0.0
        };

        AllocationPattern {
            avg_allocation_size: avg_size,
            size_std_dev,
            allocation_frequency: frequency,
            size_distribution: self.size_buckets.clone(),
            // Rough estimate: assume peaks run ~50% above the average rate.
            peak_allocation_rate: frequency * 1.5,
            memory_pressure: self.calculate_memory_pressure(),
            // Placeholder until real contention measurement is available.
            thread_contention: 0.1,
        }
    }

    /// Maps an allocation size to its log2 bucket, capped at 31.
    /// For example, sizes 1024..=2047 map to bucket 10.
    fn size_to_bucket(size: usize) -> u32 {
        if size == 0 {
            return 0;
        }
        let log_size = (size as f64).log2() as u32;
        log_size.min(31)
    }

    fn calculate_memory_pressure(&self) -> f64 {
        let avg_size = if !self.recent_sizes.is_empty() {
            self.recent_sizes.iter().sum::<usize>() as f64 / self.recent_sizes.len() as f64
        } else {
            0.0
        };

        // Heuristic: scale average size times window occupancy into 0.0..=1.0.
        let frequency = self.allocation_times.len() as f64;
        let pressure = (avg_size * frequency) / 1_000_000.0;
        pressure.min(1.0)
    }
}

/// Collector for runtime performance metrics.
struct MetricsCollector {
    /// Recent per-operation latencies, in nanoseconds.
    operation_latencies: RwLock<VecDeque<u64>>,
    /// Total number of tracked operations.
    operation_count: AtomicU64,
    /// Estimated memory overhead, in bytes.
    memory_overhead: AtomicU64,
    /// Accumulated tracking CPU time, in nanoseconds.
    cpu_time_ns: AtomicU64,
    /// Unix timestamp (seconds) of the last optimization event.
    last_optimization: AtomicU64,
}

impl MetricsCollector {
    fn new() -> Self {
        Self {
            operation_latencies: RwLock::new(VecDeque::with_capacity(10000)),
            operation_count: AtomicU64::new(0),
            memory_overhead: AtomicU64::new(0),
            cpu_time_ns: AtomicU64::new(0),
            last_optimization: AtomicU64::new(0),
        }
    }

    fn record_operation_latency(&self, duration: Duration) {
        let latency_ns = duration.as_nanos() as u64;

        self.operation_count.fetch_add(1, Ordering::Relaxed);
        self.cpu_time_ns.fetch_add(latency_ns, Ordering::Relaxed);

        // Keep a bounded window of recent latencies.
        if let Ok(mut latencies) = self.operation_latencies.write() {
            latencies.push_back(latency_ns);
            if latencies.len() > 10000 {
                latencies.pop_front();
            }
        }
    }

    fn record_optimization_event(&self) {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        self.last_optimization.store(now, Ordering::Relaxed);
    }

    fn get_current_metrics(&self) -> PerformanceMetrics {
        let avg_latency = if let Ok(latencies) = self.operation_latencies.read() {
            if !latencies.is_empty() {
                latencies.iter().sum::<u64>() / latencies.len() as u64
            } else {
                0
            }
        } else {
            0
        };

        let operations = self.operation_count.load(Ordering::Relaxed);
        let cpu_time = self.cpu_time_ns.load(Ordering::Relaxed);

        let throughput = if cpu_time > 0 {
            (operations as f64 * 1_000_000_000.0) / cpu_time as f64
        } else {
            0.0
        };

        PerformanceMetrics {
            // Fixed placeholder estimates until real measurement is wired in.
            cpu_overhead_percent: 5.0,
            memory_overhead_bytes: self.memory_overhead.load(Ordering::Relaxed),
            tracking_latency_ns: avg_latency,
            throughput_ops_per_sec: throughput,
            data_quality_score: 0.95,
            system_health_score: 0.9,
        }
    }

    fn get_operation_count(&self) -> u64 {
        self.operation_count.load(Ordering::Relaxed)
    }
}

/// Turns allocation patterns and metrics into optimization recommendations.
struct OptimizationEngine;

impl OptimizationEngine {
    fn new() -> Self {
        Self
    }

    fn generate_recommendations(
        &self,
        patterns: AllocationPattern,
        metrics: PerformanceMetrics,
        current_config: UltraFastSamplingConfig,
    ) -> OptimizationRecommendations {
        let mut actions = Vec::new();
        // Base confidence; each matching heuristic below raises it.
        let mut confidence = 0.5;

        // Mostly small allocations: sample less aggressively to cut overhead.
        if patterns.avg_allocation_size < 512.0 && current_config.small_sample_rate > 0.001 {
            actions.push(OptimizationAction::AdjustSampling {
                size_threshold: 512,
                new_rate: 0.001,
                reason: "Small allocations detected, reducing sampling overhead".to_string(),
            });
            confidence += 0.2;
        }

        // High allocation frequency: grow per-thread buffers to avoid churn.
        if patterns.allocation_frequency > 1000.0 && current_config.max_records_per_thread < 20000 {
            actions.push(OptimizationAction::AdjustBuffers {
                new_size: 20000,
                reason: "High allocation frequency, increasing buffer size".to_string(),
            });
            confidence += 0.15;
        }

        // Excessive CPU overhead: halve the medium-size sampling rate.
        if metrics.cpu_overhead_percent > 10.0 {
            actions.push(OptimizationAction::AdjustSampling {
                size_threshold: 1024,
                new_rate: current_config.medium_sample_rate * 0.5,
                reason: "High CPU overhead detected, reducing sampling".to_string(),
            });
            confidence += 0.25;
        }

        // SIMD available at compile time but disabled: turn it on.
        if !current_config.enable_simd && cfg!(target_feature = "avx2") {
            actions.push(OptimizationAction::ToggleFeature {
                feature: "simd".to_string(),
                enable: true,
                reason: "SIMD support detected, enabling optimizations".to_string(),
            });
            confidence += 0.1;
        }

        // Fold the actions into a copy of the current configuration.
        let mut optimized_config = current_config;

        for action in &actions {
            match action {
                OptimizationAction::AdjustSampling {
                    size_threshold,
                    new_rate,
                    ..
                } => {
                    if *size_threshold >= 1024 {
                        optimized_config.medium_sample_rate = *new_rate;
                    } else {
                        optimized_config.small_sample_rate = *new_rate;
                    }
                }
                OptimizationAction::AdjustBuffers { new_size, .. } => {
                    optimized_config.max_records_per_thread = *new_size;
                }
                OptimizationAction::ToggleFeature {
                    feature, enable, ..
                } => {
                    if feature == "simd" {
                        optimized_config.enable_simd = *enable;
                    }
                }
                // SwitchAlgorithm has no configuration impact here.
                _ => {}
            }
        }

        OptimizationRecommendations {
            recommended_config: optimized_config,
            // Rough estimate: improvement assumed to scale with confidence.
            expected_improvement: confidence * 30.0,
            confidence: confidence.min(1.0),
            actions,
            // Fixed placeholder estimates, in percent.
            memory_overhead_reduction: 15.0,
            cpu_overhead_reduction: 25.0,
        }
    }
}

impl Default for PerformanceOptimizer {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_performance_optimizer_basic() {
        let optimizer = PerformanceOptimizer::new();

        optimizer
            .track_allocation(0x1000, 1024, "Vec<i32>")
            .unwrap();
        optimizer.track_allocation(0x2000, 512, "String").unwrap();

        let stats = optimizer.get_stats().unwrap();
        assert_eq!(stats.total_allocations, 2);

        let metrics = optimizer.get_performance_metrics();
        assert!(metrics.throughput_ops_per_sec >= 0.0);
    }

    #[test]
    fn test_pattern_analysis() {
        let optimizer = PerformanceOptimizer::new();

        for i in 0..100 {
            optimizer
                .track_allocation(0x1000 + i, 1024, "TestType")
                .unwrap();
        }

        let patterns = optimizer.get_allocation_patterns();
        assert!(patterns.avg_allocation_size > 0.0);
        assert!(patterns.allocation_frequency >= 0.0);
    }

    #[test]
    fn test_optimization_recommendations() {
        let optimizer = PerformanceOptimizer::new();

        for i in 0..1000 {
            optimizer
                .track_allocation(0x1000 + i, 64, "SmallAlloc")
                .unwrap();
        }

        let recommendations = optimizer.get_optimization_recommendations();
        assert!(recommendations.confidence >= 0.5);
        assert!(!recommendations.actions.is_empty());
    }

    #[test]
    fn test_pattern_analyzer() {
        let mut analyzer = PatternAnalyzer::new();

        analyzer.record_allocation(1024, "Vec<i32>");
        analyzer.record_allocation(512, "String");
        analyzer.record_allocation(1024, "HashMap");

        let pattern = analyzer.get_current_pattern();
        assert!(pattern.avg_allocation_size > 0.0);
    }
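
    // A hedged addition, not part of the original suite: checks the documented
    // log2 bucketing of size_to_bucket and its cap at bucket 31.
    #[test]
    fn test_size_to_bucket_log2() {
        assert_eq!(PatternAnalyzer::size_to_bucket(0), 0);
        assert_eq!(PatternAnalyzer::size_to_bucket(1), 0);
        assert_eq!(PatternAnalyzer::size_to_bucket(1024), 10);
        assert_eq!(PatternAnalyzer::size_to_bucket(usize::MAX), 31);
    }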
}