1use crate::error::{Result, TextError};
7use std::collections::HashMap;
8use std::sync::{Arc, Mutex, RwLock};
9use std::time::{Duration, Instant};
10
/// Central performance monitor: keeps a bounded history of measurements and
/// shares aggregation, resource-tracking, and optimization state across
/// threads via `Arc`-wrapped locks.
#[derive(Debug)]
pub struct AdvancedPerformanceMonitor {
    /// Recorded data points; trimmed once it grows past an internal cap.
    metricshistory: Arc<RwLock<Vec<PerformanceDataPoint>>>,
    /// Per-operation running statistics and alert counters.
    realtime_aggregator: Arc<Mutex<RealtimeAggregator>>,
    /// Limits that trigger alert counters when a data point breaches them.
    alert_thresholds: PerformanceThresholds,
    /// Memory/CPU/GPU/network usage trackers.
    resource_monitor: Arc<Mutex<SystemResourceMonitor>>,
    /// Pattern-matching engine producing optimization recommendations.
    optimization_engine: Arc<Mutex<OptimizationEngine>>,
}
25
/// One recorded measurement for a single completed operation.
#[derive(Debug, Clone)]
pub struct PerformanceDataPoint {
    /// When the operation started.
    pub timestamp: Instant,
    /// Free-form label identifying the kind of operation.
    pub operationtype: String,
    /// Wall-clock duration of the operation.
    pub processing_time: Duration,
    /// Number of items handled; used to derive throughput (items/second).
    pub itemsprocessed: usize,
    /// Memory used, in bytes (thresholds are expressed in MB and scaled).
    pub memory_usage: usize,
    /// CPU utilization on a 0–100 percentage scale (per threshold defaults).
    pub cpu_utilization: f64,
    /// GPU utilization on a 0–100 percentage scale.
    pub gpu_utilization: f64,
    /// Cache hit rate as a fraction in [0, 1].
    pub cache_hit_rate: f64,
    /// Arbitrary additional named metrics.
    pub custom_metrics: HashMap<String, f64>,
}
48
/// Mutable aggregation state kept behind the monitor's mutex.
#[derive(Debug)]
struct RealtimeAggregator {
    /// Start instant of the most recently started operation, if any.
    current_operation: Option<Instant>,
    /// Running statistics keyed by operation type.
    running_stats: HashMap<String, RunningStatistics>,
    /// Number of times each alert has fired, keyed by alert name.
    alert_counts: HashMap<String, usize>,
}

/// Incrementally updated summary statistics for one operation type.
#[derive(Debug, Clone)]
struct RunningStatistics {
    /// Number of samples observed.
    count: usize,
    /// Sum of all samples.
    sum: f64,
    /// Sum of squared samples (not read in this file; kept for
    /// variance-style calculations).
    sum_squared: f64,
    /// Smallest sample seen.
    min: f64,
    /// Largest sample seen.
    max: f64,
    /// Exponential moving average of the samples.
    moving_average: f64,
}
76
/// Alert thresholds; a data point breaching any of these increments the
/// corresponding alert counter.
#[derive(Debug, Clone)]
pub struct PerformanceThresholds {
    /// Maximum acceptable processing time per operation, in milliseconds.
    pub max_processing_time_ms: u64,
    /// Minimum acceptable throughput, in items per second.
    pub min_throughput: f64,
    /// Maximum acceptable memory usage, in megabytes.
    pub max_memory_usage_mb: usize,
    /// Maximum acceptable CPU utilization percentage (0–100).
    pub max_cpu_utilization: f64,
    /// Minimum acceptable cache hit rate (fraction in [0, 1]).
    pub min_cache_hit_rate: f64,
}
91
/// Container for per-resource usage trackers.
#[derive(Debug)]
struct SystemResourceMonitor {
    /// Memory usage bookkeeping.
    memory_tracker: MemoryTracker,
    /// CPU usage bookkeeping.
    cpu_tracker: CpuUsageTracker,
    /// GPU tracking; `None` when no GPU is monitored (always `None` here).
    #[allow(dead_code)]
    gpu_tracker: Option<GpuUsageTracker>,
    /// Network I/O bookkeeping.
    network_tracker: NetworkTracker,
}

/// Tracks process memory usage over time.
#[derive(Debug)]
struct MemoryTracker {
    /// Highest usage observed (never updated in this file; stays 0).
    peak_usage: usize,
    /// Most recent usage reading (not updated in this file).
    #[allow(dead_code)]
    current_usage: usize,
    /// Log of individual allocation events (not populated in this file).
    #[allow(dead_code)]
    allocations: Vec<AllocationEvent>,
}

/// A single recorded allocation (currently never constructed).
#[derive(Debug, Clone)]
struct AllocationEvent {
    /// When the allocation happened.
    #[allow(dead_code)]
    timestamp: Instant,
    /// Allocation size — presumably bytes; TODO confirm at the call site.
    #[allow(dead_code)]
    size: usize,
    /// Free-form category of the allocation.
    #[allow(dead_code)]
    allocation_type: String,
}

/// Tracks CPU utilization over time.
#[derive(Debug)]
struct CpuUsageTracker {
    /// Individual samples (not populated in this file).
    #[allow(dead_code)]
    usage_samples: Vec<CpuUsageSample>,
    /// Latest load average; reported as `avg_cpu_utilization`.
    load_average: f64,
}

/// One CPU utilization sample (currently never constructed).
#[derive(Debug, Clone)]
struct CpuUsageSample {
    #[allow(dead_code)]
    timestamp: Instant,
    #[allow(dead_code)]
    utilization: f64,
}

/// Tracks GPU utilization and memory (unused: `gpu_tracker` is `None`).
#[derive(Debug)]
struct GpuUsageTracker {
    #[allow(dead_code)]
    utilization_samples: Vec<GpuUsageSample>,
    #[allow(dead_code)]
    memory_usage: usize,
}

/// One GPU utilization sample (currently never constructed).
#[derive(Debug, Clone)]
struct GpuUsageSample {
    #[allow(dead_code)]
    timestamp: Instant,
    #[allow(dead_code)]
    utilization: f64,
    #[allow(dead_code)]
    memory_utilization: f64,
}

/// Tracks network I/O totals and latency samples.
#[derive(Debug)]
struct NetworkTracker {
    /// Total bytes sent (never updated in this file; stays 0).
    bytes_sent: usize,
    /// Total bytes received (never updated in this file; stays 0).
    bytes_received: usize,
    /// Individual latency samples (not populated in this file).
    #[allow(dead_code)]
    latency_samples: Vec<NetworkLatencySample>,
}

/// One network round-trip latency sample (currently never constructed).
#[derive(Debug, Clone)]
struct NetworkLatencySample {
    #[allow(dead_code)]
    timestamp: Instant,
    #[allow(dead_code)]
    latency_ms: f64,
}
201
/// Matches recorded data points against known performance patterns and
/// maintains the resulting recommendations.
#[derive(Debug)]
struct OptimizationEngine {
    /// Known patterns to test each data point against.
    patterndatabase: Vec<PerformancePattern>,
    /// Recommendations currently pending (deduplicated by id).
    current_recommendations: Vec<OptimizationRecommendation>,
    /// Record of recommendations that have been applied.
    optimizationhistory: Vec<OptimizationApplication>,
}

/// A named performance anti-pattern plus the advice to emit when it fires.
#[derive(Debug, Clone)]
struct PerformancePattern {
    /// Stable identifier for the pattern.
    #[allow(dead_code)]
    id: String,
    /// Human-readable explanation of the pattern.
    #[allow(dead_code)]
    description: String,
    /// All conditions must hold for the pattern to match.
    conditions: Vec<PerformanceCondition>,
    /// Recommendations emitted when the pattern matches.
    recommendations: Vec<OptimizationRecommendation>,
}

/// A single metric comparison, e.g. `processing_time_ms > 1000`.
#[derive(Debug, Clone)]
struct PerformanceCondition {
    /// Metric name; must be one the matcher knows how to extract.
    metric: String,
    /// How the metric is compared against the threshold.
    operator: ComparisonOperator,
    /// Value the metric is compared against.
    threshold: f64,
}

/// Comparison operators available to pattern conditions.
#[derive(Debug, Clone)]
#[allow(dead_code)]
enum ComparisonOperator {
    GreaterThan,
    LessThan,
    /// Approximate equality (within 0.001 in the matcher).
    EqualTo,
    GreaterOrEqual,
    LessOrEqual,
}
254
/// A concrete, user-facing optimization suggestion.
#[derive(Debug, Clone)]
pub struct OptimizationRecommendation {
    /// Stable identifier; used for deduplication and for `apply_optimization`.
    pub id: String,
    /// Broad grouping, e.g. "Performance".
    pub category: String,
    /// Human-readable description of the suggested change.
    pub recommendation: String,
    /// Estimated improvement (values in this file are fractions, e.g. 0.3).
    pub impact_estimate: f64,
    /// Implementation difficulty on a small integer scale.
    pub complexity: u8,
    /// Conditions that must hold before applying, e.g. hardware support.
    pub prerequisites: Vec<String>,
}

/// Audit record of an applied recommendation and the performance around it.
#[derive(Debug, Clone)]
pub struct OptimizationApplication {
    /// When the recommendation was applied.
    #[allow(dead_code)]
    timestamp: Instant,
    /// The recommendation that was applied.
    #[allow(dead_code)]
    optimization: OptimizationRecommendation,
    /// Snapshot taken before applying.
    #[allow(dead_code)]
    performance_before: PerformanceSnapshot,
    /// Snapshot taken after applying; `None` until measured.
    #[allow(dead_code)]
    performance_after: Option<PerformanceSnapshot>,
}

/// Point-in-time averages used to compare before/after performance.
#[derive(Debug, Clone)]
struct PerformanceSnapshot {
    #[allow(dead_code)]
    avg_processing_time: Duration,
    #[allow(dead_code)]
    avg_throughput: f64,
    #[allow(dead_code)]
    avg_memory_usage: usize,
    #[allow(dead_code)]
    avg_cpu_utilization: f64,
}
305
306impl Default for PerformanceThresholds {
307 fn default() -> Self {
308 Self {
309 max_processing_time_ms: 1000, min_throughput: 100.0, max_memory_usage_mb: 8192, max_cpu_utilization: 90.0, min_cache_hit_rate: 0.8, }
315 }
316}
317
318impl AdvancedPerformanceMonitor {
319 pub fn new() -> Self {
321 Self {
322 metricshistory: Arc::new(RwLock::new(Vec::new())),
323 realtime_aggregator: Arc::new(Mutex::new(RealtimeAggregator::new())),
324 alert_thresholds: PerformanceThresholds::default(),
325 resource_monitor: Arc::new(Mutex::new(SystemResourceMonitor::new())),
326 optimization_engine: Arc::new(Mutex::new(OptimizationEngine::new())),
327 }
328 }
329
330 pub fn with_thresholds(thresholds: PerformanceThresholds) -> Self {
332 Self {
333 metricshistory: Arc::new(RwLock::new(Vec::new())),
334 realtime_aggregator: Arc::new(Mutex::new(RealtimeAggregator::new())),
335 alert_thresholds: thresholds,
336 resource_monitor: Arc::new(Mutex::new(SystemResourceMonitor::new())),
337 optimization_engine: Arc::new(Mutex::new(OptimizationEngine::new())),
338 }
339 }
340
341 pub fn start_operation(&self, operationtype: &str) -> Result<OperationMonitor> {
343 let mut aggregator = self.realtime_aggregator.lock().unwrap();
344 aggregator.start_operation(operationtype)?;
345
346 Ok(OperationMonitor {
347 operationtype: operationtype.to_string(),
348 start_time: Instant::now(),
349 monitor: self,
350 })
351 }
352
353 pub fn record_performance(&self, datapoint: PerformanceDataPoint) -> Result<()> {
355 let mut history = self.metricshistory.write().unwrap();
357 history.push(datapoint.clone());
358
359 if history.len() > 10000 {
361 history.drain(0..1000); }
363 drop(history);
364
365 let mut aggregator = self.realtime_aggregator.lock().unwrap();
367 aggregator.update_statistics(&datapoint)?;
368 drop(aggregator);
369
370 self.check_alerts(&datapoint)?;
372
373 let mut optimizer = self.optimization_engine.lock().unwrap();
375 optimizer.update_recommendations(&datapoint)?;
376 drop(optimizer);
377
378 Ok(())
379 }
380
381 pub fn get_performance_summary(&self) -> Result<PerformanceSummary> {
383 let history = self.metricshistory.read().unwrap();
384 let aggregator = self.realtime_aggregator.lock().unwrap();
385
386 let recent_window = std::cmp::min(100, history.len());
387 let recentdata = if recent_window > 0 {
388 &history[history.len() - recent_window..]
389 } else {
390 &[]
391 };
392
393 let summary = PerformanceSummary {
394 total_operations: history.len(),
395 recent_avg_processing_time: Self::calculate_avg_processing_time(recentdata),
396 recent_avg_throughput: Self::calculate_avg_throughput(recentdata),
397 recent_avg_memory_usage: Self::calculate_avg_memory_usage(recentdata),
398 cache_hit_rate: Self::calculate_avg_cache_hit_rate(recentdata),
399 active_alerts: aggregator.get_active_alerts(),
400 optimization_opportunities: self.get_optimization_opportunities()?,
401 };
402
403 Ok(summary)
404 }
405
406 pub fn get_optimization_opportunities(&self) -> Result<Vec<OptimizationRecommendation>> {
408 let optimizer = self.optimization_engine.lock().unwrap();
409 Ok(optimizer.current_recommendations.clone())
410 }
411
412 pub fn apply_optimization(&self, optimizationid: &str) -> Result<()> {
414 let mut optimizer = self.optimization_engine.lock().unwrap();
415 optimizer.apply_optimization(optimizationid)?;
416 Ok(())
417 }
418
419 pub fn generate_performance_report(&self) -> Result<DetailedPerformanceReport> {
421 let summary = self.get_performance_summary()?;
423
424 let history = self.metricshistory.read().unwrap();
426 let resource_monitor = self.resource_monitor.lock().unwrap();
427 let optimization_engine = self.optimization_engine.lock().unwrap();
428
429 let report = DetailedPerformanceReport {
430 summary,
431 historical_trends: Self::analyze_trends(&history),
432 resource_utilization: resource_monitor.get_utilization_summary(),
433 bottleneck_analysis: Self::identify_bottlenecks(&history),
434 optimizationhistory: optimization_engine.optimizationhistory.clone(),
435 recommendations: optimization_engine.current_recommendations.clone(),
436 };
437
438 Ok(report)
439 }
440
441 fn check_alerts(&self, datapoint: &PerformanceDataPoint) -> Result<()> {
443 let mut aggregator = self.realtime_aggregator.lock().unwrap();
444
445 if datapoint.processing_time.as_millis()
446 > self.alert_thresholds.max_processing_time_ms as u128
447 {
448 aggregator.increment_alert("high_processing_time");
449 }
450
451 let throughput = datapoint.itemsprocessed as f64 / datapoint.processing_time.as_secs_f64();
452 if throughput < self.alert_thresholds.min_throughput {
453 aggregator.increment_alert("low_throughput");
454 }
455
456 if datapoint.memory_usage > self.alert_thresholds.max_memory_usage_mb * 1024 * 1024 {
457 aggregator.increment_alert("high_memory_usage");
458 }
459
460 if datapoint.cpu_utilization > self.alert_thresholds.max_cpu_utilization {
461 aggregator.increment_alert("high_cpu_utilization");
462 }
463
464 if datapoint.cache_hit_rate < self.alert_thresholds.min_cache_hit_rate {
465 aggregator.increment_alert("low_cache_hit_rate");
466 }
467
468 Ok(())
469 }
470
471 fn calculate_avg_processing_time(data: &[PerformanceDataPoint]) -> Duration {
472 if data.is_empty() {
473 return Duration::from_millis(0);
474 }
475
476 let total_ms: u128 = data.iter().map(|d| d.processing_time.as_millis()).sum();
477 Duration::from_millis((total_ms / data.len() as u128) as u64)
478 }
479
480 fn calculate_avg_throughput(data: &[PerformanceDataPoint]) -> f64 {
481 if data.is_empty() {
482 return 0.0;
483 }
484
485 let total_throughput: f64 = data
486 .iter()
487 .map(|d| d.itemsprocessed as f64 / d.processing_time.as_secs_f64())
488 .sum();
489 total_throughput / data.len() as f64
490 }
491
492 fn calculate_avg_memory_usage(data: &[PerformanceDataPoint]) -> usize {
493 if data.is_empty() {
494 return 0;
495 }
496
497 data.iter().map(|d| d.memory_usage).sum::<usize>() / data.len()
498 }
499
500 fn calculate_avg_cache_hit_rate(data: &[PerformanceDataPoint]) -> f64 {
501 if data.is_empty() {
502 return 0.0;
503 }
504
505 data.iter().map(|d| d.cache_hit_rate).sum::<f64>() / data.len() as f64
506 }
507
508 fn analyze_trends(history: &[PerformanceDataPoint]) -> TrendAnalysis {
509 TrendAnalysis {
510 processing_time_trend: Self::calculate_trend(
511 &history
512 .iter()
513 .map(|d| d.processing_time.as_millis() as f64)
514 .collect::<Vec<_>>(),
515 ),
516 throughput_trend: Self::calculate_trend(
517 &history
518 .iter()
519 .map(|d| d.itemsprocessed as f64 / d.processing_time.as_secs_f64())
520 .collect::<Vec<_>>(),
521 ),
522 memory_usage_trend: Self::calculate_trend(
523 &history
524 .iter()
525 .map(|d| d.memory_usage as f64)
526 .collect::<Vec<_>>(),
527 ),
528 }
529 }
530
531 fn calculate_trend(values: &[f64]) -> TrendDirection {
532 if values.len() < 2 {
533 return TrendDirection::Stable;
534 }
535
536 let mid_point = values.len() / 2;
537 let first_half_avg = values[..mid_point].iter().sum::<f64>() / mid_point as f64;
538 let second_half_avg =
539 values[mid_point..].iter().sum::<f64>() / (values.len() - mid_point) as f64;
540
541 let change_rate = (second_half_avg - first_half_avg) / first_half_avg;
542
543 if change_rate > 0.1 {
544 TrendDirection::Increasing
545 } else if change_rate < -0.1 {
546 TrendDirection::Decreasing
547 } else {
548 TrendDirection::Stable
549 }
550 }
551
552 fn identify_bottlenecks(history: &[PerformanceDataPoint]) -> Vec<BottleneckAnalysis> {
553 let mut bottlenecks = Vec::new();
554
555 let avg_processing_time = Self::calculate_avg_processing_time(history);
557 if avg_processing_time.as_millis() > 500 {
558 bottlenecks.push(BottleneckAnalysis {
559 component: "Processing Time".to_string(),
560 severity: if avg_processing_time.as_millis() > 1000 {
561 "High"
562 } else {
563 "Medium"
564 }
565 .to_string(),
566 description: format!(
567 "Average processing time is {}ms",
568 avg_processing_time.as_millis()
569 ),
570 recommendations: vec![
571 "Enable SIMD optimizations".to_string(),
572 "Increase parallel processing".to_string(),
573 "Optimize memory allocation".to_string(),
574 ],
575 });
576 }
577
578 let avg_memory = Self::calculate_avg_memory_usage(history);
580 if avg_memory > 4 * 1024 * 1024 * 1024 {
581 bottlenecks.push(BottleneckAnalysis {
583 component: "Memory Usage".to_string(),
584 severity: "High".to_string(),
585 description: {
586 let avg_memory_mb = avg_memory / (1024 * 1024);
587 format!("Average memory usage is {avg_memory_mb} MB")
588 },
589 recommendations: vec![
590 "Implement memory pooling".to_string(),
591 "Use streaming processing".to_string(),
592 "Optimize data structures".to_string(),
593 ],
594 });
595 }
596
597 bottlenecks
598 }
599}
600
/// Guard returned by [`AdvancedPerformanceMonitor::start_operation`]; call
/// [`OperationMonitor::complete`] to record the finished measurement.
pub struct OperationMonitor<'a> {
    /// Label of the operation being timed.
    operationtype: String,
    /// Instant when the operation began.
    start_time: Instant,
    /// Monitor the completed measurement is recorded on.
    monitor: &'a AdvancedPerformanceMonitor,
}
607
608impl<'a> OperationMonitor<'a> {
609 pub fn complete(self, itemsprocessed: usize) -> Result<()> {
611 let processing_time = self.start_time.elapsed();
612
613 let data_point = PerformanceDataPoint {
615 timestamp: self.start_time,
616 operationtype: self.operationtype,
617 processing_time,
618 itemsprocessed,
619 memory_usage: 0, cpu_utilization: 0.0, gpu_utilization: 0.0, cache_hit_rate: 0.9, custom_metrics: HashMap::new(),
624 };
625
626 self.monitor.record_performance(data_point)
627 }
628}
629
/// High-level snapshot of recent monitor activity.
#[derive(Debug)]
pub struct PerformanceSummary {
    /// Total data points currently held in history.
    pub total_operations: usize,
    /// Mean processing time over the most recent window (up to 100 points).
    pub recent_avg_processing_time: Duration,
    /// Mean throughput (items/second) over the recent window.
    pub recent_avg_throughput: f64,
    /// Mean memory usage (bytes) over the recent window.
    pub recent_avg_memory_usage: usize,
    /// Mean cache hit rate over the recent window.
    pub cache_hit_rate: f64,
    /// Names of alerts that have fired at least once.
    pub active_alerts: Vec<String>,
    /// Recommendations currently pending.
    pub optimization_opportunities: Vec<OptimizationRecommendation>,
}

/// Full report combining the summary with trends, resource utilization,
/// bottlenecks, and optimization state.
#[derive(Debug)]
pub struct DetailedPerformanceReport {
    /// Recent-window summary.
    pub summary: PerformanceSummary,
    /// Direction of change for key metrics over the whole history.
    pub historical_trends: TrendAnalysis,
    /// CPU/memory/network utilization figures.
    pub resource_utilization: ResourceUtilizationSummary,
    /// Detected bottlenecks with severity and advice.
    pub bottleneck_analysis: Vec<BottleneckAnalysis>,
    /// Recommendations that have already been applied.
    pub optimizationhistory: Vec<OptimizationApplication>,
    /// Recommendations still pending.
    pub recommendations: Vec<OptimizationRecommendation>,
}

/// Trend directions for the three tracked metric series.
#[derive(Debug)]
pub struct TrendAnalysis {
    pub processing_time_trend: TrendDirection,
    pub throughput_trend: TrendDirection,
    pub memory_usage_trend: TrendDirection,
}

/// Coarse direction of change between the first and second half of a
/// series (±10% relative change is the cutoff).
#[derive(Debug)]
pub enum TrendDirection {
    Increasing,
    Decreasing,
    Stable,
}

/// Aggregated resource utilization figures.
#[derive(Debug)]
pub struct ResourceUtilizationSummary {
    pub avg_cpu_utilization: f64,
    pub peak_memory_usage: usize,
    pub network_io: NetworkIOSummary,
}

/// Network I/O totals and average latency.
#[derive(Debug)]
pub struct NetworkIOSummary {
    pub bytes_sent: usize,
    pub bytes_received: usize,
    pub avg_latency_ms: f64,
}

/// One detected bottleneck with severity and remediation advice.
#[derive(Debug)]
pub struct BottleneckAnalysis {
    /// Affected component, e.g. "Processing Time" or "Memory Usage".
    pub component: String,
    /// "High" or "Medium" in this file.
    pub severity: String,
    /// Human-readable explanation including the measured value.
    pub description: String,
    /// Suggested remediations.
    pub recommendations: Vec<String>,
}
722
723impl RealtimeAggregator {
725 fn new() -> Self {
726 Self {
727 current_operation: None,
728 running_stats: HashMap::new(),
729 alert_counts: HashMap::new(),
730 }
731 }
732
733 fn start_operation(&mut self, _operationtype: &str) -> Result<()> {
734 self.current_operation = Some(Instant::now());
735 Ok(())
736 }
737
738 fn update_statistics(&mut self, datapoint: &PerformanceDataPoint) -> Result<()> {
739 let key = &datapoint.operationtype;
740 let stats = self
741 .running_stats
742 .entry(key.clone())
743 .or_insert_with(RunningStatistics::new);
744 stats.update(datapoint.processing_time.as_millis() as f64);
745 Ok(())
746 }
747
748 fn increment_alert(&mut self, alerttype: &str) {
749 *self.alert_counts.entry(alerttype.to_string()).or_insert(0) += 1;
750 }
751
752 fn get_active_alerts(&self) -> Vec<String> {
753 self.alert_counts.keys().cloned().collect()
754 }
755}
756
757impl RunningStatistics {
758 fn new() -> Self {
759 Self {
760 count: 0,
761 sum: 0.0,
762 sum_squared: 0.0,
763 min: f64::MAX,
764 max: f64::MIN,
765 moving_average: 0.0,
766 }
767 }
768
769 fn update(&mut self, value: f64) {
770 self.count += 1;
771 self.sum += value;
772 self.sum_squared += value * value;
773 self.min = self.min.min(value);
774 self.max = self.max.max(value);
775
776 let alpha = 0.1;
778 self.moving_average = alpha * value + (1.0 - alpha) * self.moving_average;
779 }
780}
781
impl SystemResourceMonitor {
    /// Creates a monitor with zeroed trackers; GPU tracking is disabled.
    fn new() -> Self {
        Self {
            memory_tracker: MemoryTracker::new(),
            cpu_tracker: CpuUsageTracker::new(),
            gpu_tracker: None,
            network_tracker: NetworkTracker::new(),
        }
    }

    /// Summarizes current utilization from the individual trackers.
    fn get_utilization_summary(&self) -> ResourceUtilizationSummary {
        ResourceUtilizationSummary {
            avg_cpu_utilization: self.cpu_tracker.load_average,
            peak_memory_usage: self.memory_tracker.peak_usage,
            network_io: NetworkIOSummary {
                bytes_sent: self.network_tracker.bytes_sent,
                bytes_received: self.network_tracker.bytes_received,
                // NOTE(review): hardcoded placeholder — `latency_samples`
                // are never aggregated here; confirm before relying on it.
                avg_latency_ms: 5.0,
            },
        }
    }
}
804
805impl MemoryTracker {
806 fn new() -> Self {
807 Self {
808 peak_usage: 0,
809 current_usage: 0,
810 allocations: Vec::new(),
811 }
812 }
813}
814
815impl CpuUsageTracker {
816 fn new() -> Self {
817 Self {
818 usage_samples: Vec::new(),
819 load_average: 0.0,
820 }
821 }
822}
823
824impl NetworkTracker {
825 fn new() -> Self {
826 Self {
827 bytes_sent: 0,
828 bytes_received: 0,
829 latency_samples: Vec::new(),
830 }
831 }
832}
833
impl OptimizationEngine {
    /// Creates an engine preloaded with the built-in pattern database.
    fn new() -> Self {
        Self {
            patterndatabase: Self::initialize_patterns(),
            current_recommendations: Vec::new(),
            optimizationhistory: Vec::new(),
        }
    }

    /// Builds the static set of known performance patterns and the
    /// recommendations associated with each.
    fn initialize_patterns() -> Vec<PerformancePattern> {
        vec![PerformancePattern {
            id: "high_processing_time".to_string(),
            description: "Processing time is consistently high".to_string(),
            conditions: vec![PerformanceCondition {
                metric: "processing_time_ms".to_string(),
                operator: ComparisonOperator::GreaterThan,
                threshold: 1000.0,
            }],
            recommendations: vec![
                OptimizationRecommendation {
                    id: "enable_simd".to_string(),
                    category: "Performance".to_string(),
                    recommendation: "Enable SIMD optimizations for string operations".to_string(),
                    impact_estimate: 0.3,
                    complexity: 2,
                    prerequisites: vec!["SIMD-capable hardware".to_string()],
                },
                OptimizationRecommendation {
                    id: "increase_parallelism".to_string(),
                    category: "Performance".to_string(),
                    recommendation: "Increase parallel processing threads".to_string(),
                    impact_estimate: 0.25,
                    complexity: 1,
                    prerequisites: vec!["Multi-core CPU".to_string()],
                },
            ],
        }]
    }

    /// Adds the recommendations of every pattern the data point matches,
    /// skipping recommendations already pending (deduplicated by id).
    fn update_recommendations(&mut self, datapoint: &PerformanceDataPoint) -> Result<()> {
        // Borrows are field-disjoint: the loop holds `patterndatabase`
        // immutably while `current_recommendations` is mutated.
        for pattern in &self.patterndatabase {
            if self.matches_pattern(datapoint, pattern) {
                for recommendation in &pattern.recommendations {
                    if !self
                        .current_recommendations
                        .iter()
                        .any(|r| r.id == recommendation.id)
                    {
                        self.current_recommendations.push(recommendation.clone());
                    }
                }
            }
        }
        Ok(())
    }

    /// Returns true when the data point satisfies every condition of the
    /// pattern; an unrecognized metric name fails the whole pattern.
    fn matches_pattern(
        &self,
        data_point: &PerformanceDataPoint,
        pattern: &PerformancePattern,
    ) -> bool {
        pattern.conditions.iter().all(|condition| {
            // Map the condition's metric name onto the data point's fields.
            let value = match condition.metric.as_str() {
                "processing_time_ms" => data_point.processing_time.as_millis() as f64,
                "cpu_utilization" => data_point.cpu_utilization,
                "memory_usage_mb" => data_point.memory_usage as f64 / (1024.0 * 1024.0),
                "cache_hit_rate" => data_point.cache_hit_rate,
                _ => return false,
            };

            match condition.operator {
                ComparisonOperator::GreaterThan => value > condition.threshold,
                ComparisonOperator::LessThan => value < condition.threshold,
                // Floats are compared with a small tolerance, not exactly.
                ComparisonOperator::EqualTo => (value - condition.threshold).abs() < 0.001,
                ComparisonOperator::GreaterOrEqual => value >= condition.threshold,
                ComparisonOperator::LessOrEqual => value <= condition.threshold,
            }
        })
    }

    /// Marks a pending recommendation as applied: archives it in the
    /// history and removes it from the pending list.
    ///
    /// # Errors
    /// Returns `TextError::InvalidInput` when no pending recommendation has
    /// the given id.
    fn apply_optimization(&mut self, optimizationid: &str) -> Result<()> {
        if let Some(optimization) = self
            .current_recommendations
            .iter()
            .find(|r| r.id == optimizationid)
        {
            let application = OptimizationApplication {
                timestamp: Instant::now(),
                optimization: optimization.clone(),
                // NOTE(review): fixed placeholder values — the real
                // pre-application metrics are not sampled here; confirm
                // before treating this snapshot as measured data.
                performance_before: PerformanceSnapshot {
                    avg_processing_time: Duration::from_millis(100),
                    avg_throughput: 1000.0,
                    avg_memory_usage: 1024 * 1024 * 1024,
                    avg_cpu_utilization: 75.0,
                },
                performance_after: None,
            };

            self.optimizationhistory.push(application);

            self.current_recommendations
                .retain(|r| r.id != optimizationid);

            Ok(())
        } else {
            Err(TextError::InvalidInput(format!(
                "Optimization not found: {optimizationid}"
            )))
        }
    }
}
948
949impl Default for AdvancedPerformanceMonitor {
950 fn default() -> Self {
951 Self::new()
952 }
953}
954
#[cfg(test)]
mod tests {
    use super::*;

    /// A fresh monitor starts with an empty history.
    #[test]
    fn test_performance_monitor_creation() {
        let monitor = AdvancedPerformanceMonitor::new();
        let summary = monitor.get_performance_summary().unwrap();
        assert_eq!(summary.total_operations, 0);
    }

    /// Completing an operation records exactly one data point.
    #[test]
    fn test_operation_monitoring() {
        let monitor = AdvancedPerformanceMonitor::new();
        let op_monitor = monitor.start_operation("test_operation").unwrap();

        // Give the operation measurable elapsed time.
        std::thread::sleep(Duration::from_millis(10));

        op_monitor.complete(100).unwrap();

        let summary = monitor.get_performance_summary().unwrap();
        assert_eq!(summary.total_operations, 1);
    }

    /// A data point that breaches every threshold raises at least one alert.
    #[test]
    fn test_performance_thresholds() {
        let thresholds = PerformanceThresholds {
            max_processing_time_ms: 500,
            min_throughput: 200.0,
            max_memory_usage_mb: 4096,
            max_cpu_utilization: 80.0,
            min_cache_hit_rate: 0.9,
        };

        let monitor = AdvancedPerformanceMonitor::with_thresholds(thresholds);

        // Every field below violates its corresponding threshold above.
        let data_point = PerformanceDataPoint {
            timestamp: Instant::now(),
            operationtype: "test".to_string(),
            processing_time: Duration::from_millis(1000),
            itemsprocessed: 10,
            memory_usage: 6 * 1024 * 1024 * 1024,
            cpu_utilization: 95.0,
            gpu_utilization: 50.0,
            cache_hit_rate: 0.7,
            custom_metrics: HashMap::new(),
        };

        monitor.record_performance(data_point).unwrap();

        let summary = monitor.get_performance_summary().unwrap();
        assert!(!summary.active_alerts.is_empty());
    }

    /// A slow operation triggers the "high_processing_time" pattern, and the
    /// resulting recommendation can be applied by id.
    #[test]
    fn test_optimization_recommendations() {
        let monitor = AdvancedPerformanceMonitor::new();

        // 2000 ms exceeds the pattern's 1000 ms condition.
        let data_point = PerformanceDataPoint {
            timestamp: Instant::now(),
            operationtype: "slow_operation".to_string(),
            processing_time: Duration::from_millis(2000),
            itemsprocessed: 50,
            memory_usage: 1024 * 1024 * 1024,
            cpu_utilization: 80.0,
            gpu_utilization: 0.0,
            cache_hit_rate: 0.9,
            custom_metrics: HashMap::new(),
        };

        monitor.record_performance(data_point).unwrap();

        let recommendations = monitor.get_optimization_opportunities().unwrap();
        assert!(!recommendations.is_empty());

        if let Some(first_rec) = recommendations.first() {
            monitor.apply_optimization(&first_rec.id).unwrap();
        }
    }

    /// Monotonically growing samples are reported as increasing trends.
    #[test]
    fn test_trend_analysis() {
        let monitor = AdvancedPerformanceMonitor::new();

        // Processing time, memory, and CPU all grow with `i`.
        for i in 1..=10 {
            let data_point = PerformanceDataPoint {
                timestamp: Instant::now(),
                operationtype: "trend_test".to_string(),
                processing_time: Duration::from_millis(100 + i * 10),
                itemsprocessed: 100,
                memory_usage: 1024 * 1024 * i as usize,
                cpu_utilization: 50.0 + i as f64,
                gpu_utilization: 0.0,
                cache_hit_rate: 0.9,
                custom_metrics: HashMap::new(),
            };

            monitor.record_performance(data_point).unwrap();
        }

        let report = monitor.generate_performance_report().unwrap();
        assert!(matches!(
            report.historical_trends.processing_time_trend,
            TrendDirection::Increasing
        ));
        assert!(matches!(
            report.historical_trends.memory_usage_trend,
            TrendDirection::Increasing
        ));
    }
}