1use crate::EvaluationError;
8use serde::{Deserialize, Serialize};
9use std::collections::{HashMap, VecDeque};
10use std::sync::{Arc, Mutex};
11use std::time::{Duration, Instant};
12use tokio::sync::RwLock;
13
/// Configuration controlling how the [`PerformanceMonitor`] samples,
/// retains and alerts on measurements.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMonitorConfig {
    /// Maximum number of measurements retained in the rolling history.
    pub max_history_size: usize,
    /// Sampling interval in milliseconds.
    // NOTE(review): not read anywhere in this file — presumably consumed by an
    // external sampling loop; confirm before removing.
    pub sampling_interval_ms: u64,
    /// Duration (ms) above which an operation triggers a `SlowOperation` alert.
    pub slow_operation_threshold_ms: u64,
    /// Whether to capture memory usage before/after each operation.
    pub monitor_memory: bool,
    /// Whether to capture CPU usage when an operation finishes.
    pub monitor_cpu: bool,
    /// Whether to collect fine-grained per-metric timing.
    // NOTE(review): not read anywhere in this file — verify against callers.
    pub detailed_metric_timing: bool,
}
30
31impl Default for PerformanceMonitorConfig {
32 fn default() -> Self {
33 Self {
34 max_history_size: 1000,
35 sampling_interval_ms: 100,
36 slow_operation_threshold_ms: 1000,
37 monitor_memory: true,
38 monitor_cpu: true,
39 detailed_metric_timing: true,
40 }
41 }
42}
43
/// A single timed operation together with optional resource snapshots.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMeasurement {
    /// Name of the measured operation.
    pub operation: String,
    /// Wall-clock time at which the operation started (UTC).
    pub start_time: chrono::DateTime<chrono::Utc>,
    /// Elapsed time in milliseconds.
    pub duration_ms: u64,
    /// Memory usage (bytes) sampled before the operation, if monitored.
    pub memory_before_bytes: Option<u64>,
    /// Memory usage (bytes) sampled after the operation, if monitored.
    pub memory_after_bytes: Option<u64>,
    /// CPU usage (percent) sampled when the operation finished, if monitored.
    pub cpu_usage_percent: Option<f32>,
    /// Audio buffer size processed, if applicable.
    pub audio_buffer_size: Option<usize>,
    /// Audio sample rate in Hz, if applicable.
    pub sample_rate: Option<u32>,
    /// Free-form key/value annotations attached via `OperationTimer::add_metadata`.
    pub metadata: HashMap<String, String>,
}
66
/// Aggregated statistics for one operation, derived from the measurement
/// history by `PerformanceMonitor::update_stats_cache`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceStats {
    /// Operation name these statistics describe.
    pub operation: String,
    /// Number of measurements aggregated.
    pub measurement_count: usize,
    /// Sum of all durations in milliseconds.
    pub total_duration_ms: u64,
    /// Mean duration in milliseconds.
    pub avg_duration_ms: f64,
    /// Fastest observed duration (ms).
    pub min_duration_ms: u64,
    /// Slowest observed duration (ms).
    pub max_duration_ms: u64,
    /// Population standard deviation of durations (ms).
    pub std_dev_duration_ms: f64,
    /// 95th-percentile duration (ms).
    pub p95_duration_ms: u64,
    /// 99th-percentile duration (ms).
    pub p99_duration_ms: u64,
    /// Mean post-operation memory usage in MB, when memory was monitored.
    pub avg_memory_usage_mb: Option<f64>,
    /// Mean CPU usage in percent, when CPU was monitored.
    pub avg_cpu_usage_percent: Option<f32>,
    /// Throughput derived as 1000 / avg_duration_ms (0.0 for sub-ms averages).
    pub ops_per_second: f64,
}
95
/// A human-readable optimization suggestion produced by
/// `PerformanceMonitor::generate_recommendations`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendation {
    /// Broad category ("Performance", "Consistency", "Throughput", "Memory").
    pub category: String,
    /// Severity on a 1–10 scale; higher is more urgent.
    pub severity: u8,
    /// Description of the observed problem.
    pub description: String,
    /// Suggested remediation.
    pub recommendation: String,
    /// Expected benefit if the recommendation is applied.
    pub expected_improvement: String,
    /// Estimated implementation effort ("Low" / "Medium" / "High").
    pub complexity: String,
}
112
/// An alert raised when a measurement crosses a configured threshold.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceAlert {
    /// Kind of threshold that was crossed.
    pub alert_type: PerformanceAlertType,
    /// When the alert was raised (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Operation that triggered the alert.
    pub operation: String,
    /// Human-readable explanation.
    pub message: String,
    /// Observed value that crossed the threshold.
    pub current_value: f64,
    /// Threshold that was crossed.
    pub threshold: f64,
}
129
/// Categories of performance alerts.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum PerformanceAlertType {
    /// Duration exceeded `slow_operation_threshold_ms`.
    SlowOperation,
    /// Memory grew by more than the hard-coded 50 MB per operation.
    HighMemoryUsage,
    /// CPU usage exceeded the hard-coded 80% threshold.
    HighCpuUsage,
    /// General performance degradation versus a baseline.
    // NOTE(review): never constructed in this file — presumably raised elsewhere.
    PerformanceDegradation,
    /// Suspected memory leak.
    // NOTE(review): never constructed in this file — presumably raised elsewhere.
    MemoryLeak,
}
144
/// Central collector for performance measurements, derived statistics,
/// alerts, and baselines. Cheap to share: all state lives behind `Arc`s.
pub struct PerformanceMonitor {
    // Behavioral configuration (thresholds, history size, what to sample).
    config: PerformanceMonitorConfig,
    // Rolling, bounded history of raw measurements (std Mutex: never held across .await).
    measurements: Arc<Mutex<VecDeque<PerformanceMeasurement>>>,
    // Per-operation aggregated statistics, recomputed on every measurement.
    stats_cache: Arc<RwLock<HashMap<String, PerformanceStats>>>,
    // Bounded queue (max 100) of recently raised alerts.
    alerts: Arc<Mutex<VecDeque<PerformanceAlert>>>,
    // Baselines for regression comparisons, set via `set_baseline`.
    baseline_stats: Arc<RwLock<HashMap<String, PerformanceStats>>>,
}
153
154impl PerformanceMonitor {
155 pub fn new(config: PerformanceMonitorConfig) -> Self {
157 Self {
158 config,
159 measurements: Arc::new(Mutex::new(VecDeque::new())),
160 stats_cache: Arc::new(RwLock::new(HashMap::new())),
161 alerts: Arc::new(Mutex::new(VecDeque::new())),
162 baseline_stats: Arc::new(RwLock::new(HashMap::new())),
163 }
164 }
165
166 pub fn start_operation(&self, operation: &str) -> OperationTimer {
168 OperationTimer::new(operation.to_string(), self.config.clone())
169 }
170
171 pub async fn record_measurement(&self, measurement: PerformanceMeasurement) {
173 {
175 let mut measurements = self.measurements.lock().unwrap();
176 measurements.push_back(measurement.clone());
177
178 while measurements.len() > self.config.max_history_size {
180 measurements.pop_front();
181 }
182 }
183
184 self.update_stats_cache(&measurement).await;
186
187 self.check_performance_alerts(&measurement).await;
189 }
190
191 pub async fn get_stats(&self, operation: &str) -> Option<PerformanceStats> {
193 let stats_cache = self.stats_cache.read().await;
194 stats_cache.get(operation).cloned()
195 }
196
197 pub async fn get_all_stats(&self) -> HashMap<String, PerformanceStats> {
199 let stats_cache = self.stats_cache.read().await;
200 stats_cache.clone()
201 }
202
203 pub fn get_recent_alerts(&self, limit: usize) -> Vec<PerformanceAlert> {
205 let alerts = self.alerts.lock().unwrap();
206 alerts.iter().rev().take(limit).cloned().collect()
207 }
208
209 pub async fn generate_recommendations(&self) -> Vec<OptimizationRecommendation> {
211 let mut recommendations = Vec::new();
212 let stats = self.get_all_stats().await;
213
214 for (operation, stat) in &stats {
215 if stat.avg_duration_ms > self.config.slow_operation_threshold_ms as f64 {
217 recommendations.push(OptimizationRecommendation {
218 category: String::from("Performance"),
219 severity: 7,
220 description: format!(
221 "Operation '{}' has high average duration ({:.2}ms)",
222 operation, stat.avg_duration_ms
223 ),
224 recommendation: String::from(
225 "Consider optimizing the algorithm or using parallel processing",
226 ),
227 expected_improvement: String::from("30-50% reduction in processing time"),
228 complexity: String::from("Medium"),
229 });
230 }
231
232 if stat.std_dev_duration_ms > stat.avg_duration_ms * 0.5 {
234 recommendations.push(OptimizationRecommendation {
235 category: String::from("Consistency"),
236 severity: 5,
237 description: format!(
238 "Operation '{}' has inconsistent timing (std dev: {:.2}ms)",
239 operation, stat.std_dev_duration_ms
240 ),
241 recommendation: "Investigate variable load factors and consider caching"
242 .to_string(),
243 expected_improvement: String::from("More predictable performance"),
244 complexity: String::from("Low"),
245 });
246 }
247
248 if stat.ops_per_second < 1.0 {
250 recommendations.push(OptimizationRecommendation {
251 category: String::from("Throughput"),
252 severity: 6,
253 description: format!(
254 "Operation '{}' has low throughput ({:.2} ops/sec)",
255 operation, stat.ops_per_second
256 ),
257 recommendation: "Consider batch processing or algorithm optimization"
258 .to_string(),
259 expected_improvement: String::from("2-10x increase in throughput"),
260 complexity: String::from("High"),
261 });
262 }
263
264 if let Some(avg_memory) = stat.avg_memory_usage_mb {
266 if avg_memory > 100.0 {
267 recommendations.push(OptimizationRecommendation {
268 category: String::from("Memory"),
269 severity: 4,
270 description: format!(
271 "Operation '{}' uses high memory ({:.2} MB)",
272 operation, avg_memory
273 ),
274 recommendation: "Consider streaming processing or memory pooling"
275 .to_string(),
276 expected_improvement: String::from("50-70% reduction in memory usage"),
277 complexity: String::from("Medium"),
278 });
279 }
280 }
281 }
282
283 recommendations
284 }
285
286 pub async fn create_report(&self) -> String {
288 let mut report = String::new();
289
290 report.push_str("# VoiRS Evaluation Performance Report\n\n");
291
292 let stats = self.get_all_stats().await;
293 let recommendations = self.generate_recommendations().await;
294 let recent_alerts = self.get_recent_alerts(10);
295
296 report.push_str("## Performance Overview\n\n");
298 if stats.is_empty() {
299 report.push_str("No performance data available.\n\n");
300 } else {
301 report
302 .push_str("| Operation | Avg Duration | Ops/Sec | P95 Duration | Memory Usage |\n");
303 report.push_str(
304 "|-----------|--------------|---------|--------------|---------------|\n",
305 );
306
307 for (operation, stat) in &stats {
308 let memory_str = stat
309 .avg_memory_usage_mb
310 .map(|m| format!("{:.1} MB", m))
311 .unwrap_or_else(|| String::from("N/A"));
312
313 report.push_str(&format!(
314 "| {} | {:.2}ms | {:.2} | {}ms | {} |\n",
315 operation,
316 stat.avg_duration_ms,
317 stat.ops_per_second,
318 stat.p95_duration_ms,
319 memory_str
320 ));
321 }
322 report.push_str("\n");
323 }
324
325 if !recent_alerts.is_empty() {
327 report.push_str("## Recent Alerts\n\n");
328 for alert in &recent_alerts {
329 let icon = match alert.alert_type {
330 PerformanceAlertType::SlowOperation => "🐌",
331 PerformanceAlertType::HighMemoryUsage => "🧠",
332 PerformanceAlertType::HighCpuUsage => "⚡",
333 PerformanceAlertType::PerformanceDegradation => "📉",
334 PerformanceAlertType::MemoryLeak => "🔴",
335 };
336
337 report.push_str(&format!(
338 "- {} **{}**: {} ({})\n",
339 icon,
340 alert.operation,
341 alert.message,
342 alert.timestamp.format("%Y-%m-%d %H:%M:%S")
343 ));
344 }
345 report.push_str("\n");
346 }
347
348 if !recommendations.is_empty() {
350 report.push_str("## Optimization Recommendations\n\n");
351
352 let mut sorted_recommendations = recommendations;
354 sorted_recommendations.sort_by(|a, b| b.severity.cmp(&a.severity));
355
356 for (i, rec) in sorted_recommendations.iter().enumerate() {
357 let priority = match rec.severity {
358 8..=10 => "🔴 High",
359 5..=7 => "🟡 Medium",
360 1..=4 => "🟢 Low",
361 _ => "⚪ Unknown",
362 };
363
364 report.push_str(&format!(
365 "### {}. {} - {}\n\n",
366 i + 1,
367 rec.category,
368 priority
369 ));
370 report.push_str(&format!("**Issue:** {}\n\n", rec.description));
371 report.push_str(&format!("**Recommendation:** {}\n\n", rec.recommendation));
372 report.push_str(&format!(
373 "**Expected Improvement:** {}\n\n",
374 rec.expected_improvement
375 ));
376 report.push_str(&format!(
377 "**Implementation Complexity:** {}\n\n",
378 rec.complexity
379 ));
380 }
381 }
382
383 report
384 }
385
386 pub async fn set_baseline(&self, operation: &str, stats: PerformanceStats) {
388 let mut baseline_stats = self.baseline_stats.write().await;
389 baseline_stats.insert(operation.to_string(), stats);
390 }
391
392 pub async fn clear_data(&self) {
394 {
395 let mut measurements = self.measurements.lock().unwrap();
396 measurements.clear();
397 }
398
399 {
400 let mut stats_cache = self.stats_cache.write().await;
401 stats_cache.clear();
402 }
403
404 {
405 let mut alerts = self.alerts.lock().unwrap();
406 alerts.clear();
407 }
408 }
409
410 async fn update_stats_cache(&self, measurement: &PerformanceMeasurement) {
412 let mut stats_cache = self.stats_cache.write().await;
413
414 let measurements = self.measurements.lock().unwrap();
416 let operation_measurements: Vec<_> = measurements
417 .iter()
418 .filter(|m| m.operation == measurement.operation)
419 .collect();
420
421 if operation_measurements.is_empty() {
422 return;
423 }
424
425 let durations: Vec<u64> = operation_measurements
427 .iter()
428 .map(|m| m.duration_ms)
429 .collect();
430 let total_duration: u64 = durations.iter().sum();
431 let count = durations.len();
432 let avg_duration = total_duration as f64 / count as f64;
433
434 let min_duration = *durations.iter().min().unwrap();
435 let max_duration = *durations.iter().max().unwrap();
436
437 let variance = durations
439 .iter()
440 .map(|&d| (d as f64 - avg_duration).powi(2))
441 .sum::<f64>()
442 / count as f64;
443 let std_dev = variance.sqrt();
444
445 let mut sorted_durations = durations.clone();
447 sorted_durations.sort_unstable();
448 let p95_idx = ((count as f64 * 0.95) as usize).min(count - 1);
449 let p99_idx = ((count as f64 * 0.99) as usize).min(count - 1);
450 let p95_duration = sorted_durations[p95_idx];
451 let p99_duration = sorted_durations[p99_idx];
452
453 let avg_memory_usage_mb = if operation_measurements
455 .iter()
456 .any(|m| m.memory_after_bytes.is_some())
457 {
458 let memory_values: Vec<u64> = operation_measurements
459 .iter()
460 .filter_map(|m| m.memory_after_bytes)
461 .collect();
462
463 if !memory_values.is_empty() {
464 Some(
465 memory_values.iter().sum::<u64>() as f64
466 / memory_values.len() as f64
467 / 1_048_576.0,
468 )
469 } else {
470 None
471 }
472 } else {
473 None
474 };
475
476 let avg_cpu_usage = if operation_measurements
478 .iter()
479 .any(|m| m.cpu_usage_percent.is_some())
480 {
481 let cpu_values: Vec<f32> = operation_measurements
482 .iter()
483 .filter_map(|m| m.cpu_usage_percent)
484 .collect();
485
486 if !cpu_values.is_empty() {
487 Some(cpu_values.iter().sum::<f32>() / cpu_values.len() as f32)
488 } else {
489 None
490 }
491 } else {
492 None
493 };
494
495 let ops_per_second = if avg_duration > 0.0 {
497 1000.0 / avg_duration
498 } else {
499 0.0
500 };
501
502 let stats = PerformanceStats {
503 operation: measurement.operation.clone(),
504 measurement_count: count,
505 total_duration_ms: total_duration,
506 avg_duration_ms: avg_duration,
507 min_duration_ms: min_duration,
508 max_duration_ms: max_duration,
509 std_dev_duration_ms: std_dev,
510 p95_duration_ms: p95_duration,
511 p99_duration_ms: p99_duration,
512 avg_memory_usage_mb,
513 avg_cpu_usage_percent: avg_cpu_usage,
514 ops_per_second,
515 };
516
517 stats_cache.insert(measurement.operation.clone(), stats);
518 }
519
520 async fn check_performance_alerts(&self, measurement: &PerformanceMeasurement) {
522 let mut alerts_to_add = Vec::new();
523
524 if measurement.duration_ms > self.config.slow_operation_threshold_ms {
526 alerts_to_add.push(PerformanceAlert {
527 alert_type: PerformanceAlertType::SlowOperation,
528 timestamp: chrono::Utc::now(),
529 operation: measurement.operation.clone(),
530 message: format!(
531 "Operation took {}ms (threshold: {}ms)",
532 measurement.duration_ms, self.config.slow_operation_threshold_ms
533 ),
534 current_value: measurement.duration_ms as f64,
535 threshold: self.config.slow_operation_threshold_ms as f64,
536 });
537 }
538
539 if let (Some(before), Some(after)) = (
541 measurement.memory_before_bytes,
542 measurement.memory_after_bytes,
543 ) {
544 let memory_increase_mb = (after as i64 - before as i64) as f64 / 1_048_576.0;
545 if memory_increase_mb > 50.0 {
546 alerts_to_add.push(PerformanceAlert {
548 alert_type: PerformanceAlertType::HighMemoryUsage,
549 timestamp: chrono::Utc::now(),
550 operation: measurement.operation.clone(),
551 message: format!(
552 "Operation increased memory usage by {:.1}MB",
553 memory_increase_mb
554 ),
555 current_value: memory_increase_mb,
556 threshold: 50.0,
557 });
558 }
559 }
560
561 if let Some(cpu_usage) = measurement.cpu_usage_percent {
563 if cpu_usage > 80.0 {
564 alerts_to_add.push(PerformanceAlert {
565 alert_type: PerformanceAlertType::HighCpuUsage,
566 timestamp: chrono::Utc::now(),
567 operation: measurement.operation.clone(),
568 message: format!("Operation used {:.1}% CPU", cpu_usage),
569 current_value: cpu_usage as f64,
570 threshold: 80.0,
571 });
572 }
573 }
574
575 if !alerts_to_add.is_empty() {
577 let mut alerts = self.alerts.lock().unwrap();
578 for alert in alerts_to_add {
579 alerts.push_back(alert);
580 }
581
582 while alerts.len() > 100 {
584 alerts.pop_front();
585 }
586 }
587 }
588}
589
/// RAII-style timer for one operation; created by
/// `PerformanceMonitor::start_operation` and consumed by `finish`.
pub struct OperationTimer {
    // Name of the operation being timed.
    operation: String,
    // Monotonic clock for the duration measurement.
    start_time: Instant,
    // Wall-clock start, stored separately for reporting.
    start_timestamp: chrono::DateTime<chrono::Utc>,
    // Memory snapshot taken at construction (only when monitoring is enabled).
    memory_before: Option<u64>,
    // Cloned monitor configuration (decides what to sample at finish time).
    config: PerformanceMonitorConfig,
    // User-supplied annotations, attached to the final measurement.
    metadata: HashMap<String, String>,
}
599
600impl OperationTimer {
601 fn new(operation: String, config: PerformanceMonitorConfig) -> Self {
602 let memory_before = if config.monitor_memory {
603 Self::get_memory_usage()
604 } else {
605 None
606 };
607
608 Self {
609 operation,
610 start_time: Instant::now(),
611 start_timestamp: chrono::Utc::now(),
612 memory_before,
613 config,
614 metadata: HashMap::new(),
615 }
616 }
617
618 pub fn add_metadata(&mut self, key: &str, value: &str) {
620 self.metadata.insert(key.to_string(), value.to_string());
621 }
622
623 pub async fn finish(self, monitor: &PerformanceMonitor) -> Result<(), EvaluationError> {
625 let duration = self.start_time.elapsed();
626 let duration_ms = duration.as_millis() as u64;
627
628 let memory_after = if self.config.monitor_memory {
629 Self::get_memory_usage()
630 } else {
631 None
632 };
633
634 let cpu_usage = if self.config.monitor_cpu {
635 Self::get_cpu_usage()
636 } else {
637 None
638 };
639
640 let measurement = PerformanceMeasurement {
641 operation: self.operation,
642 start_time: self.start_timestamp,
643 duration_ms,
644 memory_before_bytes: self.memory_before,
645 memory_after_bytes: memory_after,
646 cpu_usage_percent: cpu_usage,
647 audio_buffer_size: None, sample_rate: None, metadata: self.metadata,
650 };
651
652 monitor.record_measurement(measurement).await;
653 Ok(())
654 }
655
656 fn get_memory_usage() -> Option<u64> {
658 None
661 }
662
663 fn get_cpu_usage() -> Option<f32> {
665 None
668 }
669}
670
671impl Default for PerformanceMonitor {
672 fn default() -> Self {
673 Self::new(PerformanceMonitorConfig::default())
674 }
675}
676
/// Scope-based profiler that tracks a call stack and aggregates per-function
/// hotspot data. Clones share all state via `Arc`.
#[derive(Clone)]
pub struct AdvancedProfiler {
    // Underlying monitor (currently unreferenced by profiler methods).
    monitor: Arc<PerformanceMonitor>,
    // Active call frames, indexed by position; popped via ProfilerScope drops.
    call_stack: Arc<Mutex<Vec<CallFrame>>>,
    // Aggregated timing/memory data keyed by function name.
    hotspots: Arc<Mutex<HashMap<String, HotspotData>>>,
    // Global on/off switch checked by `enter_function`.
    sampling_enabled: Arc<std::sync::atomic::AtomicBool>,
}
685
/// One active function invocation on the profiler's call stack.
#[derive(Debug, Clone)]
pub struct CallFrame {
    /// Function name as passed to `enter_function`.
    pub name: String,
    /// Monotonic instant at which the frame was entered.
    pub entry_time: Instant,
    /// Memory usage when the frame was entered, if available.
    pub memory_at_entry: Option<usize>,
    /// Stack index of the caller's frame, or `None` for a root frame.
    pub parent_index: Option<usize>,
}
698
/// Aggregated profiling data for a single function name.
#[derive(Debug, Clone)]
pub struct HotspotData {
    /// Cumulative time spent across all calls.
    pub total_time: Duration,
    /// Number of recorded calls.
    pub call_count: usize,
    /// Mean time per call (total_time / call_count).
    pub avg_time: Duration,
    /// Longest single call.
    pub max_time: Duration,
    /// Cumulative memory growth (bytes) observed across calls.
    pub total_memory: usize,
    /// Share of total profiled runtime, as a percentage (filled by `get_hotspots`).
    pub runtime_percentage: f64,
}
715
/// Compares current statistics against stored baselines and reports
/// duration, memory, and throughput regressions.
pub struct RegressionDetector {
    // Baseline stats per operation, set via `set_baseline`.
    baseline_stats: HashMap<String, PerformanceStats>,
    // Relative-change threshold (e.g. 0.1 = 10%) above which a regression is reported.
    sensitivity_threshold: f64,
    // NOTE(review): `window_size` is stored but never consulted by
    // `detect_regressions` — confirm intended use or remove.
    window_size: usize,
}
722
723impl AdvancedProfiler {
724 pub fn new(monitor: Arc<PerformanceMonitor>) -> Self {
726 Self {
727 monitor,
728 call_stack: Arc::new(Mutex::new(Vec::new())),
729 hotspots: Arc::new(Mutex::new(HashMap::new())),
730 sampling_enabled: Arc::new(std::sync::atomic::AtomicBool::new(true)),
731 }
732 }
733
734 pub fn enter_function(&self, name: &str) -> ProfilerScope {
736 if !self
737 .sampling_enabled
738 .load(std::sync::atomic::Ordering::Relaxed)
739 {
740 return ProfilerScope::disabled();
741 }
742
743 let frame = CallFrame {
744 name: name.to_string(),
745 entry_time: Instant::now(),
746 memory_at_entry: Self::get_current_memory(),
747 parent_index: {
748 let stack = self.call_stack.lock().unwrap();
749 if stack.is_empty() {
750 None
751 } else {
752 Some(stack.len() - 1)
753 }
754 },
755 };
756
757 let index = {
758 let mut stack = self.call_stack.lock().unwrap();
759 stack.push(frame);
760 stack.len() - 1
761 };
762
763 ProfilerScope::new(self.clone(), index)
764 }
765
766 pub fn exit_function(&self, frame_index: usize) {
768 let frame = {
769 let mut stack = self.call_stack.lock().unwrap();
770 if frame_index >= stack.len() {
771 return;
772 }
773 stack.remove(frame_index)
774 };
775
776 let duration = frame.entry_time.elapsed();
777 let memory_delta = Self::get_current_memory()
778 .and_then(|current| {
779 frame
780 .memory_at_entry
781 .map(|entry| current.saturating_sub(entry))
782 })
783 .unwrap_or(0);
784
785 {
787 let mut hotspots = self.hotspots.lock().unwrap();
788 let hotspot = hotspots.entry(frame.name).or_insert_with(|| HotspotData {
789 total_time: Duration::ZERO,
790 call_count: 0,
791 avg_time: Duration::ZERO,
792 max_time: Duration::ZERO,
793 total_memory: 0,
794 runtime_percentage: 0.0,
795 });
796
797 hotspot.total_time += duration;
798 hotspot.call_count += 1;
799 hotspot.avg_time = hotspot.total_time / hotspot.call_count as u32;
800 hotspot.max_time = hotspot.max_time.max(duration);
801 hotspot.total_memory += memory_delta;
802 }
803 }
804
805 pub fn get_hotspots(&self) -> Vec<(String, HotspotData)> {
807 let hotspots = self.hotspots.lock().unwrap();
808 let total_time: Duration = hotspots.values().map(|h| h.total_time).sum();
809
810 let mut results: Vec<_> = hotspots
811 .iter()
812 .map(|(name, data)| {
813 let mut data = data.clone();
814 data.runtime_percentage = if total_time.as_nanos() > 0 {
815 (data.total_time.as_nanos() as f64 / total_time.as_nanos() as f64) * 100.0
816 } else {
817 0.0
818 };
819 (name.clone(), data)
820 })
821 .collect();
822
823 results.sort_by(|a, b| b.1.total_time.cmp(&a.1.total_time));
825 results
826 }
827
828 pub fn generate_flame_graph(&self) -> String {
830 let hotspots = self.get_hotspots();
831 let mut flame_graph = String::new();
832
833 flame_graph.push_str("Function,Time(ms),Calls,Avg(ms),Memory(KB)\n");
834
835 for (name, data) in hotspots {
836 flame_graph.push_str(&format!(
837 "{},{:.2},{},{:.2},{}\n",
838 name,
839 data.total_time.as_secs_f64() * 1000.0,
840 data.call_count,
841 data.avg_time.as_secs_f64() * 1000.0,
842 data.total_memory / 1024
843 ));
844 }
845
846 flame_graph
847 }
848
849 pub fn reset(&self) {
851 self.call_stack.lock().unwrap().clear();
852 self.hotspots.lock().unwrap().clear();
853 }
854
855 pub fn set_sampling(&self, enabled: bool) {
857 self.sampling_enabled
858 .store(enabled, std::sync::atomic::Ordering::Relaxed);
859 }
860
861 fn get_current_memory() -> Option<usize> {
862 None
865 }
866}
867
/// Drop guard returned by `AdvancedProfiler::enter_function`; records the
/// frame's exit when dropped. A `None` profiler makes the guard a no-op.
pub struct ProfilerScope {
    // Profiler to notify on drop; None when sampling was disabled at entry.
    profiler: Option<AdvancedProfiler>,
    // Index of this guard's frame on the profiler's call stack.
    frame_index: usize,
}
873
874impl ProfilerScope {
875 fn new(profiler: AdvancedProfiler, frame_index: usize) -> Self {
876 Self {
877 profiler: Some(profiler),
878 frame_index,
879 }
880 }
881
882 fn disabled() -> Self {
883 Self {
884 profiler: None,
885 frame_index: 0,
886 }
887 }
888}
889
890impl Drop for ProfilerScope {
891 fn drop(&mut self) {
892 if let Some(profiler) = &self.profiler {
893 profiler.exit_function(self.frame_index);
894 }
895 }
896}
897
898impl RegressionDetector {
899 pub fn new(sensitivity_threshold: f64, window_size: usize) -> Self {
901 Self {
902 baseline_stats: HashMap::new(),
903 sensitivity_threshold,
904 window_size,
905 }
906 }
907
908 pub fn set_baseline(&mut self, stats: HashMap<String, PerformanceStats>) {
910 self.baseline_stats = stats;
911 }
912
913 pub fn detect_regressions(
915 &self,
916 current_stats: &HashMap<String, PerformanceStats>,
917 ) -> Vec<String> {
918 let mut regressions = Vec::new();
919
920 for (operation, current) in current_stats {
921 if let Some(baseline) = self.baseline_stats.get(operation) {
922 let duration_change =
924 (current.avg_duration_ms - baseline.avg_duration_ms) / baseline.avg_duration_ms;
925 if duration_change > self.sensitivity_threshold {
926 regressions.push(format!(
927 "Duration regression in '{}': {:.1}% slower ({:.2}ms → {:.2}ms)",
928 operation,
929 duration_change * 100.0,
930 baseline.avg_duration_ms,
931 current.avg_duration_ms
932 ));
933 }
934
935 if let (Some(baseline_mem), Some(current_mem)) =
937 (baseline.avg_memory_usage_mb, current.avg_memory_usage_mb)
938 {
939 let memory_change = (current_mem - baseline_mem) / baseline_mem;
940 if memory_change > self.sensitivity_threshold {
941 regressions.push(format!(
942 "Memory regression in '{}': {:.1}% increase ({:.2}MB → {:.2}MB)",
943 operation,
944 memory_change * 100.0,
945 baseline_mem,
946 current_mem
947 ));
948 }
949 }
950
951 let throughput_change =
953 (baseline.ops_per_second - current.ops_per_second) / baseline.ops_per_second;
954 if throughput_change > self.sensitivity_threshold {
955 regressions.push(format!(
956 "Throughput regression in '{}': {:.1}% decrease ({:.2} → {:.2} ops/sec)",
957 operation,
958 throughput_change * 100.0,
959 baseline.ops_per_second,
960 current.ops_per_second
961 ));
962 }
963 }
964 }
965
966 regressions
967 }
968}
969
/// Profiles `$body` under `$name` using `$profiler`: a [`ProfilerScope`]
/// guard is held for the duration of the block and the frame is recorded
/// when the guard drops at the end of the block.
#[macro_export]
macro_rules! profile_function {
    ($profiler:expr, $name:expr, $body:block) => {{
        let _scope = $profiler.enter_function($name);
        $body
    }};
}
978
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::sleep;

    // A fresh monitor should start with no statistics.
    #[tokio::test]
    async fn test_performance_monitor_creation() {
        let config = PerformanceMonitorConfig::default();
        let monitor = PerformanceMonitor::new(config);

        let stats = monitor.get_all_stats().await;
        assert!(stats.is_empty());
    }

    // One timed operation should produce one stats entry whose average
    // duration reflects the sleep.
    #[tokio::test]
    async fn test_operation_timing() {
        let monitor = PerformanceMonitor::default();

        {
            let timer = monitor.start_operation("test_operation");
            sleep(Duration::from_millis(10)).await;
            timer.finish(&monitor).await.unwrap();
        }

        // Small grace period before reading back the cached stats.
        sleep(Duration::from_millis(50)).await;

        let stats = monitor.get_stats("test_operation").await;
        assert!(stats.is_some());
        let stats = stats.unwrap();
        assert_eq!(stats.operation, "test_operation");
        assert_eq!(stats.measurement_count, 1);
        assert!(stats.avg_duration_ms >= 10.0);
    }

    // Five measurements with varying sleeps should aggregate into one
    // stats entry with a positive mean and a non-negative std dev.
    #[tokio::test]
    async fn test_multiple_measurements() {
        let monitor = PerformanceMonitor::default();

        for i in 0..5 {
            let mut timer = monitor.start_operation("multi_test");
            timer.add_metadata("iteration", &i.to_string());
            sleep(Duration::from_millis(5 + i * 2)).await;
            timer.finish(&monitor).await.unwrap();
        }

        sleep(Duration::from_millis(50)).await;

        let stats = monitor.get_stats("multi_test").await;
        assert!(stats.is_some());
        let stats = stats.unwrap();
        assert_eq!(stats.measurement_count, 5);
        assert!(stats.avg_duration_ms > 0.0);
        assert!(stats.std_dev_duration_ms >= 0.0);
    }

    // An operation slower than the configured threshold must raise a
    // SlowOperation alert.
    #[tokio::test]
    async fn test_performance_alerts() {
        let config = PerformanceMonitorConfig {
            slow_operation_threshold_ms: 10,
            ..Default::default()
        };
        let monitor = PerformanceMonitor::new(config);

        {
            let timer = monitor.start_operation("slow_operation");
            sleep(Duration::from_millis(20)).await;
            timer.finish(&monitor).await.unwrap();
        }

        sleep(Duration::from_millis(50)).await;

        let alerts = monitor.get_recent_alerts(10);
        assert!(!alerts.is_empty());
        assert_eq!(alerts[0].alert_type, PerformanceAlertType::SlowOperation);
    }

    // A slow operation should yield at least a "Performance" recommendation.
    #[tokio::test]
    async fn test_optimization_recommendations() {
        let config = PerformanceMonitorConfig {
            slow_operation_threshold_ms: 5,
            ..Default::default()
        };
        let monitor = PerformanceMonitor::new(config);

        {
            let timer = monitor.start_operation("slow_op");
            sleep(Duration::from_millis(10)).await;
            timer.finish(&monitor).await.unwrap();
        }

        sleep(Duration::from_millis(50)).await;

        let recommendations = monitor.generate_recommendations().await;
        assert!(!recommendations.is_empty());
        assert!(recommendations.iter().any(|r| r.category == "Performance"));
    }

    // The Markdown report should include the header and the recorded
    // operation's name.
    #[tokio::test]
    async fn test_performance_report() {
        let monitor = PerformanceMonitor::default();

        {
            let timer = monitor.start_operation("report_test");
            sleep(Duration::from_millis(5)).await;
            timer.finish(&monitor).await.unwrap();
        }

        sleep(Duration::from_millis(50)).await;

        let report = monitor.create_report().await;
        assert!(report.contains("Performance Report"));
        assert!(report.contains("report_test"));
    }

    // Plain construction/field-access check for PerformanceMeasurement.
    #[test]
    fn test_performance_measurement() {
        let measurement = PerformanceMeasurement {
            operation: String::from("test"),
            start_time: chrono::Utc::now(),
            duration_ms: 100,
            memory_before_bytes: Some(1000),
            memory_after_bytes: Some(1200),
            cpu_usage_percent: Some(50.0),
            audio_buffer_size: Some(16000),
            sample_rate: Some(44100),
            metadata: HashMap::new(),
        };

        assert_eq!(measurement.operation, "test");
        assert_eq!(measurement.duration_ms, 100);
    }

    // Plain construction/field-access check for PerformanceStats.
    #[test]
    fn test_performance_stats() {
        let stats = PerformanceStats {
            operation: String::from("test_op"),
            measurement_count: 10,
            total_duration_ms: 1000,
            avg_duration_ms: 100.0,
            min_duration_ms: 50,
            max_duration_ms: 200,
            std_dev_duration_ms: 25.0,
            p95_duration_ms: 180,
            p99_duration_ms: 195,
            avg_memory_usage_mb: Some(50.0),
            avg_cpu_usage_percent: Some(25.0),
            ops_per_second: 10.0,
        };

        assert_eq!(stats.operation, "test_op");
        assert_eq!(stats.measurement_count, 10);
        assert_eq!(stats.ops_per_second, 10.0);
    }
}