1use crate::prelude::*;
7use serde::{Deserialize, Serialize};
8use std::collections::{BTreeMap, HashMap, VecDeque};
9use std::sync::{Arc, Mutex, RwLock};
10use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
11
/// Central profiler for voice-conversion runs: tracks per-conversion
/// sessions, aggregates global metrics, and detects bottlenecks.
#[derive(Debug)]
pub struct ConversionProfiler {
    /// Active and recently completed sessions, keyed by generated session id.
    sessions: Arc<RwLock<HashMap<String, ProfilingSession>>>,
    /// Running aggregates folded in as sessions end.
    global_metrics: Arc<RwLock<GlobalMetrics>>,
    /// Tunables controlling tracking, sampling, and analysis.
    config: ProfilingConfig,
    /// Threshold-based analyzer run when a session ends.
    bottleneck_analyzer: BottleneckAnalyzer,
}
24
/// Configuration knobs for [`ConversionProfiler`].
#[derive(Debug, Clone)]
pub struct ProfilingConfig {
    /// Maximum sessions retained; the oldest is evicted beyond this.
    pub max_sessions: usize,
    /// Whether memory samples are recorded.
    pub enable_memory_tracking: bool,
    /// Whether CPU samples are recorded.
    pub enable_cpu_tracking: bool,
    /// Whether bottleneck analysis runs at session end.
    pub enable_bottleneck_analysis: bool,
    /// Intended interval between samples (consumed by callers of the
    /// record_* methods; not enforced inside the profiler itself).
    pub sampling_interval: Duration,
    /// Cap on memory/CPU samples kept per session (oldest dropped first).
    pub max_samples_per_session: usize,
}
41
42impl Default for ProfilingConfig {
43 fn default() -> Self {
44 Self {
45 max_sessions: 100,
46 enable_memory_tracking: true,
47 enable_cpu_tracking: true,
48 enable_bottleneck_analysis: true,
49 sampling_interval: Duration::from_millis(10),
50 max_samples_per_session: 1000,
51 }
52 }
53}
54
/// All data captured while profiling a single conversion run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingSession {
    /// Unique identifier (derived from the start timestamp).
    pub session_id: String,
    /// Wall-clock time the session started.
    pub start_time: SystemTime,
    /// Wall-clock time the session ended; `None` while still running.
    pub end_time: Option<SystemTime>,
    /// Kind of conversion being profiled.
    pub conversion_type: ConversionType,
    /// Properties of the audio being converted.
    pub audio_info: AudioInfo,
    /// Overall and per-stage timing measurements.
    pub timing_data: TimingData,
    /// Memory usage statistics and samples.
    pub memory_data: MemoryData,
    /// CPU usage statistics and samples.
    pub cpu_data: CpuData,
    /// Bottlenecks detected at session end.
    pub bottlenecks: Vec<BottleneckInfo>,
    /// Overall score in [0, 1], computed at session end.
    pub performance_score: f64,
}
79
/// Descriptive properties of the audio processed in a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioInfo {
    /// Total number of samples.
    pub length_samples: usize,
    /// Sample rate in Hz.
    pub sample_rate: f32,
    /// Number of audio channels.
    pub channels: usize,
    /// Audio duration in seconds.
    pub duration_seconds: f64,
    /// Peak amplitude of the signal.
    pub peak_amplitude: f32,
    /// Root-mean-square signal level.
    pub rms_level: f32,
}
96
/// Timing measurements for one conversion run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingData {
    /// End-to-end processing time.
    pub total_duration: Duration,
    /// Time spent in preprocessing.
    pub preprocessing_time: Duration,
    /// Time spent in the core conversion.
    pub conversion_time: Duration,
    /// Time spent in postprocessing.
    pub postprocessing_time: Duration,
    /// Time spent initializing models.
    pub model_init_time: Duration,
    /// Time spent assessing output quality.
    pub quality_assessment_time: Duration,
    /// Per-stage stats keyed by stage name (BTreeMap keeps ordering stable).
    pub stage_timings: BTreeMap<String, StageTimingInfo>,
}
115
/// Aggregated timing statistics for a single named pipeline stage.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StageTimingInfo {
    /// Stage name.
    pub name: String,
    /// Cumulative time across all executions.
    pub duration: Duration,
    /// Number of times the stage ran.
    pub execution_count: usize,
    /// Mean duration per execution.
    pub average_duration: Duration,
    /// Shortest observed execution.
    pub min_duration: Duration,
    /// Longest observed execution.
    pub max_duration: Duration,
}
132
/// Memory usage statistics collected during a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryData {
    /// Highest observed memory usage, in bytes.
    pub peak_memory: usize,
    /// Memory usage at session start.
    pub initial_memory: usize,
    /// Memory usage at session end.
    pub final_memory: usize,
    /// Number of allocations recorded.
    pub allocation_count: usize,
    /// Number of deallocations recorded.
    pub deallocation_count: usize,
    /// Total bytes allocated over the session.
    pub total_allocated: usize,
    /// Bounded sliding window of periodic memory snapshots.
    pub memory_samples: VecDeque<MemorySample>,
}
151
/// A single point-in-time memory snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemorySample {
    /// When the snapshot was taken.
    pub timestamp: SystemTime,
    /// Memory in use at that moment, in bytes.
    pub memory_usage: usize,
    /// Live allocation count at that moment.
    pub active_allocations: usize,
}
162
/// CPU usage statistics collected during a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuData {
    /// Mean CPU usage over the current sample window (percent).
    pub average_cpu_usage: f64,
    /// Highest observed CPU usage (percent).
    pub peak_cpu_usage: f64,
    /// Bounded sliding window of CPU snapshots.
    pub cpu_samples: VecDeque<CpuSample>,
    /// Number of threads in use.
    pub thread_count: usize,
}
175
/// A single point-in-time CPU snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuSample {
    /// When the snapshot was taken.
    pub timestamp: SystemTime,
    /// CPU usage at that moment (percent).
    pub cpu_usage: f64,
    /// Memory in use at that moment, in bytes.
    pub memory_usage: usize,
}
186
/// Running aggregates across all completed profiling sessions.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct GlobalMetrics {
    /// Number of sessions folded into these averages.
    pub total_conversions: usize,
    /// Incremental mean of total processing time.
    pub average_processing_time: Duration,
    /// Incremental mean of peak memory usage, in bytes.
    pub average_memory_usage: usize,
    /// Incremental mean of per-session average CPU usage (percent).
    pub average_cpu_usage: f64,
    /// Aggregates broken down per conversion type.
    pub metrics_by_type: HashMap<ConversionType, ConversionTypeMetrics>,
    /// Bounded history of per-session trend points (capped at 1000).
    pub performance_trends: VecDeque<TrendDataPoint>,
}
203
/// Aggregates for sessions of one particular conversion type.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConversionTypeMetrics {
    /// Sessions of this type seen so far.
    pub conversion_count: usize,
    /// Mean total processing time.
    pub average_time: Duration,
    /// Mean peak memory, in bytes.
    pub average_memory: usize,
    /// Mean performance score.
    pub average_score: f64,
    /// Occurrence counts of bottlenecks, keyed by description/name.
    pub common_bottlenecks: HashMap<String, usize>,
}
218
/// One entry in the global performance-trend history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrendDataPoint {
    /// When the session finished.
    pub timestamp: SystemTime,
    /// Total processing time of that session.
    pub processing_time: Duration,
    /// Peak memory of that session, in bytes.
    pub memory_usage: usize,
    /// Conversions represented by this point (currently always 1).
    pub conversion_count: usize,
}
231
impl Default for ConversionProfiler {
    /// Equivalent to [`ConversionProfiler::new`] (default configuration).
    fn default() -> Self {
        Self::new()
    }
}
237
238impl ConversionProfiler {
    /// Creates a profiler with [`ProfilingConfig::default`] settings.
    pub fn new() -> Self {
        Self::with_config(ProfilingConfig::default())
    }
243
    /// Creates a profiler using the supplied configuration.
    pub fn with_config(config: ProfilingConfig) -> Self {
        Self {
            sessions: Arc::new(RwLock::new(HashMap::new())),
            global_metrics: Arc::new(RwLock::new(GlobalMetrics::default())),
            bottleneck_analyzer: BottleneckAnalyzer::new(),
            config,
        }
    }
253
254 pub fn start_session(&self, conversion_type: ConversionType, audio_info: AudioInfo) -> String {
256 let session_id = format!(
257 "session_{}",
258 SystemTime::now()
259 .duration_since(UNIX_EPOCH)
260 .expect("operation should succeed")
261 .as_nanos()
262 );
263
264 let session = ProfilingSession {
265 session_id: session_id.clone(),
266 start_time: SystemTime::now(),
267 end_time: None,
268 conversion_type,
269 audio_info,
270 timing_data: TimingData::new(),
271 memory_data: MemoryData::new(),
272 cpu_data: CpuData::new(),
273 bottlenecks: Vec::new(),
274 performance_score: 0.0,
275 };
276
277 let mut sessions = self.sessions.write().expect("RwLock write poisoned");
278 sessions.insert(session_id.clone(), session);
279
280 if sessions.len() > self.config.max_sessions {
282 let oldest_session = sessions
283 .keys()
284 .min_by_key(|&k| sessions[k].start_time)
285 .cloned()
286 .expect("operation should succeed");
287 sessions.remove(&oldest_session);
288 }
289
290 session_id
291 }
292
293 pub fn end_session(&self, session_id: &str) -> Result<ProfilingReport> {
295 let mut sessions = self.sessions.write().expect("RwLock write poisoned");
296
297 if let Some(session) = sessions.get_mut(session_id) {
298 session.end_time = Some(SystemTime::now());
299
300 if self.config.enable_bottleneck_analysis {
302 session.bottlenecks = self.bottleneck_analyzer.analyze_session(session);
303 }
304
305 session.performance_score = self.calculate_performance_score(session);
307
308 self.update_global_metrics(session);
310
311 let report = ProfilingReport::from_session(session);
313 Ok(report)
314 } else {
315 Err(Error::processing(format!("Session {session_id} not found")))
316 }
317 }
318
319 pub fn record_stage_timing(&self, session_id: &str, stage_name: &str, duration: Duration) {
321 let mut sessions = self.sessions.write().expect("RwLock write poisoned");
322
323 if let Some(session) = sessions.get_mut(session_id) {
324 let stage_info = session
325 .timing_data
326 .stage_timings
327 .entry(stage_name.to_string())
328 .or_insert_with(|| StageTimingInfo::new(stage_name));
329
330 stage_info.update_timing(duration);
331 }
332 }
333
334 pub fn record_memory_sample(
336 &self,
337 session_id: &str,
338 memory_usage: usize,
339 active_allocations: usize,
340 ) {
341 if !self.config.enable_memory_tracking {
342 return;
343 }
344
345 let mut sessions = self.sessions.write().expect("RwLock write poisoned");
346
347 if let Some(session) = sessions.get_mut(session_id) {
348 let sample = MemorySample {
349 timestamp: SystemTime::now(),
350 memory_usage,
351 active_allocations,
352 };
353
354 session.memory_data.memory_samples.push_back(sample);
355
356 if memory_usage > session.memory_data.peak_memory {
358 session.memory_data.peak_memory = memory_usage;
359 }
360
361 if session.memory_data.memory_samples.len() > self.config.max_samples_per_session {
363 session.memory_data.memory_samples.pop_front();
364 }
365 }
366 }
367
368 pub fn record_cpu_sample(&self, session_id: &str, cpu_usage: f64, memory_usage: usize) {
370 if !self.config.enable_cpu_tracking {
371 return;
372 }
373
374 let mut sessions = self.sessions.write().expect("RwLock write poisoned");
375
376 if let Some(session) = sessions.get_mut(session_id) {
377 let sample = CpuSample {
378 timestamp: SystemTime::now(),
379 cpu_usage,
380 memory_usage,
381 };
382
383 session.cpu_data.cpu_samples.push_back(sample);
384
385 if cpu_usage > session.cpu_data.peak_cpu_usage {
387 session.cpu_data.peak_cpu_usage = cpu_usage;
388 }
389
390 let total_usage: f64 = session
392 .cpu_data
393 .cpu_samples
394 .iter()
395 .map(|s| s.cpu_usage)
396 .sum();
397 session.cpu_data.average_cpu_usage =
398 total_usage / session.cpu_data.cpu_samples.len() as f64;
399
400 if session.cpu_data.cpu_samples.len() > self.config.max_samples_per_session {
402 session.cpu_data.cpu_samples.pop_front();
403 }
404 }
405 }
406
407 pub fn get_global_metrics(&self) -> GlobalMetrics {
409 self.global_metrics
410 .read()
411 .expect("RwLock read poisoned")
412 .clone()
413 }
414
415 pub fn get_session_report(&self, session_id: &str) -> Result<ProfilingReport> {
417 let sessions = self.sessions.read().expect("RwLock read poisoned");
418
419 if let Some(session) = sessions.get(session_id) {
420 Ok(ProfilingReport::from_session(session))
421 } else {
422 Err(Error::processing(format!("Session {session_id} not found")))
423 }
424 }
425
426 pub fn get_performance_trends(&self) -> Vec<TrendDataPoint> {
428 let metrics = self.global_metrics.read().expect("RwLock read poisoned");
429 metrics.performance_trends.iter().cloned().collect()
430 }
431
432 fn calculate_performance_score(&self, session: &ProfilingSession) -> f64 {
434 let mut score = 1.0;
435
436 let processing_efficiency =
438 session.audio_info.duration_seconds / session.timing_data.total_duration.as_secs_f64();
439
440 if processing_efficiency < 1.0 {
441 score *= processing_efficiency;
443 }
444
445 let memory_per_second =
447 session.memory_data.peak_memory as f64 / session.audio_info.duration_seconds;
448
449 if memory_per_second > 1_000_000.0 {
450 score *= 0.8;
452 }
453
454 if session.cpu_data.average_cpu_usage > 80.0 {
456 score *= 0.7;
457 }
458
459 let bottleneck_penalty = session.bottlenecks.len() as f64 * 0.1;
461 score *= (1.0 - bottleneck_penalty).max(0.1);
462
463 score.clamp(0.0, 1.0)
464 }
465
    /// Folds a finished session into the running global averages and trends.
    fn update_global_metrics(&self, session: &ProfilingSession) {
        let mut metrics = self.global_metrics.write().expect("RwLock write poisoned");

        metrics.total_conversions += 1;

        // Incremental mean: new_avg = old_avg * (n-1)/n + sample * 1/n.
        // The count is incremented first, so n is the new total.
        let n = metrics.total_conversions as f64;
        let new_weight = 1.0 / n;
        let old_weight = (n - 1.0) / n;

        metrics.average_processing_time = Duration::from_nanos(
            (metrics.average_processing_time.as_nanos() as f64 * old_weight
                + session.timing_data.total_duration.as_nanos() as f64 * new_weight)
                as u64,
        );

        metrics.average_memory_usage = (metrics.average_memory_usage as f64 * old_weight
            + session.memory_data.peak_memory as f64 * new_weight)
            as usize;

        metrics.average_cpu_usage = metrics.average_cpu_usage * old_weight
            + session.cpu_data.average_cpu_usage * new_weight;

        // Per-conversion-type aggregates.
        let type_metrics = metrics
            .metrics_by_type
            .entry(session.conversion_type.clone())
            .or_default();

        type_metrics.update_with_session(session);

        // Append a trend point and cap the history at 1000 entries.
        let trend_point = TrendDataPoint {
            timestamp: SystemTime::now(),
            processing_time: session.timing_data.total_duration,
            memory_usage: session.memory_data.peak_memory,
            conversion_count: 1,
        };

        metrics.performance_trends.push_back(trend_point);

        if metrics.performance_trends.len() > 1000 {
            metrics.performance_trends.pop_front();
        }
    }
513}
514
/// Analyzes finished sessions against configurable thresholds to detect
/// performance bottlenecks.
#[derive(Debug)]
pub struct BottleneckAnalyzer {
    /// Limits that, when exceeded, produce a bottleneck finding.
    thresholds: BottleneckThresholds,
}
521
/// Threshold values used by [`BottleneckAnalyzer`].
#[derive(Debug, Clone)]
pub struct BottleneckThresholds {
    /// Maximum allowed processing-time / audio-duration ratio.
    pub max_processing_ratio: f64,
    /// Maximum allowed peak-memory bytes per second of audio.
    pub max_memory_per_second: usize,
    /// Maximum allowed average CPU usage (percent).
    pub max_cpu_usage: f64,
    /// Maximum allowed share of total time for any single stage.
    pub max_stage_time_ratio: f64,
}
534
535impl Default for BottleneckThresholds {
536 fn default() -> Self {
537 Self {
538 max_processing_ratio: 1.0, max_memory_per_second: 10_000_000, max_cpu_usage: 80.0,
541 max_stage_time_ratio: 0.5, }
543 }
544}
545
/// A single detected performance bottleneck with remediation advice.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BottleneckInfo {
    /// Category of the bottleneck.
    pub bottleneck_type: BottleneckType,
    /// Severity in [0, 1], scaled by how far the threshold was exceeded.
    pub severity: f64,
    /// Human-readable summary of the finding.
    pub description: String,
    /// Suggested mitigations.
    pub recommendations: Vec<String>,
    /// Component or stage the finding applies to.
    pub affected_component: String,
    /// The value that was measured.
    pub measured_value: f64,
    /// The threshold it exceeded.
    pub threshold_value: f64,
}
564
565#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
567pub enum BottleneckType {
568 ProcessingSpeed,
570 MemoryUsage,
572 CpuUsage,
574 StagePerformance,
576 MemoryAllocation,
578 CacheEfficiency,
580 IoPerformance,
582}
583
impl Default for BottleneckAnalyzer {
    /// Equivalent to [`BottleneckAnalyzer::new`] (default thresholds).
    fn default() -> Self {
        Self::new()
    }
}
589
590impl BottleneckAnalyzer {
    /// Creates an analyzer with [`BottleneckThresholds::default`] limits.
    pub fn new() -> Self {
        Self {
            thresholds: BottleneckThresholds::default(),
        }
    }
597
    /// Creates an analyzer with custom threshold limits.
    pub fn with_thresholds(thresholds: BottleneckThresholds) -> Self {
        Self { thresholds }
    }
602
603 pub fn analyze_session(&self, session: &ProfilingSession) -> Vec<BottleneckInfo> {
605 let mut bottlenecks = Vec::new();
606
607 bottlenecks.extend(self.check_processing_speed(session));
609
610 bottlenecks.extend(self.check_memory_usage(session));
612
613 bottlenecks.extend(self.check_cpu_usage(session));
615
616 bottlenecks.extend(self.check_stage_performance(session));
618
619 bottlenecks.extend(self.check_memory_allocation(session));
621
622 bottlenecks
623 }
624
625 fn check_processing_speed(&self, session: &ProfilingSession) -> Vec<BottleneckInfo> {
626 let mut bottlenecks = Vec::new();
627
628 let processing_ratio =
629 session.timing_data.total_duration.as_secs_f64() / session.audio_info.duration_seconds;
630
631 if processing_ratio > self.thresholds.max_processing_ratio {
632 let severity = ((processing_ratio - self.thresholds.max_processing_ratio)
633 / self.thresholds.max_processing_ratio)
634 .min(1.0);
635
636 bottlenecks.push(BottleneckInfo {
637 bottleneck_type: BottleneckType::ProcessingSpeed,
638 severity,
639 description: format!(
640 "Processing is {:.2}x slower than real-time (target: {:.2}x)",
641 processing_ratio, self.thresholds.max_processing_ratio
642 ),
643 recommendations: vec![
644 "Consider using GPU acceleration".to_string(),
645 "Optimize algorithm parameters for speed".to_string(),
646 "Use lower quality settings for real-time applications".to_string(),
647 ],
648 affected_component: "Overall Processing".to_string(),
649 measured_value: processing_ratio,
650 threshold_value: self.thresholds.max_processing_ratio,
651 });
652 }
653
654 bottlenecks
655 }
656
657 fn check_memory_usage(&self, session: &ProfilingSession) -> Vec<BottleneckInfo> {
658 let mut bottlenecks = Vec::new();
659
660 let memory_per_second =
661 session.memory_data.peak_memory as f64 / session.audio_info.duration_seconds;
662
663 if memory_per_second > self.thresholds.max_memory_per_second as f64 {
664 let severity = ((memory_per_second - self.thresholds.max_memory_per_second as f64)
665 / self.thresholds.max_memory_per_second as f64)
666 .min(1.0);
667
668 bottlenecks.push(BottleneckInfo {
669 bottleneck_type: BottleneckType::MemoryUsage,
670 severity,
671 description: format!(
672 "Memory usage is {:.2} MB per second (target: {:.2} MB/s)",
673 memory_per_second / 1_000_000.0,
674 self.thresholds.max_memory_per_second as f64 / 1_000_000.0
675 ),
676 recommendations: vec![
677 "Enable memory pooling".to_string(),
678 "Reduce buffer sizes".to_string(),
679 "Use streaming processing for large files".to_string(),
680 ],
681 affected_component: "Memory Management".to_string(),
682 measured_value: memory_per_second,
683 threshold_value: self.thresholds.max_memory_per_second as f64,
684 });
685 }
686
687 bottlenecks
688 }
689
690 fn check_cpu_usage(&self, session: &ProfilingSession) -> Vec<BottleneckInfo> {
691 let mut bottlenecks = Vec::new();
692
693 if session.cpu_data.average_cpu_usage > self.thresholds.max_cpu_usage {
694 let severity = ((session.cpu_data.average_cpu_usage - self.thresholds.max_cpu_usage)
695 / self.thresholds.max_cpu_usage)
696 .min(1.0);
697
698 bottlenecks.push(BottleneckInfo {
699 bottleneck_type: BottleneckType::CpuUsage,
700 severity,
701 description: format!(
702 "CPU usage is {:.1}% (target: {:.1}%)",
703 session.cpu_data.average_cpu_usage, self.thresholds.max_cpu_usage
704 ),
705 recommendations: vec![
706 "Use multi-threading for parallel processing".to_string(),
707 "Optimize algorithms for CPU efficiency".to_string(),
708 "Consider using SIMD instructions".to_string(),
709 ],
710 affected_component: "CPU Processing".to_string(),
711 measured_value: session.cpu_data.average_cpu_usage,
712 threshold_value: self.thresholds.max_cpu_usage,
713 });
714 }
715
716 bottlenecks
717 }
718
719 fn check_stage_performance(&self, session: &ProfilingSession) -> Vec<BottleneckInfo> {
720 let mut bottlenecks = Vec::new();
721
722 let total_time = session.timing_data.total_duration.as_secs_f64();
723
724 for (stage_name, stage_info) in &session.timing_data.stage_timings {
725 let stage_ratio = stage_info.duration.as_secs_f64() / total_time;
726
727 if stage_ratio > self.thresholds.max_stage_time_ratio {
728 let severity = ((stage_ratio - self.thresholds.max_stage_time_ratio)
729 / self.thresholds.max_stage_time_ratio)
730 .min(1.0);
731
732 bottlenecks.push(BottleneckInfo {
733 bottleneck_type: BottleneckType::StagePerformance,
734 severity,
735 description: format!(
736 "Stage '{}' takes {:.1}% of total processing time (target: {:.1}%)",
737 stage_name,
738 stage_ratio * 100.0,
739 self.thresholds.max_stage_time_ratio * 100.0
740 ),
741 recommendations: vec![
742 format!("Optimize {} algorithm", stage_name),
743 "Consider caching results for this stage".to_string(),
744 "Profile individual operations within this stage".to_string(),
745 ],
746 affected_component: stage_name.clone(),
747 measured_value: stage_ratio,
748 threshold_value: self.thresholds.max_stage_time_ratio,
749 });
750 }
751 }
752
753 bottlenecks
754 }
755
    /// Flags excessive allocation churn and potential leaks.
    fn check_memory_allocation(&self, session: &ProfilingSession) -> Vec<BottleneckInfo> {
        let mut bottlenecks = Vec::new();

        // Churn check: more than 1000 allocations per second of audio.
        let total_allocations = session.memory_data.allocation_count;
        let allocation_rate = total_allocations as f64 / session.audio_info.duration_seconds;

        if allocation_rate > 1000.0 {
            bottlenecks.push(BottleneckInfo {
                bottleneck_type: BottleneckType::MemoryAllocation,
                // Severity saturates at 10x the threshold (10000/s).
                severity: (allocation_rate / 10000.0).min(1.0),
                description: format!(
                    "High allocation rate: {allocation_rate:.0} allocations per second"
                ),
                recommendations: vec![
                    "Use object pooling to reduce allocations".to_string(),
                    "Pre-allocate buffers when possible".to_string(),
                    "Consider using stack allocation for small objects".to_string(),
                ],
                affected_component: "Memory Allocator".to_string(),
                measured_value: allocation_rate,
                threshold_value: 1000.0,
            });
        }

        // Leak check: allocations outnumber deallocations by more than 100.
        let net_allocations = session.memory_data.allocation_count as i64
            - session.memory_data.deallocation_count as i64;

        if net_allocations > 100 {
            bottlenecks.push(BottleneckInfo {
                bottleneck_type: BottleneckType::MemoryAllocation,
                severity: (net_allocations as f64 / 1000.0).min(1.0),
                description: format!("Potential memory leak: {net_allocations} net allocations"),
                recommendations: vec![
                    "Check for unreleased resources".to_string(),
                    "Ensure proper cleanup in error paths".to_string(),
                    "Use RAII patterns for resource management".to_string(),
                ],
                affected_component: "Memory Management".to_string(),
                measured_value: net_allocations as f64,
                threshold_value: 100.0,
            });
        }

        bottlenecks
    }
804}
805
/// Complete, serializable report derived from one profiling session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingReport {
    /// Basic session and audio metadata.
    pub session_info: SessionInfo,
    /// Headline performance numbers and letter grade.
    pub performance_summary: PerformanceSummary,
    /// Time distribution across pipeline phases.
    pub timing_breakdown: TimingBreakdown,
    /// Memory usage analysis.
    pub memory_analysis: MemoryAnalysis,
    /// CPU usage analysis.
    pub cpu_analysis: CpuAnalysis,
    /// Bottlenecks detected for the session.
    pub bottlenecks: Vec<BottleneckInfo>,
    /// Human-readable tuning advice.
    pub recommendations: Vec<String>,
    /// Overall score in [0, 1].
    pub performance_score: f64,
}
826
/// Identifying metadata for the profiled session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionInfo {
    /// Session identifier.
    pub session_id: String,
    /// Kind of conversion profiled.
    pub conversion_type: ConversionType,
    /// Audio duration in seconds.
    pub audio_duration: f64,
    /// Total number of audio samples.
    pub audio_samples: usize,
    /// Sample rate in Hz.
    pub sample_rate: f32,
    /// Number of audio channels.
    pub channels: usize,
    /// When the session started.
    pub start_time: SystemTime,
    /// When the session ended; `None` if still running when reported.
    pub end_time: Option<SystemTime>,
}
847
/// Headline performance numbers for a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSummary {
    /// End-to-end processing time.
    pub total_processing_time: Duration,
    /// Processing seconds per audio second (> 1.0 = slower than real time).
    pub real_time_factor: f64,
    /// Peak memory usage in megabytes.
    pub peak_memory_mb: f64,
    /// Mean CPU usage (percent).
    pub average_cpu_usage: f64,
    /// Peak CPU usage (percent).
    pub peak_cpu_usage: f64,
    /// Number of bottlenecks detected.
    pub bottleneck_count: usize,
    /// Letter grade ("A".."F") derived from the performance score.
    pub performance_grade: String,
}
866
/// Share of total processing time spent in each pipeline phase.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimingBreakdown {
    /// Preprocessing share of total time (percent).
    pub preprocessing_percentage: f64,
    /// Core conversion share (percent).
    pub conversion_percentage: f64,
    /// Postprocessing share (percent).
    pub postprocessing_percentage: f64,
    /// Model initialization share (percent).
    pub model_init_percentage: f64,
    /// Quality assessment share (percent).
    pub quality_assessment_percentage: f64,
    /// Name of the stage with the largest cumulative duration.
    pub slowest_stage: String,
    /// Name of the stage with the smallest cumulative duration.
    pub fastest_stage: String,
}
885
/// Memory usage analysis for a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryAnalysis {
    /// Peak memory usage in megabytes.
    pub peak_memory_mb: f64,
    /// Mean sampled memory usage in megabytes.
    pub average_memory_mb: f64,
    /// Efficiency in [0, 1] (1.0 = minimal memory pressure).
    pub memory_efficiency_score: f64,
    /// Number of allocations recorded.
    pub allocation_count: usize,
    /// Number of deallocations recorded.
    pub deallocation_count: usize,
    /// True when allocations exceed deallocations by more than 10.
    pub potential_leaks: bool,
    /// Relative growth between first and last memory samples.
    pub memory_growth_rate: f64,
}
904
/// CPU usage analysis for a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuAnalysis {
    /// Mean CPU usage (percent).
    pub average_usage: f64,
    /// Peak CPU usage (percent).
    pub peak_usage: f64,
    /// Headroom-based efficiency in [0, 1] (1.0 = idle).
    pub cpu_efficiency_score: f64,
    /// Session thread count divided by available logical CPUs.
    pub thread_utilization: f64,
    /// Stages consuming more than 20% of total processing time.
    pub cpu_intensive_stages: Vec<String>,
}
919
920impl ProfilingReport {
921 pub fn from_session(session: &ProfilingSession) -> Self {
923 let session_info = SessionInfo {
924 session_id: session.session_id.clone(),
925 conversion_type: session.conversion_type.clone(),
926 audio_duration: session.audio_info.duration_seconds,
927 audio_samples: session.audio_info.length_samples,
928 sample_rate: session.audio_info.sample_rate,
929 channels: session.audio_info.channels,
930 start_time: session.start_time,
931 end_time: session.end_time,
932 };
933
934 let real_time_factor =
935 session.timing_data.total_duration.as_secs_f64() / session.audio_info.duration_seconds;
936
937 let performance_summary = PerformanceSummary {
938 total_processing_time: session.timing_data.total_duration,
939 real_time_factor,
940 peak_memory_mb: session.memory_data.peak_memory as f64 / 1_000_000.0,
941 average_cpu_usage: session.cpu_data.average_cpu_usage,
942 peak_cpu_usage: session.cpu_data.peak_cpu_usage,
943 bottleneck_count: session.bottlenecks.len(),
944 performance_grade: Self::calculate_grade(session.performance_score),
945 };
946
947 let total_time = session.timing_data.total_duration.as_secs_f64();
948 let timing_breakdown = TimingBreakdown {
949 preprocessing_percentage: session.timing_data.preprocessing_time.as_secs_f64()
950 / total_time
951 * 100.0,
952 conversion_percentage: session.timing_data.conversion_time.as_secs_f64() / total_time
953 * 100.0,
954 postprocessing_percentage: session.timing_data.postprocessing_time.as_secs_f64()
955 / total_time
956 * 100.0,
957 model_init_percentage: session.timing_data.model_init_time.as_secs_f64() / total_time
958 * 100.0,
959 quality_assessment_percentage: session
960 .timing_data
961 .quality_assessment_time
962 .as_secs_f64()
963 / total_time
964 * 100.0,
965 slowest_stage: Self::find_slowest_stage(&session.timing_data),
966 fastest_stage: Self::find_fastest_stage(&session.timing_data),
967 };
968
969 let memory_analysis = MemoryAnalysis {
970 peak_memory_mb: session.memory_data.peak_memory as f64 / 1_000_000.0,
971 average_memory_mb: Self::calculate_average_memory(&session.memory_data) / 1_000_000.0,
972 memory_efficiency_score: Self::calculate_memory_efficiency(
973 &session.memory_data,
974 session.audio_info.duration_seconds,
975 ),
976 allocation_count: session.memory_data.allocation_count,
977 deallocation_count: session.memory_data.deallocation_count,
978 potential_leaks: session.memory_data.allocation_count
979 > session.memory_data.deallocation_count + 10,
980 memory_growth_rate: Self::calculate_memory_growth_rate(&session.memory_data),
981 };
982
983 let cpu_analysis = CpuAnalysis {
984 average_usage: session.cpu_data.average_cpu_usage,
985 peak_usage: session.cpu_data.peak_cpu_usage,
986 cpu_efficiency_score: Self::calculate_cpu_efficiency(&session.cpu_data),
987 thread_utilization: session.cpu_data.thread_count as f64 / num_cpus::get() as f64,
988 cpu_intensive_stages: Self::find_cpu_intensive_stages(&session.timing_data),
989 };
990
991 let recommendations = Self::generate_recommendations(session);
992
993 Self {
994 session_info,
995 performance_summary,
996 timing_breakdown,
997 memory_analysis,
998 cpu_analysis,
999 bottlenecks: session.bottlenecks.clone(),
1000 recommendations,
1001 performance_score: session.performance_score,
1002 }
1003 }
1004
1005 fn calculate_grade(score: f64) -> String {
1006 if score >= 0.9 {
1007 "A".to_string()
1008 } else if score >= 0.8 {
1009 "B".to_string()
1010 } else if score >= 0.7 {
1011 "C".to_string()
1012 } else if score >= 0.6 {
1013 "D".to_string()
1014 } else {
1015 "F".to_string()
1016 }
1017 }
1018
1019 fn find_slowest_stage(timing_data: &TimingData) -> String {
1020 timing_data
1021 .stage_timings
1022 .iter()
1023 .max_by_key(|(_, info)| info.duration)
1024 .map(|(name, _)| name.clone())
1025 .unwrap_or_else(|| "Unknown".to_string())
1026 }
1027
1028 fn find_fastest_stage(timing_data: &TimingData) -> String {
1029 timing_data
1030 .stage_timings
1031 .iter()
1032 .min_by_key(|(_, info)| info.duration)
1033 .map(|(name, _)| name.clone())
1034 .unwrap_or_else(|| "Unknown".to_string())
1035 }
1036
1037 fn calculate_average_memory(memory_data: &MemoryData) -> f64 {
1038 if memory_data.memory_samples.is_empty() {
1039 memory_data.peak_memory as f64
1040 } else {
1041 let sum: usize = memory_data
1042 .memory_samples
1043 .iter()
1044 .map(|s| s.memory_usage)
1045 .sum();
1046 sum as f64 / memory_data.memory_samples.len() as f64
1047 }
1048 }
1049
1050 fn calculate_memory_efficiency(memory_data: &MemoryData, duration: f64) -> f64 {
1051 let memory_per_second = memory_data.peak_memory as f64 / duration;
1052 let efficiency = 1.0 - (memory_per_second / 10_000_000.0).min(1.0); efficiency.max(0.0)
1054 }
1055
1056 fn calculate_memory_growth_rate(memory_data: &MemoryData) -> f64 {
1057 if memory_data.memory_samples.len() < 2 {
1058 return 0.0;
1059 }
1060
1061 let first = memory_data
1062 .memory_samples
1063 .front()
1064 .expect("operation should succeed")
1065 .memory_usage as f64;
1066 let last = memory_data
1067 .memory_samples
1068 .back()
1069 .expect("operation should succeed")
1070 .memory_usage as f64;
1071 let growth = (last - first) / first;
1072 growth.clamp(-1.0, 10.0) }
1074
1075 fn calculate_cpu_efficiency(cpu_data: &CpuData) -> f64 {
1076 let efficiency = 1.0 - (cpu_data.average_cpu_usage / 100.0);
1077 efficiency.max(0.0)
1078 }
1079
1080 fn find_cpu_intensive_stages(timing_data: &TimingData) -> Vec<String> {
1081 let total_time = timing_data.total_duration.as_secs_f64();
1082 timing_data
1083 .stage_timings
1084 .iter()
1085 .filter(|(_, info)| info.duration.as_secs_f64() / total_time > 0.2) .map(|(name, _)| name.clone())
1087 .collect()
1088 }
1089
1090 fn generate_recommendations(session: &ProfilingSession) -> Vec<String> {
1091 let mut recommendations = Vec::new();
1092
1093 let rtf =
1094 session.timing_data.total_duration.as_secs_f64() / session.audio_info.duration_seconds;
1095 if rtf > 1.0 {
1096 recommendations
1097 .push("Consider enabling GPU acceleration for faster processing".to_string());
1098 recommendations
1099 .push("Use lower quality settings for real-time applications".to_string());
1100 }
1101
1102 let memory_per_sec =
1103 session.memory_data.peak_memory as f64 / session.audio_info.duration_seconds;
1104 if memory_per_sec > 10_000_000.0 {
1105 recommendations.push("Enable memory pooling to reduce memory usage".to_string());
1106 recommendations.push("Use streaming processing for large audio files".to_string());
1107 }
1108
1109 if session.cpu_data.average_cpu_usage > 80.0 {
1110 recommendations.push("Enable multi-threading for better CPU utilization".to_string());
1111 recommendations.push("Consider using SIMD optimizations".to_string());
1112 }
1113
1114 if session.bottlenecks.len() > 3 {
1115 recommendations
1116 .push("Address identified bottlenecks to improve overall performance".to_string());
1117 }
1118
1119 if recommendations.is_empty() {
1120 recommendations.push("Performance is good - no major optimizations needed".to_string());
1121 }
1122
1123 recommendations
1124 }
1125
    /// Serializes the report as pretty-printed JSON.
    ///
    /// # Errors
    /// Returns `Error::Serialization` if serde_json serialization fails.
    pub fn to_json(&self) -> Result<String> {
        serde_json::to_string_pretty(self).map_err(Error::Serialization)
    }
1130
1131 pub fn to_text(&self) -> String {
1133 let mut report = String::new();
1134
1135 report.push_str("=== VoiRS Conversion Performance Report ===\n\n");
1136
1137 report.push_str(&format!("Session ID: {}\n", self.session_info.session_id));
1139 report.push_str(&format!(
1140 "Conversion Type: {:?}\n",
1141 self.session_info.conversion_type
1142 ));
1143 report.push_str(&format!(
1144 "Audio Duration: {:.2}s\n",
1145 self.session_info.audio_duration
1146 ));
1147 report.push_str(&format!(
1148 "Sample Rate: {:.0} Hz\n",
1149 self.session_info.sample_rate
1150 ));
1151 report.push_str(&format!("Channels: {}\n\n", self.session_info.channels));
1152
1153 report.push_str("=== Performance Summary ===\n");
1155 report.push_str(&format!(
1156 "Processing Time: {:.2}s\n",
1157 self.performance_summary.total_processing_time.as_secs_f64()
1158 ));
1159 report.push_str(&format!(
1160 "Real-time Factor: {:.2}x\n",
1161 self.performance_summary.real_time_factor
1162 ));
1163 report.push_str(&format!(
1164 "Peak Memory: {:.1} MB\n",
1165 self.performance_summary.peak_memory_mb
1166 ));
1167 report.push_str(&format!(
1168 "Average CPU: {:.1}%\n",
1169 self.performance_summary.average_cpu_usage
1170 ));
1171 report.push_str(&format!(
1172 "Performance Grade: {}\n",
1173 self.performance_summary.performance_grade
1174 ));
1175 report.push_str(&format!(
1176 "Performance Score: {:.2}\n\n",
1177 self.performance_score
1178 ));
1179
1180 report.push_str("=== Timing Breakdown ===\n");
1182 report.push_str(&format!(
1183 "Preprocessing: {:.1}%\n",
1184 self.timing_breakdown.preprocessing_percentage
1185 ));
1186 report.push_str(&format!(
1187 "Conversion: {:.1}%\n",
1188 self.timing_breakdown.conversion_percentage
1189 ));
1190 report.push_str(&format!(
1191 "Postprocessing: {:.1}%\n",
1192 self.timing_breakdown.postprocessing_percentage
1193 ));
1194 report.push_str(&format!(
1195 "Model Init: {:.1}%\n",
1196 self.timing_breakdown.model_init_percentage
1197 ));
1198 report.push_str(&format!(
1199 "Quality Assessment: {:.1}%\n",
1200 self.timing_breakdown.quality_assessment_percentage
1201 ));
1202 report.push_str(&format!(
1203 "Slowest Stage: {}\n\n",
1204 self.timing_breakdown.slowest_stage
1205 ));
1206
1207 if !self.bottlenecks.is_empty() {
1209 report.push_str("=== Identified Bottlenecks ===\n");
1210 for bottleneck in &self.bottlenecks {
1211 report.push_str(&format!(
1212 "- {} (Severity: {:.2})\n",
1213 bottleneck.description, bottleneck.severity
1214 ));
1215 report.push_str(&format!(
1216 " Component: {component}\n",
1217 component = bottleneck.affected_component
1218 ));
1219 for rec in &bottleneck.recommendations {
1220 report.push_str(&format!(" → {rec}\n"));
1221 }
1222 report.push('\n');
1223 }
1224 }
1225
1226 report.push_str("=== Recommendations ===\n");
1228 for (i, rec) in self.recommendations.iter().enumerate() {
1229 report.push_str(&format!("{}. {}\n", i + 1, rec));
1230 }
1231
1232 report
1233 }
1234}
1235
impl Default for TimingData {
    /// Equivalent to [`TimingData::new`] (all durations zero).
    fn default() -> Self {
        Self::new()
    }
}
1242
1243impl TimingData {
1244 pub fn new() -> Self {
1246 Self {
1247 total_duration: Duration::from_millis(0),
1248 preprocessing_time: Duration::from_millis(0),
1249 conversion_time: Duration::from_millis(0),
1250 postprocessing_time: Duration::from_millis(0),
1251 model_init_time: Duration::from_millis(0),
1252 quality_assessment_time: Duration::from_millis(0),
1253 stage_timings: BTreeMap::new(),
1254 }
1255 }
1256}
1257
1258impl Default for MemoryData {
1259 fn default() -> Self {
1260 Self::new()
1261 }
1262}
1263
1264impl MemoryData {
1265 pub fn new() -> Self {
1267 Self {
1268 peak_memory: 0,
1269 initial_memory: 0,
1270 final_memory: 0,
1271 allocation_count: 0,
1272 deallocation_count: 0,
1273 total_allocated: 0,
1274 memory_samples: VecDeque::new(),
1275 }
1276 }
1277}
1278
1279impl Default for CpuData {
1280 fn default() -> Self {
1281 Self::new()
1282 }
1283}
1284
1285impl CpuData {
1286 pub fn new() -> Self {
1288 Self {
1289 average_cpu_usage: 0.0,
1290 peak_cpu_usage: 0.0,
1291 cpu_samples: VecDeque::new(),
1292 thread_count: 1,
1293 }
1294 }
1295}
1296
1297impl StageTimingInfo {
1298 pub fn new(name: &str) -> Self {
1300 Self {
1301 name: name.to_string(),
1302 duration: Duration::from_millis(0),
1303 execution_count: 0,
1304 average_duration: Duration::from_millis(0),
1305 min_duration: Duration::from_secs(u64::MAX),
1306 max_duration: Duration::from_millis(0),
1307 }
1308 }
1309
1310 pub fn update_timing(&mut self, new_duration: Duration) {
1312 self.execution_count += 1;
1313 self.duration += new_duration;
1314
1315 if new_duration < self.min_duration {
1316 self.min_duration = new_duration;
1317 }
1318 if new_duration > self.max_duration {
1319 self.max_duration = new_duration;
1320 }
1321
1322 self.average_duration = self.duration / self.execution_count as u32;
1323 }
1324}
1325
1326impl Default for ConversionTypeMetrics {
1327 fn default() -> Self {
1328 Self::new()
1329 }
1330}
1331
1332impl ConversionTypeMetrics {
1333 pub fn new() -> Self {
1335 Self {
1336 conversion_count: 0,
1337 average_time: Duration::from_millis(0),
1338 average_memory: 0,
1339 average_score: 0.0,
1340 common_bottlenecks: HashMap::new(),
1341 }
1342 }
1343
1344 pub fn update_with_session(&mut self, session: &ProfilingSession) {
1346 self.conversion_count += 1;
1347
1348 let n = self.conversion_count as f64;
1349 let new_weight = 1.0 / n;
1350 let old_weight = (n - 1.0) / n;
1351
1352 self.average_time = Duration::from_nanos(
1353 (self.average_time.as_nanos() as f64 * old_weight
1354 + session.timing_data.total_duration.as_nanos() as f64 * new_weight)
1355 as u64,
1356 );
1357
1358 self.average_memory = (self.average_memory as f64 * old_weight
1359 + session.memory_data.peak_memory as f64 * new_weight)
1360 as usize;
1361
1362 self.average_score =
1363 self.average_score * old_weight + session.performance_score * new_weight;
1364
1365 for bottleneck in &session.bottlenecks {
1367 *self
1368 .common_bottlenecks
1369 .entry(bottleneck.affected_component.clone())
1370 .or_insert(0) += 1;
1371 }
1372 }
1373}
1374
#[cfg(test)]
mod tests {
    use super::*;

    /// Drives a full start → record → end cycle through the profiler and
    /// checks that the resulting report is populated.
    #[test]
    fn test_profiler_session_lifecycle() {
        let profiler = ConversionProfiler::new();

        let info = AudioInfo {
            length_samples: 44100,
            sample_rate: 44100.0,
            channels: 1,
            duration_seconds: 1.0,
            peak_amplitude: 0.8,
            rms_level: 0.2,
        };

        let session_id = profiler.start_session(ConversionType::PitchShift, info);

        // Feed in some stage, memory, and CPU samples so the final report
        // has data to summarize.
        profiler.record_stage_timing(&session_id, "preprocessing", Duration::from_millis(50));
        profiler.record_stage_timing(&session_id, "conversion", Duration::from_millis(200));
        profiler.record_memory_sample(&session_id, 1_000_000, 10);
        profiler.record_cpu_sample(&session_id, 75.0, 1_000_000);

        let report = profiler.end_session(&session_id).unwrap();

        assert!(!report.session_info.session_id.is_empty());
        assert_eq!(
            report.session_info.conversion_type,
            ConversionType::PitchShift
        );
        assert!(report.performance_score > 0.0);
    }

    /// A slow, CPU-heavy session should trigger both a processing-speed
    /// and a CPU-usage bottleneck.
    #[test]
    fn test_bottleneck_analyzer() {
        let analyzer = BottleneckAnalyzer::new();

        let mut session = ProfilingSession {
            session_id: "test".to_string(),
            start_time: SystemTime::now(),
            end_time: None,
            conversion_type: ConversionType::PitchShift,
            audio_info: AudioInfo {
                length_samples: 44100,
                sample_rate: 44100.0,
                channels: 1,
                duration_seconds: 1.0,
                peak_amplitude: 0.8,
                rms_level: 0.2,
            },
            timing_data: TimingData::new(),
            memory_data: MemoryData::new(),
            cpu_data: CpuData::new(),
            bottlenecks: Vec::new(),
            performance_score: 0.0,
        };

        // 5 s to convert 1 s of audio at 90 % CPU — clearly problematic.
        session.timing_data.total_duration = Duration::from_secs(5);
        session.cpu_data.average_cpu_usage = 90.0;

        let bottlenecks = analyzer.analyze_session(&session);

        assert!(!bottlenecks.is_empty());
        assert!(bottlenecks
            .iter()
            .any(|b| b.bottleneck_type == BottleneckType::ProcessingSpeed));
        assert!(bottlenecks
            .iter()
            .any(|b| b.bottleneck_type == BottleneckType::CpuUsage));
    }

    /// Builds a report from a synthetic session and checks the summary
    /// fields plus both output formats (text and JSON).
    #[test]
    fn test_profiling_report_generation() {
        let mut session = ProfilingSession {
            session_id: "test_report".to_string(),
            start_time: SystemTime::now(),
            end_time: Some(SystemTime::now()),
            conversion_type: ConversionType::SpeedTransformation,
            audio_info: AudioInfo {
                length_samples: 88200,
                sample_rate: 44100.0,
                channels: 2,
                duration_seconds: 2.0,
                peak_amplitude: 0.9,
                rms_level: 0.3,
            },
            timing_data: TimingData::new(),
            memory_data: MemoryData::new(),
            cpu_data: CpuData::new(),
            bottlenecks: Vec::new(),
            performance_score: 0.85,
        };

        // 1.5 s processing for 2 s of audio keeps it faster than real time.
        session.timing_data.total_duration = Duration::from_millis(1500);
        session.memory_data.peak_memory = 2_000_000;
        session.cpu_data.average_cpu_usage = 45.0;

        let report = ProfilingReport::from_session(&session);

        assert_eq!(
            report.session_info.conversion_type,
            ConversionType::SpeedTransformation
        );
        assert_eq!(report.performance_summary.performance_grade, "B");
        assert!(report.performance_summary.real_time_factor < 1.0);

        let text_report = report.to_text();
        assert!(text_report.contains("Performance Report"));
        assert!(text_report.contains("Real-time Factor"));

        let json_report = report.to_json().unwrap();
        assert!(json_report.contains("session_info"));
    }

    /// Min/max/average must track correctly across repeated updates.
    #[test]
    fn test_stage_timing_info() {
        let mut stage = StageTimingInfo::new("test_stage");

        stage.update_timing(Duration::from_millis(100));
        stage.update_timing(Duration::from_millis(200));
        stage.update_timing(Duration::from_millis(50));

        assert_eq!(stage.execution_count, 3);
        assert_eq!(stage.min_duration, Duration::from_millis(50));
        assert_eq!(stage.max_duration, Duration::from_millis(200));

        let expected_avg = Duration::from_millis(350) / 3;
        assert_eq!(stage.average_duration, expected_avg);
    }
}