use crate::types::AdvancedFeature;
use crate::{Result, VoirsError};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
/// Thread-safe collector of synthesis performance metrics.
///
/// Cloning is cheap and shares the underlying metrics: the `Arc<Mutex<..>>`
/// is reference-counted, so all clones observe and update the same state.
#[derive(Debug, Clone)]
pub struct PerformanceMonitor {
    // Shared, lock-protected metrics store; all recording methods go through it.
    metrics: Arc<Mutex<PerformanceMetrics>>,
    // Construction time; used as the baseline for report uptime.
    start_time: Instant,
}
/// Snapshot of every metric the monitor collects.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Number of synthesis runs recorded via `record_synthesis`.
    pub total_syntheses: u64,
    /// Sum of processing time over all recorded syntheses.
    pub total_processing_time: Duration,
    /// `total_processing_time / total_syntheses`.
    pub average_synthesis_time: Duration,
    /// Highest memory usage ever reported, in bytes.
    pub peak_memory_usage: u64,
    /// Most recently reported memory usage, in bytes.
    pub current_memory_usage: u64,
    /// Latest cache hit rate as a fraction (0.0..=1.0).
    pub cache_hit_rate: f64,
    /// Real-time-factor statistics across all syntheses.
    pub rtf_stats: RealTimeFactorStats,
    /// Per-operation wall-clock timings recorded by `PerformanceScope` guards.
    pub component_timings: HashMap<String, Duration>,
    /// Running audio-quality averages and warning count.
    pub quality_metrics: QualityMetrics,
    /// Detailed metrics keyed by advanced feature.
    pub feature_metrics: HashMap<AdvancedFeature, FeaturePerformanceMetrics>,
    /// Aggregates computed across all entries of `feature_metrics`.
    pub feature_stats: FeaturePerformanceStats,
}
/// Real-time-factor (RTF) statistics. RTF = processing time / audio duration;
/// values above 1.0 mean synthesis is slower than real time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RealTimeFactorStats {
    /// Smoothed average RTF (no sample count is stored, so this is a
    /// running blend of samples rather than a strict arithmetic mean).
    pub average_rtf: f64,
    /// Lowest RTF observed.
    pub min_rtf: f64,
    /// Highest RTF observed.
    pub max_rtf: f64,
    /// Approximate 95th-percentile RTF (derived from `max_rtf` in this module).
    pub p95_rtf: f64,
    /// Count of samples with RTF > 1.0 (slower than real time).
    pub rtf_violations: u64,
}
/// Running audio-quality averages across recorded syntheses.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityMetrics {
    /// Mean signal-to-noise ratio, in dB.
    pub average_snr: f64,
    /// Mean total harmonic distortion, as a fraction (0.05 = 5 %).
    pub average_thd: f64,
    /// Mean dynamic range, in dB.
    pub average_dynamic_range: f64,
    /// Number of syntheses whose quality fell below the module's thresholds.
    pub quality_warnings: u64,
}
/// Per-feature performance metrics, accumulated by `record_feature_operation`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeaturePerformanceMetrics {
    /// Number of operations recorded for this feature.
    pub usage_count: u64,
    /// Sum of processing time across all operations.
    pub total_processing_time: Duration,
    /// `total_processing_time / usage_count`.
    pub average_processing_time: Duration,
    /// Memory usage statistics for this feature.
    pub memory_stats: FeatureMemoryStats,
    /// RTF statistics for this feature.
    pub rtf_stats: RealTimeFactorStats,
    /// Quality-score statistics for this feature.
    pub quality_stats: FeatureQualityStats,
    /// Fraction of recorded operations that failed (0.0..=1.0).
    pub error_rate: f64,
    /// Fraction of recorded operations that succeeded; `1.0 - error_rate`.
    pub success_rate: f64,
    /// Free-form named metrics set via `record_feature_metric`.
    pub feature_specific_metrics: HashMap<String, f64>,
}
/// Memory usage statistics for a single feature (all sizes in bytes).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureMemoryStats {
    /// Highest memory usage reported for this feature.
    pub peak_memory: u64,
    /// Arithmetic mean of reported memory usage.
    pub average_memory: u64,
    /// Most recently reported memory usage.
    pub current_memory: u64,
    /// Number of allocations attributed to this feature.
    /// NOTE(review): not updated anywhere in this module — presumably
    /// maintained by external callers; confirm before relying on it.
    pub allocation_count: u64,
    /// Number of deallocations attributed to this feature (see note above
    /// about external maintenance).
    pub deallocation_count: u64,
}
/// Quality-score statistics for a single feature.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureQualityStats {
    /// Arithmetic mean of recorded quality scores.
    pub average_quality: f64,
    /// Lowest recorded quality score.
    pub min_quality: f64,
    /// Highest recorded quality score.
    pub max_quality: f64,
    /// Number of detected quality degradations.
    /// NOTE(review): not updated anywhere in this module — confirm it is
    /// maintained elsewhere.
    pub degradation_count: u64,
    /// Free-form named quality metrics.
    pub specific_metrics: HashMap<String, f64>,
}
/// Aggregate statistics computed across all features with recorded metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeaturePerformanceStats {
    /// Number of features that currently have recorded metrics.
    pub active_feature_count: u32,
    /// Feature with the highest usage count, if any.
    pub most_used_feature: Option<AdvancedFeature>,
    /// Feature with the best (lowest) average processing time, if any.
    pub best_performing_feature: Option<AdvancedFeature>,
    /// Feature with the worst (highest) average processing time, if any.
    pub worst_performing_feature: Option<AdvancedFeature>,
    /// Mean of per-feature average memory usage, in bytes.
    pub average_feature_memory_overhead: u64,
    /// Mean of per-feature average processing time, in seconds.
    pub average_feature_processing_overhead: f64,
    /// Measured impact score for specific feature combinations.
    /// NOTE(review): not populated anywhere in this module.
    pub combination_impact: HashMap<Vec<AdvancedFeature>, f64>,
    /// Utilization per feature category.
    /// NOTE(review): not populated anywhere in this module.
    pub category_utilization: HashMap<String, f64>,
}
/// Complete result of `PerformanceMonitor::analyze_feature_performance`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeaturePerformanceAnalysis {
    /// High-level scoring, trends, and critical issues.
    pub summary: PerformanceSummary,
    /// Suggested actions ordered as generated (not sorted by priority).
    pub recommendations: Vec<PerformanceRecommendation>,
    /// Detected performance bottlenecks.
    pub bottlenecks: Vec<PerformanceBottleneck>,
    /// Per-feature resource utilization and efficiency estimates.
    pub resource_analysis: ResourceUsageAnalysis,
}
/// High-level summary of how features are performing against fixed targets.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSummary {
    /// Fraction of features meeting all targets (1.0 when none are recorded).
    pub overall_score: f64,
    /// Features within latency, error-rate, and quality targets.
    pub features_meeting_targets: Vec<AdvancedFeature>,
    /// Features missing at least one target.
    pub features_missing_targets: Vec<AdvancedFeature>,
    /// Human-readable descriptions of severe problems (very high error rate
    /// or latency).
    pub critical_issues: Vec<String>,
    /// Heuristic trend classification per feature.
    pub trends: HashMap<AdvancedFeature, PerformanceTrend>,
}
/// A single actionable optimization suggestion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceRecommendation {
    /// Stable identifier (derived from the recommendation type and feature).
    pub id: String,
    /// The feature this applies to, or `None` for system-wide advice.
    pub feature: Option<AdvancedFeature>,
    /// Category of the suggested action.
    pub recommendation_type: RecommendationType,
    /// Human-readable explanation of the problem and suggested remedy.
    pub description: String,
    /// Estimated improvement if applied (0.0..=1.0 scale).
    pub expected_impact: f64,
    /// Estimated implementation difficulty (0.0..=1.0 scale).
    pub difficulty: f64,
    /// Urgency of acting on this recommendation.
    pub priority: RecommendationPriority,
}
/// Category of a performance recommendation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RecommendationType {
    /// Reduce memory consumption.
    ReduceMemory,
    /// Lower processing latency.
    OptimizeSpeed,
    /// Raise output quality.
    ImproveQuality,
    /// Enable GPU acceleration.
    EnableGpu,
    /// Tune configuration parameters.
    AdjustConfiguration,
    /// Disable underperforming features.
    DisableFeatures,
    /// Optimize how features are combined.
    OptimizeCombinations,
    /// The workload exceeds current hardware capability.
    UpgradeHardware,
}
/// Urgency of a recommendation. `Ord` is derived from declaration order,
/// so `Low < Medium < High < Critical`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum RecommendationPriority {
    Low,
    Medium,
    High,
    Critical,
}
/// A detected performance bottleneck and candidate remedies.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceBottleneck {
    /// Human-readable description of where the bottleneck occurs.
    pub location: String,
    /// The feature involved, or `None` for system-wide bottlenecks.
    pub feature: Option<AdvancedFeature>,
    /// Resource class responsible for the bottleneck.
    pub bottleneck_type: BottleneckType,
    /// Severity estimate (0.0..=1.0 scale).
    pub severity: f64,
    /// Description of the user-visible impact.
    pub impact: String,
    /// Suggested mitigations, in no particular order.
    pub solutions: Vec<String>,
}
/// Resource class a bottleneck is attributed to.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BottleneckType {
    /// CPU-bound processing.
    CpuProcessing,
    /// Memory bandwidth or capacity pressure.
    MemoryBandwidth,
    /// GPU-bound processing.
    GpuProcessing,
    /// Disk or other I/O operations.
    IoOperations,
    /// Network throughput limits.
    NetworkBandwidth,
    /// Interference between concurrently enabled features.
    FeatureInteraction,
    /// Contention for shared resources (locks, pools, devices).
    ResourceContention,
}
/// Per-feature resource utilization estimates (all fractions 0.0..=1.0).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceUsageAnalysis {
    /// Estimated CPU utilization per feature.
    pub cpu_utilization: HashMap<AdvancedFeature, f64>,
    /// Estimated memory utilization per feature.
    pub memory_utilization: HashMap<AdvancedFeature, f64>,
    /// Estimated GPU utilization per feature (currently always 0.0 —
    /// this module does not measure GPU usage).
    pub gpu_utilization: HashMap<AdvancedFeature, f64>,
    /// Quality delivered per unit of resource consumed.
    pub efficiency_scores: HashMap<AdvancedFeature, f64>,
    /// Detected resource waste (currently never populated by this module).
    pub waste_detection: Vec<ResourceWaste>,
}
/// A single instance of detected resource waste.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceWaste {
    /// Which resource is wasted (e.g. "memory", "cpu").
    pub resource_type: String,
    /// Absolute amount wasted, in resource-specific units.
    pub waste_amount: f64,
    /// Wasted share of the total, as a percentage.
    pub waste_percentage: f64,
    /// Root cause description.
    pub cause: String,
    /// Suggested remediation.
    pub suggested_fix: String,
}
/// Heuristic trend classification for a feature's performance.
///
/// In this module the classification is derived from current error-rate and
/// quality levels, not from a time series.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PerformanceTrend {
    Improving,
    Stable,
    Degrading,
    /// High variance / inconsistent behavior.
    Variable,
}
/// RAII timing guard returned by `PerformanceMonitor::start_operation`.
///
/// On drop it records the elapsed time into the monitor's
/// `component_timings` map under `operation_name`.
pub struct PerformanceScope<'a> {
    // Monitor to report back to when the scope ends.
    monitor: &'a PerformanceMonitor,
    // Key under which the elapsed time is stored.
    operation_name: String,
    // When the scope was opened.
    start_time: Instant,
}
impl Default for PerformanceMonitor {
fn default() -> Self {
Self::new()
}
}
impl PerformanceMonitor {
    /// Creates a monitor with zeroed metrics; uptime is measured from this call.
    pub fn new() -> Self {
        Self {
            metrics: Arc::new(Mutex::new(PerformanceMetrics::default())),
            start_time: Instant::now(),
        }
    }

    /// Locks the shared metrics, mapping a poisoned mutex into an internal
    /// error. Centralizes the lock/map_err boilerplate used by every method.
    fn lock_metrics(&self) -> Result<std::sync::MutexGuard<'_, PerformanceMetrics>> {
        self.metrics
            .lock()
            .map_err(|_| VoirsError::internal("PerformanceMonitor", "Failed to lock metrics"))
    }

    /// Starts a scoped timer. When the returned guard is dropped, the elapsed
    /// time is stored in `component_timings` under `operation_name`,
    /// overwriting any previous timing recorded for the same name.
    pub fn start_operation(&self, operation_name: &str) -> PerformanceScope<'_> {
        PerformanceScope {
            monitor: self,
            operation_name: operation_name.to_string(),
            start_time: Instant::now(),
        }
    }

    /// Records one synthesis run: bumps totals, refreshes the mean synthesis
    /// time, and folds the run's real-time factor
    /// (processing time / audio duration) into the RTF statistics.
    ///
    /// A zero-length `audio_duration` is skipped for RTF purposes: dividing
    /// by it would record an infinite RTF and corrupt the running statistics.
    ///
    /// # Errors
    /// Fails only if the metrics mutex is poisoned.
    pub fn record_synthesis(
        &self,
        processing_time: Duration,
        audio_duration: Duration,
    ) -> Result<()> {
        let mut metrics = self.lock_metrics()?;
        metrics.total_syntheses += 1;
        metrics.total_processing_time += processing_time;
        // Saturating conversion instead of a silently-truncating `as u32`
        // cast; the count is at least 1 here, so the division is safe.
        let count = u32::try_from(metrics.total_syntheses).unwrap_or(u32::MAX);
        metrics.average_synthesis_time = metrics.total_processing_time / count;
        let audio_secs = audio_duration.as_secs_f64();
        if audio_secs > 0.0 {
            let rtf = processing_time.as_secs_f64() / audio_secs;
            self.update_rtf_stats(&mut metrics.rtf_stats, rtf);
        }
        Ok(())
    }

    /// Updates current memory usage (bytes) and raises the peak watermark if
    /// exceeded.
    pub fn update_memory_usage(&self, current_usage: u64) -> Result<()> {
        let mut metrics = self.lock_metrics()?;
        metrics.current_memory_usage = current_usage;
        if current_usage > metrics.peak_memory_usage {
            metrics.peak_memory_usage = current_usage;
        }
        Ok(())
    }

    /// Stores the latest cache hit rate (expected range 0.0..=1.0; not
    /// validated here).
    pub fn record_cache_hit_rate(&self, hit_rate: f64) -> Result<()> {
        let mut metrics = self.lock_metrics()?;
        metrics.cache_hit_rate = hit_rate;
        Ok(())
    }

    /// Folds one synthesis's audio-quality measurements into the running
    /// means.
    ///
    /// The sample count is taken from `total_syntheses`, so this should be
    /// called once per recorded synthesis (after `record_synthesis`); while
    /// no synthesis has been recorded the averages are left untouched.
    /// A quality warning is counted when SNR < 20 dB, THD > 5 %, or dynamic
    /// range < 30 dB.
    pub fn record_quality_metrics(&self, snr: f64, thd: f64, dynamic_range: f64) -> Result<()> {
        let mut metrics = self.lock_metrics()?;
        let count = metrics.total_syntheses as f64;
        if count > 0.0 {
            // Incremental arithmetic means over `count` samples.
            metrics.quality_metrics.average_snr =
                (metrics.quality_metrics.average_snr * (count - 1.0) + snr) / count;
            metrics.quality_metrics.average_thd =
                (metrics.quality_metrics.average_thd * (count - 1.0) + thd) / count;
            metrics.quality_metrics.average_dynamic_range =
                (metrics.quality_metrics.average_dynamic_range * (count - 1.0) + dynamic_range)
                    / count;
        }
        if snr < 20.0 || thd > 0.05 || dynamic_range < 30.0 {
            metrics.quality_metrics.quality_warnings += 1;
        }
        Ok(())
    }

    /// Returns a snapshot (clone) of all collected metrics.
    pub fn get_metrics(&self) -> Result<PerformanceMetrics> {
        Ok(self.lock_metrics()?.clone())
    }

    /// Renders a short human-readable summary of the collected metrics.
    pub fn generate_report(&self) -> Result<String> {
        let metrics = self.get_metrics()?;
        let uptime = self.start_time.elapsed();
        Ok(format!(
            "VoiRS SDK Performance Report\n\
             ===========================\n\
             Uptime: {:.2}s\n\
             Total Syntheses: {}\n\
             Average Synthesis Time: {:.2}ms\n\
             Peak Memory Usage: {:.2} MB\n\
             Current Memory Usage: {:.2} MB\n\
             Cache Hit Rate: {:.1}%\n\
             Average RTF: {:.3}\n\
             RTF Violations: {}\n\
             Average SNR: {:.1} dB\n\
             Average THD: {:.3}%\n\
             Quality Warnings: {}\n",
            uptime.as_secs_f64(),
            metrics.total_syntheses,
            // Milliseconds as a float so the `{:.2}` precision actually
            // applies (Rust ignores precision for the integer `as_millis()`).
            metrics.average_synthesis_time.as_secs_f64() * 1000.0,
            metrics.peak_memory_usage as f64 / 1_048_576.0,
            metrics.current_memory_usage as f64 / 1_048_576.0,
            metrics.cache_hit_rate * 100.0,
            metrics.rtf_stats.average_rtf,
            metrics.rtf_stats.rtf_violations,
            metrics.quality_metrics.average_snr,
            metrics.quality_metrics.average_thd * 100.0,
            metrics.quality_metrics.quality_warnings,
        ))
    }

    /// Clears every metric back to its default state. The uptime baseline
    /// (`start_time`) is deliberately left untouched.
    pub fn reset(&self) -> Result<()> {
        let mut metrics = self.lock_metrics()?;
        *metrics = PerformanceMetrics::default();
        Ok(())
    }

    /// Folds one RTF sample into `stats`.
    ///
    /// `RealTimeFactorStats` stores no sample count, so `average_rtf` is an
    /// exponential smoothing (each new sample weighted 0.5), not a true mean,
    /// and `p95_rtf` is approximated from the observed maximum. An
    /// `average_rtf` of exactly 0.0 is treated as "no samples yet" (the
    /// `Default` state).
    fn update_rtf_stats(&self, stats: &mut RealTimeFactorStats, rtf: f64) {
        if stats.average_rtf == 0.0 {
            // First sample: initialize everything to it.
            stats.average_rtf = rtf;
            stats.min_rtf = rtf;
            stats.max_rtf = rtf;
        } else {
            stats.average_rtf = (stats.average_rtf + rtf) / 2.0;
            stats.min_rtf = stats.min_rtf.min(rtf);
            stats.max_rtf = stats.max_rtf.max(rtf);
        }
        // RTF above 1.0 means processing was slower than real time.
        if rtf > 1.0 {
            stats.rtf_violations += 1;
        }
        stats.p95_rtf = stats.max_rtf * 0.95;
    }

    /// Records one operation for `feature`, updating per-feature usage,
    /// timing, memory, quality, and success/error statistics, then refreshes
    /// the cross-feature aggregates.
    ///
    /// Fix: the previous implementation only folded the outcome into
    /// whichever of `error_rate` / `success_rate` matched it, so a success
    /// never diluted an earlier failure (a single failure pinned
    /// `error_rate` at 1.0 forever). Both rates are now derived from one
    /// running failure mean over all operations.
    pub fn record_feature_operation(
        &self,
        feature: AdvancedFeature,
        processing_time: Duration,
        memory_usage: u64,
        quality_score: f64,
        success: bool,
    ) -> Result<()> {
        let mut guard = self.lock_metrics()?;
        // Reborrow so the guard can be split into disjoint field borrows
        // (the per-feature map and the aggregate stats) under one lock.
        let metrics = &mut *guard;
        let feature_metrics = metrics.feature_metrics.entry(feature).or_default();
        feature_metrics.usage_count += 1;
        let count = feature_metrics.usage_count;
        feature_metrics.total_processing_time += processing_time;
        // Saturating conversion; `count >= 1` here, so the division is safe.
        feature_metrics.average_processing_time =
            feature_metrics.total_processing_time / u32::try_from(count).unwrap_or(u32::MAX);
        feature_metrics.memory_stats.current_memory = memory_usage;
        if memory_usage > feature_metrics.memory_stats.peak_memory {
            feature_metrics.memory_stats.peak_memory = memory_usage;
        }
        // Incremental arithmetic means over `count` samples.
        feature_metrics.memory_stats.average_memory =
            (feature_metrics.memory_stats.average_memory * (count - 1) + memory_usage) / count;
        feature_metrics.quality_stats.average_quality = (feature_metrics
            .quality_stats
            .average_quality
            * (count - 1) as f64
            + quality_score)
            / count as f64;
        feature_metrics.quality_stats.min_quality =
            feature_metrics.quality_stats.min_quality.min(quality_score);
        feature_metrics.quality_stats.max_quality =
            feature_metrics.quality_stats.max_quality.max(quality_score);
        // Single running mean of failures; success rate is its complement.
        let failure = if success { 0.0 } else { 1.0 };
        feature_metrics.error_rate =
            (feature_metrics.error_rate * (count - 1) as f64 + failure) / count as f64;
        feature_metrics.success_rate = 1.0 - feature_metrics.error_rate;
        Self::recompute_feature_stats(&metrics.feature_metrics, &mut metrics.feature_stats);
        Ok(())
    }

    /// Stores (or overwrites) a named feature-specific metric value.
    pub fn record_feature_metric(
        &self,
        feature: AdvancedFeature,
        metric_name: &str,
        value: f64,
    ) -> Result<()> {
        let mut metrics = self.lock_metrics()?;
        metrics
            .feature_metrics
            .entry(feature)
            .or_default()
            .feature_specific_metrics
            .insert(metric_name.to_string(), value);
        Ok(())
    }

    /// Builds a full analysis (summary, recommendations, bottlenecks, and
    /// resource usage) from the current per-feature metrics.
    pub fn analyze_feature_performance(&self) -> Result<FeaturePerformanceAnalysis> {
        let metrics = self.lock_metrics()?;
        Ok(FeaturePerformanceAnalysis {
            summary: self.generate_performance_summary(&metrics)?,
            recommendations: self.generate_recommendations(&metrics)?,
            bottlenecks: self.identify_bottlenecks(&metrics)?,
            resource_analysis: self.analyze_resource_usage(&metrics)?,
        })
    }

    /// Returns a clone of one feature's metrics, if any were recorded.
    pub fn get_feature_metrics(
        &self,
        feature: AdvancedFeature,
    ) -> Result<Option<FeaturePerformanceMetrics>> {
        let metrics = self.lock_metrics()?;
        Ok(metrics.feature_metrics.get(&feature).cloned())
    }

    /// Returns a clone of the cross-feature aggregate statistics.
    pub fn get_feature_stats(&self) -> Result<FeaturePerformanceStats> {
        Ok(self.lock_metrics()?.feature_stats.clone())
    }

    /// Clears metrics for one feature (`Some`) or all features (`None`).
    ///
    /// Aggregates are recomputed (or reset) so they never keep referencing a
    /// removed feature — previously a single-feature reset left them stale.
    pub fn reset_feature_metrics(&self, feature: Option<AdvancedFeature>) -> Result<()> {
        let mut guard = self.lock_metrics()?;
        let metrics = &mut *guard;
        match feature {
            Some(feature) => {
                metrics.feature_metrics.remove(&feature);
                Self::recompute_feature_stats(&metrics.feature_metrics, &mut metrics.feature_stats);
            }
            None => {
                metrics.feature_metrics.clear();
                metrics.feature_stats = FeaturePerformanceStats::default();
            }
        }
        Ok(())
    }

    /// Recomputes the cross-feature aggregates from the complete metrics map.
    ///
    /// Replaces the old incremental `update_feature_stats`, whose
    /// "most used" / "best" / "worst" fields effectively tracked only the
    /// last (or first) feature seen instead of real extremes.
    fn recompute_feature_stats(
        feature_metrics: &HashMap<AdvancedFeature, FeaturePerformanceMetrics>,
        stats: &mut FeaturePerformanceStats,
    ) {
        stats.active_feature_count = feature_metrics.len() as u32;
        stats.most_used_feature = feature_metrics
            .iter()
            .max_by_key(|(_, m)| m.usage_count)
            .map(|(f, _)| *f);
        // Best = lowest average latency; worst = highest.
        stats.best_performing_feature = feature_metrics
            .iter()
            .min_by_key(|(_, m)| m.average_processing_time)
            .map(|(f, _)| *f);
        stats.worst_performing_feature = feature_metrics
            .iter()
            .max_by_key(|(_, m)| m.average_processing_time)
            .map(|(f, _)| *f);
        let n = feature_metrics.len() as u64;
        if n == 0 {
            stats.average_feature_memory_overhead = 0;
            stats.average_feature_processing_overhead = 0.0;
        } else {
            stats.average_feature_memory_overhead = feature_metrics
                .values()
                .map(|m| m.memory_stats.average_memory)
                .sum::<u64>()
                / n;
            // Overhead is expressed in seconds of average processing time.
            stats.average_feature_processing_overhead = feature_metrics
                .values()
                .map(|m| m.average_processing_time.as_secs_f64())
                .sum::<f64>()
                / n as f64;
        }
    }

    /// Scores each feature against fixed targets (< 500 ms average latency,
    /// < 5 % error rate, > 0.8 quality) and collects trends and critical
    /// issues (error rate > 20 % or latency > 2 s).
    fn generate_performance_summary(
        &self,
        metrics: &PerformanceMetrics,
    ) -> Result<PerformanceSummary> {
        let mut features_meeting_targets = Vec::new();
        let mut features_missing_targets = Vec::new();
        let mut critical_issues = Vec::new();
        let mut trends = HashMap::new();
        for (feature, feature_metrics) in &metrics.feature_metrics {
            let meets_targets = feature_metrics.average_processing_time.as_millis() < 500
                && feature_metrics.error_rate < 0.05
                && feature_metrics.quality_stats.average_quality > 0.8;
            if meets_targets {
                features_meeting_targets.push(*feature);
            } else {
                features_missing_targets.push(*feature);
            }
            // Heuristic trend from current error/quality levels (no history).
            let trend = if feature_metrics.error_rate > 0.1 {
                PerformanceTrend::Degrading
            } else if feature_metrics.quality_stats.average_quality > 0.9 {
                PerformanceTrend::Improving
            } else {
                PerformanceTrend::Stable
            };
            trends.insert(*feature, trend);
            if feature_metrics.error_rate > 0.2 {
                critical_issues.push(format!(
                    "High error rate for {:?}: {:.1}%",
                    feature,
                    feature_metrics.error_rate * 100.0
                ));
            }
            if feature_metrics.average_processing_time.as_millis() > 2000 {
                critical_issues.push(format!(
                    "High latency for {:?}: {}ms",
                    feature,
                    feature_metrics.average_processing_time.as_millis()
                ));
            }
        }
        // Overall score = fraction of features meeting every target
        // (a perfect 1.0 when no features have been recorded at all).
        let total_features = metrics.feature_metrics.len() as f64;
        let overall_score = if total_features > 0.0 {
            features_meeting_targets.len() as f64 / total_features
        } else {
            1.0
        };
        Ok(PerformanceSummary {
            overall_score,
            features_meeting_targets,
            features_missing_targets,
            critical_issues,
            trends,
        })
    }

    /// Emits recommendations for features with high memory (> 1 GB peak),
    /// high latency (> 1 s average), or low quality (< 0.7 average).
    fn generate_recommendations(
        &self,
        metrics: &PerformanceMetrics,
    ) -> Result<Vec<PerformanceRecommendation>> {
        let mut recommendations = Vec::new();
        for (feature, feature_metrics) in &metrics.feature_metrics {
            if feature_metrics.memory_stats.peak_memory > 1_000_000_000 {
                recommendations.push(PerformanceRecommendation {
                    id: format!("mem_reduce_{:?}", feature),
                    feature: Some(*feature),
                    recommendation_type: RecommendationType::ReduceMemory,
                    description: format!("Feature {:?} is using high memory ({} MB). Consider optimizing memory usage.",
                        feature, feature_metrics.memory_stats.peak_memory / 1_000_000),
                    expected_impact: 0.3,
                    difficulty: 0.6,
                    priority: RecommendationPriority::Medium,
                });
            }
            if feature_metrics.average_processing_time.as_millis() > 1000 {
                recommendations.push(PerformanceRecommendation {
                    id: format!("speed_optimize_{:?}", feature),
                    feature: Some(*feature),
                    recommendation_type: RecommendationType::OptimizeSpeed,
                    description: format!("Feature {:?} has high processing latency ({}ms). Consider optimization or GPU acceleration.",
                        feature, feature_metrics.average_processing_time.as_millis()),
                    expected_impact: 0.5,
                    difficulty: 0.7,
                    priority: RecommendationPriority::High,
                });
            }
            if feature_metrics.quality_stats.average_quality < 0.7 {
                recommendations.push(PerformanceRecommendation {
                    id: format!("quality_improve_{:?}", feature),
                    feature: Some(*feature),
                    recommendation_type: RecommendationType::ImproveQuality,
                    description: format!("Feature {:?} has low quality score ({:.2}). Consider adjusting configuration.",
                        feature, feature_metrics.quality_stats.average_quality),
                    expected_impact: 0.4,
                    difficulty: 0.5,
                    priority: RecommendationPriority::Medium,
                });
            }
        }
        Ok(recommendations)
    }

    /// Flags features as bottlenecks when their average latency exceeds 2 s
    /// (CPU) or their peak memory exceeds 2 GB (memory bandwidth).
    fn identify_bottlenecks(
        &self,
        metrics: &PerformanceMetrics,
    ) -> Result<Vec<PerformanceBottleneck>> {
        let mut bottlenecks = Vec::new();
        for (feature, feature_metrics) in &metrics.feature_metrics {
            if feature_metrics.average_processing_time.as_millis() > 2000 {
                bottlenecks.push(PerformanceBottleneck {
                    location: format!("Feature {:?} processing", feature),
                    feature: Some(*feature),
                    bottleneck_type: BottleneckType::CpuProcessing,
                    severity: 0.8,
                    impact: "High processing latency affects real-time performance".to_string(),
                    solutions: vec![
                        "Enable GPU acceleration if available".to_string(),
                        "Optimize algorithm implementation".to_string(),
                        "Reduce feature complexity".to_string(),
                    ],
                });
            }
            if feature_metrics.memory_stats.peak_memory > 2_000_000_000 {
                bottlenecks.push(PerformanceBottleneck {
                    location: format!("Feature {:?} memory usage", feature),
                    feature: Some(*feature),
                    bottleneck_type: BottleneckType::MemoryBandwidth,
                    severity: 0.6,
                    impact: "High memory usage may cause system instability".to_string(),
                    solutions: vec![
                        "Implement memory pooling".to_string(),
                        "Optimize data structures".to_string(),
                        "Add memory usage limits".to_string(),
                    ],
                });
            }
        }
        Ok(bottlenecks)
    }

    /// Estimates per-feature resource utilization from recorded metrics.
    ///
    /// CPU utilization is approximated as average latency relative to one
    /// second; memory utilization relative to a fixed 4 GB budget; GPU usage
    /// is not measured and reported as 0.0. Waste detection is not yet
    /// implemented and always returns an empty list.
    fn analyze_resource_usage(
        &self,
        metrics: &PerformanceMetrics,
    ) -> Result<ResourceUsageAnalysis> {
        let mut cpu_utilization = HashMap::new();
        let mut memory_utilization = HashMap::new();
        let mut gpu_utilization = HashMap::new();
        let mut efficiency_scores = HashMap::new();
        let waste_detection = Vec::new();
        for (feature, feature_metrics) in &metrics.feature_metrics {
            let cpu_util =
                (feature_metrics.average_processing_time.as_millis() as f64 / 1000.0).min(1.0);
            cpu_utilization.insert(*feature, cpu_util);
            let mem_util =
                (feature_metrics.memory_stats.average_memory as f64 / 4_000_000_000.0).min(1.0);
            memory_utilization.insert(*feature, mem_util);
            gpu_utilization.insert(*feature, 0.0);
            // Quality delivered per unit of resource; +0.1 avoids div-by-zero.
            let efficiency =
                feature_metrics.quality_stats.average_quality / (cpu_util + mem_util + 0.1);
            efficiency_scores.insert(*feature, efficiency);
        }
        Ok(ResourceUsageAnalysis {
            cpu_utilization,
            memory_utilization,
            gpu_utilization,
            efficiency_scores,
            waste_detection,
        })
    }
}
impl<'a> Drop for PerformanceScope<'a> {
    /// Records the scope's elapsed wall-clock time into the monitor's
    /// `component_timings` under the scope's operation name, overwriting any
    /// earlier timing with the same name.
    fn drop(&mut self) {
        let elapsed = self.start_time.elapsed();
        // A poisoned mutex is silently ignored: timing is best-effort and
        // `drop` must never panic.
        match self.monitor.metrics.lock() {
            Ok(mut metrics) => {
                let key = self.operation_name.clone();
                metrics.component_timings.insert(key, elapsed);
            }
            Err(_poisoned) => {}
        }
    }
}
impl Default for PerformanceMetrics {
fn default() -> Self {
Self {
total_syntheses: 0,
total_processing_time: Duration::ZERO,
average_synthesis_time: Duration::ZERO,
peak_memory_usage: 0,
current_memory_usage: 0,
cache_hit_rate: 0.0,
rtf_stats: RealTimeFactorStats::default(),
component_timings: HashMap::new(),
quality_metrics: QualityMetrics::default(),
feature_metrics: HashMap::new(),
feature_stats: FeaturePerformanceStats::default(),
}
}
}
impl Default for RealTimeFactorStats {
fn default() -> Self {
Self {
average_rtf: 0.0,
min_rtf: 0.0,
max_rtf: 0.0,
p95_rtf: 0.0,
rtf_violations: 0,
}
}
}
impl Default for QualityMetrics {
fn default() -> Self {
Self {
average_snr: 0.0,
average_thd: 0.0,
average_dynamic_range: 0.0,
quality_warnings: 0,
}
}
}
impl Default for FeaturePerformanceMetrics {
fn default() -> Self {
Self {
usage_count: 0,
total_processing_time: Duration::ZERO,
average_processing_time: Duration::ZERO,
memory_stats: FeatureMemoryStats::default(),
rtf_stats: RealTimeFactorStats::default(),
quality_stats: FeatureQualityStats::default(),
error_rate: 0.0,
success_rate: 1.0,
feature_specific_metrics: HashMap::new(),
}
}
}
impl Default for FeatureMemoryStats {
fn default() -> Self {
Self {
peak_memory: 0,
average_memory: 0,
current_memory: 0,
allocation_count: 0,
deallocation_count: 0,
}
}
}
impl Default for FeatureQualityStats {
fn default() -> Self {
Self {
average_quality: 1.0,
min_quality: 1.0,
max_quality: 1.0,
degradation_count: 0,
specific_metrics: HashMap::new(),
}
}
}
impl Default for FeaturePerformanceStats {
fn default() -> Self {
Self {
active_feature_count: 0,
most_used_feature: None,
best_performing_feature: None,
worst_performing_feature: None,
average_feature_memory_overhead: 0,
average_feature_processing_overhead: 0.0,
combination_impact: HashMap::new(),
category_utilization: HashMap::new(),
}
}
}
/// Measures the wall-clock time of `$code` under `$monitor`, recording the
/// elapsed time in `component_timings` under the name `$operation`.
///
/// The timing guard lives for the whole block and records on drop, so the
/// timing is captured even if the block returns early or unwinds.
#[macro_export]
macro_rules! measure_performance {
    ($monitor:expr, $operation:expr, $code:block) => {{
        // Bound to `_scope` (not `_`) so the guard survives until block end.
        let _scope = $monitor.start_operation($operation);
        $code
    }};
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    // A fresh monitor starts with zeroed counters.
    #[test]
    fn test_performance_monitor_creation() {
        let monitor = PerformanceMonitor::new();
        let metrics = monitor.get_metrics().unwrap();
        assert_eq!(metrics.total_syntheses, 0);
    }

    // Recording one synthesis bumps the count and accumulates its time.
    #[test]
    fn test_synthesis_recording() {
        let monitor = PerformanceMonitor::new();
        monitor
            .record_synthesis(Duration::from_millis(100), Duration::from_millis(1000))
            .unwrap();
        let metrics = monitor.get_metrics().unwrap();
        assert_eq!(metrics.total_syntheses, 1);
        assert_eq!(metrics.total_processing_time, Duration::from_millis(100));
    }

    // The peak watermark follows the highest reported usage.
    #[test]
    fn test_memory_tracking() {
        let monitor = PerformanceMonitor::new();
        monitor.update_memory_usage(1024).unwrap();
        monitor.update_memory_usage(2048).unwrap();
        let metrics = monitor.get_metrics().unwrap();
        assert_eq!(metrics.current_memory_usage, 2048);
        assert_eq!(metrics.peak_memory_usage, 2048);
    }

    // Dropping the scope guard records a timing under the operation name.
    #[test]
    fn test_performance_scope() {
        let monitor = PerformanceMonitor::new();
        {
            let _scope = monitor.start_operation("test_operation");
            thread::sleep(Duration::from_millis(1));
        }
        let metrics = monitor.get_metrics().unwrap();
        assert!(metrics.component_timings.contains_key("test_operation"));
    }

    // With one synthesis recorded, the first quality sample becomes the mean;
    // good values (SNR 25 dB, THD 2 %, DR 40 dB) trigger no warnings.
    #[test]
    fn test_quality_metrics() {
        let monitor = PerformanceMonitor::new();
        monitor
            .record_synthesis(Duration::from_millis(100), Duration::from_millis(1000))
            .unwrap();
        monitor.record_quality_metrics(25.0, 0.02, 40.0).unwrap();
        let metrics = monitor.get_metrics().unwrap();
        assert_eq!(metrics.quality_metrics.average_snr, 25.0);
        assert_eq!(metrics.quality_metrics.quality_warnings, 0);
    }

    // The textual report includes the header and the synthesis count.
    #[test]
    fn test_performance_report() {
        let monitor = PerformanceMonitor::new();
        monitor
            .record_synthesis(Duration::from_millis(100), Duration::from_millis(1000))
            .unwrap();
        let report = monitor.generate_report().unwrap();
        assert!(report.contains("VoiRS SDK Performance Report"));
        assert!(report.contains("Total Syntheses: 1"));
    }

    // Reset restores the default (zeroed) metrics state.
    #[test]
    fn test_reset_metrics() {
        let monitor = PerformanceMonitor::new();
        monitor
            .record_synthesis(Duration::from_millis(100), Duration::from_millis(1000))
            .unwrap();
        monitor.reset().unwrap();
        let metrics = monitor.get_metrics().unwrap();
        assert_eq!(metrics.total_syntheses, 0);
    }
}