use super::{ProfileSession, StageMetrics};
use serde::{Deserialize, Serialize};
/// Severity classification for a detected performance bottleneck,
/// ordered from least to most severe.
///
/// Each level has an associated slowdown threshold (see [`BottleneckSeverity::threshold`])
/// and a display color (see [`BottleneckSeverity::color`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum BottleneckSeverity {
    /// Minor slowdown; worth noting but not urgent.
    Low,
    /// Noticeable slowdown; should be investigated.
    Medium,
    /// Significant slowdown; likely impacting users.
    High,
    /// Severe slowdown; requires immediate attention.
    Critical,
}
impl BottleneckSeverity {
pub fn threshold(&self) -> f64 {
match self {
Self::Low => 10.0, Self::Medium => 25.0, Self::High => 50.0, Self::Critical => 100.0, }
}
pub fn color(&self) -> &'static str {
match self {
Self::Low => "yellow",
Self::Medium => "orange",
Self::High => "red",
Self::Critical => "darkred",
}
}
}
/// A single detected performance bottleneck, describing which component
/// regressed, by how much, and what to do about it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Bottleneck {
    /// Name of the component or stage that is underperforming.
    pub component: String,
    /// How severe the regression is.
    pub severity: BottleneckSeverity,
    /// Percentage by which the component is slower than expected.
    pub slowdown_percent: f64,
    /// Expected duration in milliseconds.
    /// NOTE(review): for memory bottlenecks this field is reused to carry
    /// the baseline size in MiB — see `analyze_memory_usage`.
    pub expected_duration_ms: f64,
    /// Observed duration in milliseconds (or peak MiB for memory bottlenecks).
    pub actual_duration_ms: f64,
    /// Suggested remediation; empty until set via `with_recommendation`.
    pub recommendation: String,
    /// Human-readable description of the impact; empty until set via `with_impact`.
    pub impact_description: String,
}
impl Bottleneck {
    /// Creates a bottleneck record for `component` with the given severity and
    /// timing figures. Recommendation and impact text start out empty; attach
    /// them with [`Bottleneck::with_recommendation`] / [`Bottleneck::with_impact`].
    pub fn new(
        component: impl Into<String>,
        severity: BottleneckSeverity,
        slowdown_percent: f64,
        expected_ms: f64,
        actual_ms: f64,
    ) -> Self {
        let component = component.into();
        Self {
            component,
            severity,
            slowdown_percent,
            expected_duration_ms: expected_ms,
            actual_duration_ms: actual_ms,
            recommendation: String::new(),
            impact_description: String::new(),
        }
    }

    /// Builder-style setter for the remediation text.
    pub fn with_recommendation<S: Into<String>>(mut self, recommendation: S) -> Self {
        self.recommendation = recommendation.into();
        self
    }

    /// Builder-style setter for the impact description.
    pub fn with_impact<S: Into<String>>(mut self, impact: S) -> Self {
        self.impact_description = impact.into();
        self
    }

    /// One-line human-readable summary of this bottleneck.
    pub fn summary(&self) -> String {
        let Self {
            severity,
            component,
            slowdown_percent,
            actual_duration_ms,
            expected_duration_ms,
            ..
        } = self;
        format!(
            "[{:?}] {} is {:.1}% slower than expected ({:.1}ms vs {:.1}ms expected)",
            severity, component, slowdown_percent, actual_duration_ms, expected_duration_ms
        )
    }
}
/// Analyzes a completed [`ProfileSession`] and reports performance bottlenecks.
pub struct BottleneckDetector {
    /// Minimum slowdown (in percent over expected) before a stage is flagged.
    regression_threshold: f64,
}
impl BottleneckDetector {
    /// Creates a detector that flags stages whose slowdown over the expected
    /// duration exceeds `regression_threshold` percent.
    pub fn new(regression_threshold: f64) -> Self {
        Self {
            regression_threshold,
        }
    }

    /// Scans the session's stage metrics and memory snapshots and returns all
    /// detected bottlenecks, sorted worst-first by slowdown percentage.
    pub async fn detect(&self, session: &ProfileSession) -> Vec<Bottleneck> {
        let mut bottlenecks = Vec::new();
        for (stage_name, metrics) in &session.stage_metrics {
            if let Some(bottleneck) = self.analyze_stage_metrics(stage_name, metrics).await {
                bottlenecks.push(bottleneck);
            }
        }
        // `analyze_memory_usage` returns None for sessions without snapshots,
        // so no emptiness pre-check is needed here.
        if let Some(bottleneck) = self.analyze_memory_usage(session).await {
            bottlenecks.push(bottleneck);
        }
        // Worst offenders first; NaN comparisons fall back to Equal rather than panicking.
        bottlenecks.sort_by(|a, b| {
            b.slowdown_percent
                .partial_cmp(&a.slowdown_percent)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        bottlenecks
    }

    /// Compares a stage's total time against the time it was expected to take
    /// (derived from its expected share of the whole pipeline) and emits a
    /// bottleneck when the overshoot exceeds the configured threshold.
    async fn analyze_stage_metrics(
        &self,
        stage_name: &str,
        metrics: &StageMetrics,
    ) -> Option<Bottleneck> {
        let expected_percent = metrics.stage.expected_percentage();
        let total_duration_ms = metrics.total_duration.as_secs_f64() * 1000.0;
        // Guard: a non-positive share would make the session-total
        // reconstruction below divide by zero (the original code produced
        // inf/NaN here and silently reported no bottleneck; return None
        // explicitly instead).
        if metrics.percentage_of_total <= 0.0 {
            return None;
        }
        // Reconstruct the whole-session duration from this stage's share of it.
        let session_total_ms = total_duration_ms / (metrics.percentage_of_total / 100.0);
        let expected_ms = session_total_ms * (expected_percent / 100.0);
        // BUGFIX: compare stage totals to stage totals. The previous code
        // divided the actual time by `execution_count` (a per-call average)
        // while `expected_ms` is a session-level stage total, so stages that
        // executed more than once were under-reported by a factor of
        // `execution_count` (and `execution_count == 0` divided by zero).
        let actual_ms = total_duration_ms;
        let slowdown_percent = if expected_ms > 0.0 {
            ((actual_ms - expected_ms) / expected_ms) * 100.0
        } else {
            0.0
        };
        if slowdown_percent <= self.regression_threshold {
            return None;
        }
        let severity = self.determine_severity(slowdown_percent);
        let recommendation = self.get_stage_recommendation(stage_name, slowdown_percent);
        let impact = format!(
            "This stage is taking {:.1}% of total pipeline time (expected: {:.1}%)",
            metrics.percentage_of_total, expected_percent
        );
        Some(
            Bottleneck::new(stage_name, severity, slowdown_percent, expected_ms, actual_ms)
                .with_recommendation(recommendation)
                .with_impact(impact),
        )
    }

    /// Flags sustained memory growth: peak allocated bytes more than 200%
    /// above the first snapshot. Reported figures are in MiB rather than ms.
    async fn analyze_memory_usage(&self, session: &ProfileSession) -> Option<Bottleneck> {
        let snapshots = &session.memory_snapshots;
        // `first()?` / `max()?` return None for an empty snapshot list, so the
        // previous explicit `is_empty` check was redundant.
        let first_memory = snapshots.first()?.allocated_bytes;
        let peak_memory = snapshots.iter().map(|s| s.allocated_bytes).max()?;
        let memory_growth = peak_memory.saturating_sub(first_memory);
        let growth_percent = if first_memory > 0 {
            (memory_growth as f64 / first_memory as f64) * 100.0
        } else {
            // No baseline to compare against; treat as no growth.
            0.0
        };
        if growth_percent <= 200.0 {
            return None;
        }
        let severity = if growth_percent > 500.0 {
            BottleneckSeverity::Critical
        } else if growth_percent > 300.0 {
            BottleneckSeverity::High
        } else {
            BottleneckSeverity::Medium
        };
        Some(
            Bottleneck::new(
                "Memory Usage",
                severity,
                growth_percent,
                // Convert bytes to MiB for the report.
                first_memory as f64 / 1_048_576.0,
                peak_memory as f64 / 1_048_576.0,
            )
            .with_recommendation(
                "Consider implementing streaming or batching to reduce memory footprint",
            )
            .with_impact("Excessive memory usage may lead to OOM errors or swapping"),
        )
    }

    /// Maps a slowdown percentage onto a severity bucket.
    ///
    /// CONSISTENCY: uses the thresholds declared on
    /// [`BottleneckSeverity::threshold`] instead of duplicating the constants,
    /// so the two can never drift apart.
    fn determine_severity(&self, slowdown_percent: f64) -> BottleneckSeverity {
        if slowdown_percent >= BottleneckSeverity::Critical.threshold() {
            BottleneckSeverity::Critical
        } else if slowdown_percent >= BottleneckSeverity::High.threshold() {
            BottleneckSeverity::High
        } else if slowdown_percent >= BottleneckSeverity::Medium.threshold() {
            BottleneckSeverity::Medium
        } else {
            BottleneckSeverity::Low
        }
    }

    /// Returns a canned optimization hint keyed off the stage's name.
    /// Matching is by exact-case substring, mirroring how stages are named
    /// elsewhere in the pipeline.
    fn get_stage_recommendation(&self, stage_name: &str, _slowdown: f64) -> String {
        match stage_name {
            name if name.contains("G2P") || name.contains("g2p") => {
                "Consider caching G2P results or using a faster G2P backend".to_string()
            }
            name if name.contains("Acoustic") || name.contains("acoustic") => {
                "Enable GPU acceleration or use a smaller acoustic model".to_string()
            }
            name if name.contains("Vocoder") || name.contains("vocoder") => {
                "Enable GPU acceleration or reduce vocoder quality settings".to_string()
            }
            name if name.contains("Memory") => {
                "Implement batching or streaming to reduce peak memory usage".to_string()
            }
            _ => "Profile this component in more detail to identify specific optimizations"
                .to_string(),
        }
    }
}
#[cfg(test)]
mod tests {
    // FIX: removed unused `use crate::profiling::PipelineStage;` import
    // (nothing in this module referenced it, producing an unused-import warning).
    use super::*;

    #[test]
    fn test_bottleneck_severity_threshold() {
        assert_eq!(BottleneckSeverity::Low.threshold(), 10.0);
        assert_eq!(BottleneckSeverity::Medium.threshold(), 25.0);
        assert_eq!(BottleneckSeverity::High.threshold(), 50.0);
        assert_eq!(BottleneckSeverity::Critical.threshold(), 100.0);
    }

    #[test]
    fn test_bottleneck_creation() {
        let bottleneck = Bottleneck::new(
            "Test Component",
            BottleneckSeverity::High,
            75.0,
            100.0,
            175.0,
        );
        assert_eq!(bottleneck.component, "Test Component");
        assert_eq!(bottleneck.severity, BottleneckSeverity::High);
        assert_eq!(bottleneck.slowdown_percent, 75.0);
    }

    #[test]
    fn test_bottleneck_with_recommendation() {
        let bottleneck = Bottleneck::new("Test", BottleneckSeverity::Low, 15.0, 100.0, 115.0)
            .with_recommendation("Enable GPU")
            .with_impact("Minor performance impact");
        assert_eq!(bottleneck.recommendation, "Enable GPU");
        assert_eq!(bottleneck.impact_description, "Minor performance impact");
    }

    #[test]
    fn test_bottleneck_summary() {
        let bottleneck = Bottleneck::new("Vocoder", BottleneckSeverity::High, 50.0, 100.0, 150.0);
        let summary = bottleneck.summary();
        assert!(summary.contains("Vocoder"));
        assert!(summary.contains("50.0%"));
    }

    // FIX: these two tests never `.await` anything, so a tokio runtime is
    // unnecessary — plain `#[test]` avoids spinning one up per test.
    #[test]
    fn test_bottleneck_detector_creation() {
        let detector = BottleneckDetector::new(10.0);
        assert_eq!(detector.regression_threshold, 10.0);
    }

    #[test]
    fn test_severity_determination() {
        let detector = BottleneckDetector::new(10.0);
        assert_eq!(detector.determine_severity(5.0), BottleneckSeverity::Low);
        assert_eq!(
            detector.determine_severity(30.0),
            BottleneckSeverity::Medium
        );
        assert_eq!(detector.determine_severity(60.0), BottleneckSeverity::High);
        assert_eq!(
            detector.determine_severity(150.0),
            BottleneckSeverity::Critical
        );
    }
}