use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::{RwLock, Semaphore};
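/// Top-level configuration for the streaming optimizer: global latency targets plus per-subsystem settings.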
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamingOptimizerConfig {
pub enabled: bool,
pub target_latency_ms: u64,
pub max_latency_ms: u64,
pub buffer_config: BufferConfig,
pub quality_adaptation: QualityAdaptationConfig,
pub prefetching: PrefetchingConfig,
pub chunk_processing: ChunkProcessingConfig,
pub pipeline_optimization: PipelineOptimizationConfig,
}
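/// Playback buffer sizing, adaptive-buffering, and underrun-recovery settings.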
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BufferConfig {
pub initial_buffer_ms: u64,
pub min_buffer_ms: u64,
pub max_buffer_ms: u64,
pub adaptation_sensitivity: f64,
pub adaptive_buffering: bool,
pub underrun_recovery: UnderrunRecoveryStrategy,
pub monitoring_interval_ms: u64,
}
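/// Settings for automatic quality-level adaptation under latency pressure.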
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityAdaptationConfig {
pub enabled: bool,
pub quality_levels: Vec<QualityLevel>,
pub adaptation_threshold_ms: u64,
pub adjustment_aggressiveness: f64,
pub min_quality_level: usize,
pub recovery_speed: QualityRecoverySpeed,
}
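/// Settings for speculative synthesis prefetching and its cache budget.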
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PrefetchingConfig {
pub enabled: bool,
pub lookahead_chars: usize,
pub trigger_threshold: f64,
pub max_concurrent_prefetch: usize,
pub cache_size_mb: u32,
pub strategy: PrefetchStrategy,
}
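/// Text chunking and parallel chunk-processing settings.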
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkProcessingConfig {
pub chunk_size_chars: usize,
pub chunk_overlap_chars: usize,
pub parallel_processing: bool,
pub max_parallel_chunks: usize,
pub priority_scheduling: bool,
pub dynamic_sizing: bool,
}
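/// Pipeline parallelism and hardware acceleration options.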
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineOptimizationConfig {
pub pipeline_parallel: bool,
pub pipeline_stages: usize,
pub stage_skipping: bool,
pub cpu_affinity: bool,
pub gpu_acceleration: bool,
pub memory_optimization: bool,
}
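/// A selectable synthesis quality level with relative time, memory, and CPU cost multipliers.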
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityLevel {
pub level: usize,
pub name: String,
pub synthesis_time_multiplier: f64,
pub quality_score: f64,
pub memory_multiplier: f64,
pub cpu_multiplier: f64,
}
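/// Strategy applied when the playback buffer underruns.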
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum UnderrunRecoveryStrategy {
IncreaseBuffer,
ReduceQuality,
SkipFrames,
Hybrid,
}
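/// How quickly quality is restored after a downward adaptation.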
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum QualityRecoverySpeed {
Conservative,
Moderate,
Aggressive,
}
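/// How prefetch candidates are chosen.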
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum PrefetchStrategy {
Linear,
Predictive,
Adaptive,
}
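/// Point-in-time snapshot of streaming performance counters.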
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamingMetrics {
pub current_latency_ms: u64,
pub average_latency_ms: f64,
pub latency_p95_ms: u64,
pub latency_p99_ms: u64,
pub buffer_fill_percent: f64,
pub buffer_underruns: u64,
pub current_quality_level: usize,
pub quality_adaptations: u64,
pub prefetch_hit_rate: f64,
pub chunk_throughput: f64,
pub pipeline_efficiency: f64,
pub real_time_factor: f64,
}
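/// Recorded outcome of applying a single optimization.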
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamingOptimizationResult {
pub optimization: StreamingOptimization,
pub latency_improvement_ms: i64,
pub quality_impact: f64,
pub memory_impact_bytes: i64,
pub cpu_impact_percent: f64,
pub success: bool,
pub error: Option<String>,
}
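/// The optimizations this module can apply.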
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum StreamingOptimization {
AdaptiveBuffering,
QualityAdjustment,
PrefetchOptimization,
ChunkSizeOptimization,
PipelineParallelization,
MemoryOptimization,
CpuAffinityOptimization,
GpuAcceleration,
}
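/// Monitors streaming latency and adapts buffering, quality, and prefetching
/// to keep synthesis within the configured latency targets.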
pub struct StreamingOptimizer {
config: StreamingOptimizerConfig,
metrics: Arc<RwLock<StreamingMetrics>>,
latency_history: Arc<RwLock<VecDeque<LatencyMeasurement>>>,
buffer_state: Arc<RwLock<BufferState>>,
quality_state: Arc<RwLock<QualityState>>,
prefetch_cache: Arc<RwLock<PrefetchCache>>,
optimization_history: Arc<RwLock<VecDeque<StreamingOptimizationResult>>>,
is_running: Arc<RwLock<bool>>,
processing_semaphore: Arc<Semaphore>,
}
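/// A single latency sample together with the state it was recorded under.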
#[derive(Debug, Clone, Serialize, Deserialize)]
struct LatencyMeasurement {
timestamp: u64,
latency_ms: u64,
quality_level: usize,
buffer_fill: f64,
context: String,
}
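/// Mutable playback-buffer state shared with the monitoring task.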
#[derive(Debug, Clone, Serialize, Deserialize)]
struct BufferState {
current_size_ms: u64,
fill_percentage: f64,
last_underrun: Option<u64>,
underrun_count: u64,
adaptation_history: VecDeque<BufferAdaptation>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct BufferAdaptation {
timestamp: u64,
old_size_ms: u64,
new_size_ms: u64,
reason: String,
success: bool,
}
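/// Mutable quality-level state shared with the adaptation task.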
#[derive(Debug, Clone, Serialize, Deserialize)]
struct QualityState {
current_level: usize,
level_history: VecDeque<QualityChange>,
last_change: Option<u64>,
adaptation_stats: QualityAdaptationStats,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct QualityChange {
timestamp: u64,
old_level: usize,
new_level: usize,
trigger: String,
latency_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct QualityAdaptationStats {
total_adaptations: u64,
successful_adaptations: u64,
avg_latency_improvement_ms: f64,
stability_score: f64,
}
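/// LRU-tracked cache of previously synthesized audio, bounded by `max_size_bytes`.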
#[derive(Debug)]
struct PrefetchCache {
cache: HashMap<String, CachedSynthesis>,
current_size_bytes: u64,
max_size_bytes: u64,
hits: u64,
misses: u64,
lru_order: VecDeque<String>,
}
#[derive(Debug, Clone)]
struct CachedSynthesis {
key: String,
audio_data: Vec<u8>,
timestamp: u64,
quality_level: usize,
access_count: u64,
}
impl StreamingOptimizer {
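/// Creates an optimizer; the number of parallel-processing permits comes from
/// `chunk_processing.max_parallel_chunks`.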
pub fn new(config: StreamingOptimizerConfig) -> Self {
let processing_permits = config.chunk_processing.max_parallel_chunks;
Self {
config,
metrics: Arc::new(RwLock::new(StreamingMetrics::default())),
latency_history: Arc::new(RwLock::new(VecDeque::with_capacity(1000))),
buffer_state: Arc::new(RwLock::new(BufferState::default())),
quality_state: Arc::new(RwLock::new(QualityState::default())),
prefetch_cache: Arc::new(RwLock::new(PrefetchCache::default())),
optimization_history: Arc::new(RwLock::new(VecDeque::with_capacity(100))),
is_running: Arc::new(RwLock::new(false)),
processing_semaphore: Arc::new(Semaphore::new(processing_permits)),
}
}
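/// Starts the background monitoring and adaptation tasks; calling it while running is a no-op.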
pub async fn start(&self) -> Result<(), Box<dyn std::error::Error>> {
let mut is_running = self.is_running.write().await;
if *is_running {
return Ok(());
}
*is_running = true;
drop(is_running);
tracing::info!("Starting streaming optimizer");
self.initialize_quality_state().await;
self.start_latency_monitoring().await;
self.start_buffer_monitoring().await;
self.start_quality_adaptation().await;
self.start_prefetch_management().await;
Ok(())
}
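/// Signals the background tasks to exit on their next tick; calling it while stopped is a no-op.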
pub async fn stop(&self) -> Result<(), Box<dyn std::error::Error>> {
let mut is_running = self.is_running.write().await;
if !*is_running {
return Ok(());
}
*is_running = false;
tracing::info!("Stopped streaming optimizer");
Ok(())
}
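/// Records one latency sample, updates the rolling average, and triggers an
/// optimization when `max_latency_ms` is exceeded.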
pub async fn record_latency(&self, latency_ms: u64, context: String) {
let measurement = LatencyMeasurement {
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs(),
latency_ms,
quality_level: self.get_current_quality_level().await,
buffer_fill: self.get_buffer_fill_percentage().await,
context,
};
let mut history = self.latency_history.write().await;
history.push_back(measurement);
if history.len() > 1000 {
history.pop_front();
}
let mut metrics = self.metrics.write().await;
metrics.current_latency_ms = latency_ms;
let recent_measurements: Vec<u64> = history
.iter()
.rev()
.take(60)
.map(|m| m.latency_ms)
.collect();
if !recent_measurements.is_empty() {
metrics.average_latency_ms =
recent_measurements.iter().sum::<u64>() as f64 / recent_measurements.len() as f64;
}
// Release the history and metrics locks before awaiting optimization work.
drop(metrics);
drop(history);
if latency_ms > self.config.max_latency_ms {
self.trigger_latency_optimization().await;
}
}
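/// Builds a list of recommendations from current metrics, sorted by descending priority.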
pub async fn get_performance_recommendations(&self) -> Vec<StreamingRecommendation> {
let mut recommendations = Vec::new();
let metrics = self.metrics.read().await;
let buffer_state = self.buffer_state.read().await;
let quality_state = self.quality_state.read().await;
if metrics.current_latency_ms > self.config.target_latency_ms {
let excess_latency = metrics.current_latency_ms - self.config.target_latency_ms;
recommendations.push(StreamingRecommendation {
optimization: StreamingOptimization::QualityAdjustment,
priority: if excess_latency > 100 { 9 } else { 6 },
description: format!("Latency {} ms above target", excess_latency),
expected_improvement_ms: (excess_latency as f64 * 0.6) as u64,
quality_impact: -0.2,
implementation_complexity: ImplementationComplexity::Low,
});
}
if buffer_state.underrun_count > 0 {
recommendations.push(StreamingRecommendation {
optimization: StreamingOptimization::AdaptiveBuffering,
priority: 8,
description: format!("{} buffer underruns detected", buffer_state.underrun_count),
expected_improvement_ms: 50,
quality_impact: 0.0,
implementation_complexity: ImplementationComplexity::Medium,
});
}
if metrics.prefetch_hit_rate < 70.0 {
recommendations.push(StreamingRecommendation {
optimization: StreamingOptimization::PrefetchOptimization,
priority: 5,
description: format!("Low prefetch hit rate: {:.1}%", metrics.prefetch_hit_rate),
expected_improvement_ms: 30,
quality_impact: 0.1,
implementation_complexity: ImplementationComplexity::High,
});
}
if metrics.pipeline_efficiency < 80.0 {
recommendations.push(StreamingRecommendation {
optimization: StreamingOptimization::PipelineParallelization,
priority: 7,
description: format!(
"Pipeline efficiency low: {:.1}%",
metrics.pipeline_efficiency
),
expected_improvement_ms: 40,
quality_impact: 0.0,
implementation_complexity: ImplementationComplexity::High,
});
}
recommendations.sort_by(|a, b| b.priority.cmp(&a.priority));
recommendations
}
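/// Applies one optimization and appends its result to the bounded optimization history.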
pub async fn apply_optimization(
&self,
optimization: StreamingOptimization,
) -> StreamingOptimizationResult {
let result = match optimization {
StreamingOptimization::AdaptiveBuffering => self.optimize_adaptive_buffering().await,
StreamingOptimization::QualityAdjustment => self.optimize_quality_adjustment().await,
StreamingOptimization::PrefetchOptimization => self.optimize_prefetching().await,
StreamingOptimization::ChunkSizeOptimization => self.optimize_chunk_size().await,
StreamingOptimization::PipelineParallelization => {
self.optimize_pipeline_parallelization().await
}
StreamingOptimization::MemoryOptimization => self.optimize_memory_usage().await,
StreamingOptimization::CpuAffinityOptimization => self.optimize_cpu_affinity().await,
StreamingOptimization::GpuAcceleration => self.optimize_gpu_acceleration().await,
};
let optimization_result = StreamingOptimizationResult {
optimization,
latency_improvement_ms: result.0,
quality_impact: result.1,
memory_impact_bytes: result.2,
cpu_impact_percent: result.3,
success: result.4,
error: result.5,
};
let mut history = self.optimization_history.write().await;
history.push_back(optimization_result.clone());
if history.len() > 100 {
history.pop_front();
}
optimization_result
}
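/// Returns a clone of the current metrics snapshot.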
pub async fn get_metrics(&self) -> StreamingMetrics {
self.metrics.read().await.clone()
}
async fn initialize_quality_state(&self) {
let mut quality_state = self.quality_state.write().await;
let default_level = self.config.quality_adaptation.quality_levels.len() / 2;
quality_state.current_level = default_level;
}
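// Spawns a task that refreshes the p95/p99 latency percentiles every 100 ms.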
async fn start_latency_monitoring(&self) {
let is_running = self.is_running.clone();
let metrics = self.metrics.clone();
let latency_history = self.latency_history.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_millis(100));
loop {
interval.tick().await;
let running = is_running.read().await;
if !*running {
break;
}
drop(running);
let history = latency_history.read().await;
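// Publish nearest-rank p95/p99 over the most recent samples once at least 20 exist.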
if history.len() >= 20 {
let mut recent_latencies: Vec<u64> = history
.iter()
.rev()
.take(100)
.map(|m| m.latency_ms)
.collect();
recent_latencies.sort_unstable();
let p95_index = (recent_latencies.len() as f64 * 0.95) as usize;
let p99_index = (recent_latencies.len() as f64 * 0.99) as usize;
let mut metrics = metrics.write().await;
metrics.latency_p95_ms = recent_latencies.get(p95_index).cloned().unwrap_or(0);
metrics.latency_p99_ms = recent_latencies.get(p99_index).cloned().unwrap_or(0);
}
}
});
}
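// Spawns a task that mirrors buffer fill and underrun counts into the shared metrics.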
async fn start_buffer_monitoring(&self) {
let is_running = self.is_running.clone();
let buffer_state = self.buffer_state.clone();
let metrics = self.metrics.clone();
let config = self.config.buffer_config.clone();
tokio::spawn(async move {
let mut interval =
tokio::time::interval(Duration::from_millis(config.monitoring_interval_ms));
loop {
interval.tick().await;
let running = is_running.read().await;
if !*running {
break;
}
drop(running);
let buffer = buffer_state.read().await;
let mut metrics = metrics.write().await;
metrics.buffer_fill_percent = buffer.fill_percentage;
metrics.buffer_underruns = buffer.underrun_count;
}
});
}
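// Spawns a task that steps the quality level down when latency exceeds the
// adaptation threshold; upward recovery is not implemented in this loop.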
async fn start_quality_adaptation(&self) {
let is_running = self.is_running.clone();
let quality_state = self.quality_state.clone();
let metrics = self.metrics.clone();
let config = self.config.quality_adaptation.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_millis(500));
loop {
interval.tick().await;
let running = is_running.read().await;
if !*running {
break;
}
drop(running);
if !config.enabled {
continue;
}
let current_metrics = metrics.read().await;
let mut quality = quality_state.write().await;
if current_metrics.current_latency_ms > config.adaptation_threshold_ms {
if quality.current_level > config.min_quality_level {
let old_level = quality.current_level;
let new_level = (quality.current_level - 1).max(config.min_quality_level);
quality.current_level = new_level;
quality.level_history.push_back(QualityChange {
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs(),
old_level,
new_level,
trigger: "high_latency".to_string(),
latency_ms: current_metrics.current_latency_ms,
});
quality.adaptation_stats.total_adaptations += 1;
tracing::info!(
"Reduced quality level from {} to {} due to high latency",
old_level,
quality.current_level
);
}
}
}
});
}
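// Spawns a task that evicts prefetch cache entries older than five minutes.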
async fn start_prefetch_management(&self) {
let is_running = self.is_running.clone();
let prefetch_cache = self.prefetch_cache.clone();
let config = self.config.prefetching.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
loop {
interval.tick().await;
let running = is_running.read().await;
if !*running {
break;
}
drop(running);
if !config.enabled {
continue;
}
let mut cache = prefetch_cache.write().await;
let current_timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let expiry_seconds = 300u64;
let expired_keys: Vec<String> = cache
.cache
.iter()
.filter(|(_, entry)| {
current_timestamp.saturating_sub(entry.timestamp) > expiry_seconds
})
.map(|(key, _)| key.clone())
.collect();
for key in expired_keys {
if let Some(entry) = cache.cache.remove(&key) {
// Saturate to avoid a debug-mode underflow panic if size accounting drifts.
cache.current_size_bytes =
cache.current_size_bytes.saturating_sub(entry.audio_data.len() as u64);
cache.lru_order.retain(|k| k != &key);
}
}
}
});
}
async fn trigger_latency_optimization(&self) {
tracing::warn!("High latency detected, triggering optimization");
let _ = self
.apply_optimization(StreamingOptimization::QualityAdjustment)
.await;
}
async fn get_current_quality_level(&self) -> usize {
self.quality_state.read().await.current_level
}
async fn get_buffer_fill_percentage(&self) -> f64 {
self.buffer_state.read().await.fill_percentage
}
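// Each optimize_* method below returns fixed estimated impacts as
// (latency_improvement_ms, quality_impact, memory_impact_bytes, cpu_impact_percent, success, error).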
async fn optimize_adaptive_buffering(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing adaptive buffering");
(25, 0.0, 1024 * 1024, 5.0, true, None)
}
async fn optimize_quality_adjustment(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing quality adjustment");
(60, -0.15, -512 * 1024, -10.0, true, None)
}
async fn optimize_prefetching(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing prefetching");
(35, 0.05, 2 * 1024 * 1024, 8.0, true, None)
}
async fn optimize_chunk_size(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing chunk size");
(20, 0.0, 0, 3.0, true, None)
}
async fn optimize_pipeline_parallelization(
&self,
) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing pipeline parallelization");
(45, 0.0, 512 * 1024, 15.0, true, None)
}
async fn optimize_memory_usage(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing memory usage");
(15, 0.0, -1024 * 1024, -2.0, true, None)
}
async fn optimize_cpu_affinity(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing CPU affinity");
(30, 0.0, 0, -5.0, true, None)
}
async fn optimize_gpu_acceleration(&self) -> (i64, f64, i64, f64, bool, Option<String>) {
tracing::info!("Optimizing GPU acceleration");
(80, 0.1, 4 * 1024 * 1024, -20.0, true, None)
}
}
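/// A prioritized optimization suggestion with its expected impact.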
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamingRecommendation {
pub optimization: StreamingOptimization,
pub priority: u8,
pub description: String,
pub expected_improvement_ms: u64,
pub quality_impact: f64,
pub implementation_complexity: ImplementationComplexity,
}
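/// Relative effort required to act on a recommendation.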
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ImplementationComplexity {
Low,
Medium,
High,
}
impl Default for StreamingMetrics {
fn default() -> Self {
Self {
current_latency_ms: 0,
average_latency_ms: 0.0,
latency_p95_ms: 0,
latency_p99_ms: 0,
buffer_fill_percent: 50.0,
buffer_underruns: 0,
current_quality_level: 2,
quality_adaptations: 0,
prefetch_hit_rate: 0.0,
chunk_throughput: 0.0,
pipeline_efficiency: 100.0,
real_time_factor: 1.0,
}
}
}
impl Default for BufferState {
fn default() -> Self {
Self {
current_size_ms: 200,
fill_percentage: 50.0,
last_underrun: None,
underrun_count: 0,
adaptation_history: VecDeque::new(),
}
}
}
impl Default for QualityState {
fn default() -> Self {
Self {
current_level: 2,
level_history: VecDeque::new(),
last_change: None,
adaptation_stats: QualityAdaptationStats {
total_adaptations: 0,
successful_adaptations: 0,
avg_latency_improvement_ms: 0.0,
stability_score: 100.0,
},
}
}
}
impl Default for PrefetchCache {
fn default() -> Self {
Self {
cache: HashMap::new(),
current_size_bytes: 0,
max_size_bytes: 100 * 1024 * 1024,
hits: 0,
misses: 0,
lru_order: VecDeque::new(),
}
}
}
impl Default for StreamingOptimizerConfig {
fn default() -> Self {
Self {
enabled: true,
target_latency_ms: 100,
max_latency_ms: 200,
buffer_config: BufferConfig::default(),
quality_adaptation: QualityAdaptationConfig::default(),
prefetching: PrefetchingConfig::default(),
chunk_processing: ChunkProcessingConfig::default(),
pipeline_optimization: PipelineOptimizationConfig::default(),
}
}
}
impl Default for BufferConfig {
fn default() -> Self {
Self {
initial_buffer_ms: 200,
min_buffer_ms: 50,
max_buffer_ms: 1000,
adaptation_sensitivity: 0.7,
adaptive_buffering: true,
underrun_recovery: UnderrunRecoveryStrategy::Hybrid,
monitoring_interval_ms: 100,
}
}
}
impl Default for QualityAdaptationConfig {
fn default() -> Self {
Self {
enabled: true,
quality_levels: vec![
QualityLevel {
level: 0,
name: "Low".to_string(),
synthesis_time_multiplier: 0.5,
quality_score: 0.6,
memory_multiplier: 0.7,
cpu_multiplier: 0.6,
},
QualityLevel {
level: 1,
name: "Medium".to_string(),
synthesis_time_multiplier: 0.8,
quality_score: 0.8,
memory_multiplier: 0.9,
cpu_multiplier: 0.8,
},
QualityLevel {
level: 2,
name: "High".to_string(),
synthesis_time_multiplier: 1.0,
quality_score: 1.0,
memory_multiplier: 1.0,
cpu_multiplier: 1.0,
},
QualityLevel {
level: 3,
name: "Ultra".to_string(),
synthesis_time_multiplier: 1.5,
quality_score: 1.0,
memory_multiplier: 1.3,
cpu_multiplier: 1.4,
},
],
adaptation_threshold_ms: 150,
adjustment_aggressiveness: 0.6,
min_quality_level: 0,
recovery_speed: QualityRecoverySpeed::Moderate,
}
}
}
impl Default for PrefetchingConfig {
fn default() -> Self {
Self {
enabled: true,
lookahead_chars: 200,
trigger_threshold: 0.3,
max_concurrent_prefetch: 3,
cache_size_mb: 50,
strategy: PrefetchStrategy::Adaptive,
}
}
}
impl Default for ChunkProcessingConfig {
fn default() -> Self {
Self {
chunk_size_chars: 100,
chunk_overlap_chars: 10,
parallel_processing: true,
max_parallel_chunks: 4,
priority_scheduling: true,
dynamic_sizing: true,
}
}
}
impl Default for PipelineOptimizationConfig {
fn default() -> Self {
Self {
pipeline_parallel: true,
pipeline_stages: 4,
stage_skipping: false,
cpu_affinity: true,
gpu_acceleration: false,
memory_optimization: true,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_streaming_optimizer_creation() {
let config = StreamingOptimizerConfig::default();
let optimizer = StreamingOptimizer::new(config);
assert!(!*optimizer.is_running.read().await);
}
#[tokio::test]
async fn test_latency_recording() {
let config = StreamingOptimizerConfig::default();
let optimizer = StreamingOptimizer::new(config);
optimizer
.record_latency(150, "test_context".to_string())
.await;
let metrics = optimizer.get_metrics().await;
assert_eq!(metrics.current_latency_ms, 150);
}
#[tokio::test]
async fn test_performance_recommendations() {
let config = StreamingOptimizerConfig::default();
let optimizer = StreamingOptimizer::new(config);
optimizer.record_latency(300, "test".to_string()).await;
let recommendations = optimizer.get_performance_recommendations().await;
assert!(!recommendations.is_empty());
assert!(recommendations
.iter()
.any(|r| r.optimization == StreamingOptimization::QualityAdjustment));
}
#[tokio::test]
async fn test_optimization_application() {
let config = StreamingOptimizerConfig::default();
let optimizer = StreamingOptimizer::new(config);
let result = optimizer
.apply_optimization(StreamingOptimization::AdaptiveBuffering)
.await;
assert!(result.success);
assert!(result.latency_improvement_ms > 0);
}
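// Lifecycle sketch using only the public start/stop API: the private
// is_running flag (accessible from this test module) should toggle.
#[tokio::test]
async fn test_start_stop_lifecycle() {
let config = StreamingOptimizerConfig::default();
let optimizer = StreamingOptimizer::new(config);
optimizer.start().await.unwrap();
assert!(*optimizer.is_running.read().await);
optimizer.stop().await.unwrap();
assert!(!*optimizer.is_running.read().await);
}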
#[test]
fn test_config_defaults() {
let config = StreamingOptimizerConfig::default();
assert!(config.enabled);
assert_eq!(config.target_latency_ms, 100);
assert_eq!(config.max_latency_ms, 200);
assert!(config.quality_adaptation.enabled);
}
#[test]
fn test_quality_levels() {
let config = QualityAdaptationConfig::default();
assert_eq!(config.quality_levels.len(), 4);
assert_eq!(config.quality_levels[0].name, "Low");
assert_eq!(config.quality_levels[3].name, "Ultra");
}
}