use super::types::*;
use crate::pipeline::conversational::types::TrendDirection;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
/// Aggregated quality scores for a streaming session.
///
/// All fields are normalized scores in `[0.0, 1.0]`; higher is better.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamingQuality {
    /// Evenness of chunk delivery over time.
    pub smoothness: f32,
    /// How natural the streamed content reads.
    pub naturalness: f32,
    /// How quickly chunks are delivered relative to the latency target.
    pub responsiveness: f32,
    /// Structural/semantic coherence across chunks.
    pub coherence: f32,
    /// Mean of the four primary dimensions above.
    pub overall_quality: f32,
    /// Uniformity of recent chunk sizes.
    pub chunk_consistency: f32,
    // The remaining fields are secondary dimensions; in this file they are
    // populated with fixed baselines rather than measured directly.
    pub flow_smoothness: f32,
    pub timing_accuracy: f32,
    pub buffer_efficiency: f32,
}
impl Default for StreamingQuality {
    /// Neutral starting point: every score begins at the same baseline.
    fn default() -> Self {
        let baseline = 0.8;
        Self {
            smoothness: baseline,
            naturalness: baseline,
            responsiveness: baseline,
            coherence: baseline,
            overall_quality: baseline,
            chunk_consistency: baseline,
            flow_smoothness: baseline,
            timing_accuracy: baseline,
            buffer_efficiency: baseline,
        }
    }
}
/// One point-in-time quality sample for a single delivered chunk.
#[derive(Debug, Clone)]
pub struct QualityMeasurement {
    /// When the measurement was taken.
    pub timestamp: Instant,
    /// Normalized `[0, 1]` scores for the four primary dimensions.
    pub smoothness: f32,
    pub naturalness: f32,
    pub responsiveness: f32,
    pub coherence: f32,
    /// Delivery latency of the measured chunk, in milliseconds.
    pub latency_ms: f64,
    /// Uniformity of recent chunk sizes (1.0 = perfectly consistent).
    pub chunk_consistency: f32,
    /// Equal-weight composite of the four primary dimensions.
    pub score: f32,
    /// Confidence in `score` (blend of score and chunk consistency).
    pub confidence: f32,
    /// Direction quality was moving when this sample was recorded.
    pub trend: TrendDirection,
}
/// Minimum acceptable levels used when judging streaming quality.
#[derive(Debug, Clone)]
pub struct QualityThresholds {
    /// Per-dimension floors checked by `meets_quality_thresholds`.
    pub min_smoothness: f32,
    pub min_naturalness: f32,
    pub min_responsiveness: f32,
    pub min_coherence: f32,
    /// Ceiling on acceptable chunk delivery latency, in milliseconds.
    pub max_latency_ms: f64,
    /// Floor for the composite overall-quality score.
    pub min_overall_quality: f32,
    // Graded quality bands: floor, target, and "excellent" levels.
    pub minimum_acceptable: f32,
    pub target_quality: f32,
    pub excellent_threshold: f32,
    /// Below this level the stream is considered degraded.
    pub degradation_threshold: f32,
}
impl Default for QualityThresholds {
    /// Conservative defaults: responsiveness is held to the tightest floor,
    /// latency is capped at 200 ms, and the quality bands step 0.5 → 0.9.
    fn default() -> Self {
        Self {
            // Per-dimension floors.
            min_smoothness: 0.7,
            min_naturalness: 0.6,
            min_responsiveness: 0.8,
            min_coherence: 0.7,
            min_overall_quality: 0.7,
            // Latency ceiling in milliseconds.
            max_latency_ms: 200.0,
            // Graded quality bands.
            degradation_threshold: 0.5,
            minimum_acceptable: 0.6,
            target_quality: 0.8,
            excellent_threshold: 0.9,
        }
    }
}
/// Direction of movement for the overall score and each quality dimension.
#[derive(Debug, Clone)]
pub struct QualityTrends {
    pub overall_trend: TrendDirection,
    pub smoothness_trend: TrendDirection,
    pub naturalness_trend: TrendDirection,
    pub responsiveness_trend: TrendDirection,
    pub coherence_trend: TrendDirection,
}
impl Default for QualityTrends {
    /// Everything starts `Stable` until enough samples exist to compare.
    fn default() -> Self {
        Self {
            overall_trend: TrendDirection::Stable,
            smoothness_trend: TrendDirection::Stable,
            naturalness_trend: TrendDirection::Stable,
            responsiveness_trend: TrendDirection::Stable,
            coherence_trend: TrendDirection::Stable,
        }
    }
}
/// Umbrella for the deeper (and more expensive) analysis results.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AdvancedQualityMetrics {
    pub perceptual_quality: PerceptualQuality,
    pub statistical_analysis: StatisticalAnalysis,
    pub performance_benchmarks: PerformanceBenchmarks,
    pub degradation_indicators: DegradationIndicators,
}
/// Heuristic user-perception scores derived from the measurement window.
///
/// All fields are in `[0, 1]`. Higher is better except `cognitive_load`,
/// where lower is better.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerceptualQuality {
    pub fluency: f32,
    pub engagement: f32,
    pub clarity: f32,
    pub emotional_tone: f32,
    pub conversation_flow: f32,
    pub user_experience_score: f32,
    /// Estimated mental effort required of the user (lower is better).
    pub cognitive_load: f32,
    pub attention_retention: f32,
    pub engagement_flow: f32,
}
/// Descriptive statistics over the sliding measurement window.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StatisticalAnalysis {
    pub mean_scores: QualityScores,
    pub std_deviation: QualityScores,
    pub variance: QualityScores,
    pub percentiles: QualityPercentiles,
    pub correlations: QualityCorrelations,
    /// Number of measurements the statistics were computed over.
    pub chunk_count: usize,
    // NOTE(review): character totals here are derived from latency in
    // `calculate_statistical_analysis`, not from actual chunk text.
    pub total_characters: usize,
    pub average_chunk_size: f32,
    /// See `calculate_statistical_analysis` — currently a coefficient of
    /// variation rather than a true skewness.
    pub distribution_skew: f32,
}
/// One value per primary quality dimension.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityScores {
    pub smoothness: f32,
    pub naturalness: f32,
    pub responsiveness: f32,
    pub coherence: f32,
}
/// Per-dimension scores at the 25/50/75/90/95 percentile marks.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct QualityPercentiles {
    pub p25: QualityScores,
    pub p50: QualityScores,
    pub p75: QualityScores,
    pub p90: QualityScores,
    pub p95: QualityScores,
}
/// Pearson correlations between selected pairs of quality series.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QualityCorrelations {
    pub smoothness_naturalness: f32,
    pub responsiveness_coherence: f32,
    /// Sign-flipped in `calculate_correlations` (latency vs. smoothness).
    pub latency_quality: f32,
    pub consistency_smoothness: f32,
}
/// Benchmark results against baseline, history, and (optionally) peers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceBenchmarks {
    pub target_comparison: BenchmarkComparison,
    pub historical_comparison: BenchmarkComparison,
    /// Not populated in this file; reserved for cross-instance comparison.
    pub peer_comparison: Option<BenchmarkComparison>,
    /// Fraction of historical samples outperformed by the current one.
    pub performance_percentile: f32,
    /// (p50, p95, p99) delivery latencies in milliseconds.
    pub latency_percentiles: (f32, f32, f32),
    pub throughput_mbps: f32,
    pub resource_efficiency: f32,
    pub scalability_factor: f32,
}
/// Outcome of comparing current quality against a reference point.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkComparison {
    /// Relative delta versus the reference, clamped to `[-1, 1]`.
    pub relative_performance: f32,
    /// Headroom left before a perfect score (`1.0 - current quality`).
    pub improvement_potential: f32,
    /// How much to trust this comparison.
    pub confidence_level: f32,
}
/// Signals describing whether — and how fast — quality is degrading.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DegradationIndicators {
    pub is_degrading: bool,
    /// Quality lost per observation period while degrading (0 otherwise).
    pub degradation_rate: f32,
    /// Projected time until the minimum-quality threshold is crossed.
    pub time_to_threshold_breach: Option<Duration>,
    /// Dimensions currently below their configured thresholds.
    pub critical_areas: Vec<QualityArea>,
    /// Actions suggested to arrest the degradation.
    pub recommended_actions: Vec<OptimizationRecommendation>,
    // Event counters accumulated over the measurement window.
    pub buffer_overflows: usize,
    pub quality_drops: usize,
    pub recovery_events: usize,
    pub timing_violations: usize,
}
/// Quality dimensions that can be flagged as problematic.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum QualityArea {
    Smoothness,
    Naturalness,
    Responsiveness,
    Coherence,
    Consistency,
    OverallPerformance,
    Performance,
    Reliability,
}
/// A suggested optimization with priority and expected payoff.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendation {
    /// Kind of optimization to apply.
    pub optimization_type: OptimizationType,
    // NOTE(review): always set to the same value as `optimization_type`
    // everywhere in this file — looks like a redundant duplicate kept for
    // API compatibility; confirm before removing.
    pub recommendation_type: OptimizationType,
    /// Relative urgency in `[0, 1]`; recommendations are sorted by this,
    /// descending.
    pub priority: f32,
    /// Estimated quality gain if the recommendation is applied.
    pub expected_improvement: f32,
    /// Rough implementation effort.
    pub complexity: ComplexityLevel,
    /// Human-readable explanation of the recommendation.
    pub description: String,
    /// Quality areas this recommendation targets.
    pub affected_areas: Vec<QualityArea>,
}
/// Categories of optimization the analyzer can recommend.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum OptimizationType {
    ChunkSizeAdjustment,
    TimingOptimization,
    FlowRateAdjustment,
    BufferManagement,
    PipelineReorganization,
    ModelParameterTuning,
    InfrastructureUpgrade,
    Performance,
}
/// Rough implementation-effort classification.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComplexityLevel {
    Low,
    Medium,
    High,
}
/// Analyzes per-chunk streaming quality, maintains a sliding measurement
/// window plus a long-term history, and derives trends, benchmarks and
/// optimization recommendations from them.
#[derive(Debug)]
pub struct QualityAnalyzer {
    /// Sliding window of the most recent per-chunk measurements.
    metrics_window: Arc<RwLock<VecDeque<QualityMeasurement>>>,
    /// Maximum number of measurements kept in `metrics_window`.
    window_size: usize,
    pub thresholds: QualityThresholds,
    /// Gates the expensive statistical/perceptual analysis path.
    advanced_analysis_enabled: bool,
    /// Long-term series of aggregated window snapshots (capped at 1000).
    historical_metrics: Arc<RwLock<VecDeque<StreamingQuality>>>,
    /// Optional reference point for benchmark comparisons.
    performance_baselines: Arc<RwLock<Option<StreamingQuality>>>,
    // NOTE(review): the public fields below are initialized to defaults and
    // never updated within this file — presumably mutated by callers
    // elsewhere; verify before relying on them.
    pub overall_quality: QualityMeasurement,
    pub streaming_quality: StreamingQuality,
    pub advanced_metrics: AdvancedQualityMetrics,
    pub optimization_recommendations: Vec<OptimizationRecommendation>,
    pub quality_thresholds: QualityThresholds,
    pub assessment_metadata: std::collections::HashMap<String, String>,
}
impl QualityAnalyzer {
pub fn new() -> Self {
use crate::pipeline::conversational::types::TrendDirection;
Self {
metrics_window: Arc::new(RwLock::new(VecDeque::with_capacity(100))),
window_size: 100,
thresholds: QualityThresholds::default(),
advanced_analysis_enabled: true,
historical_metrics: Arc::new(RwLock::new(VecDeque::with_capacity(1000))),
performance_baselines: Arc::new(RwLock::new(None)),
overall_quality: QualityMeasurement {
timestamp: Instant::now(),
smoothness: 0.8,
naturalness: 0.8,
responsiveness: 0.8,
coherence: 0.8,
latency_ms: 100.0,
chunk_consistency: 0.8,
score: 0.8,
confidence: 0.7,
trend: TrendDirection::Stable,
},
streaming_quality: StreamingQuality::default(),
advanced_metrics: AdvancedQualityMetrics::default(),
optimization_recommendations: Vec::new(),
quality_thresholds: QualityThresholds::default(),
assessment_metadata: std::collections::HashMap::new(),
}
}
pub fn with_config(window_size: usize, thresholds: QualityThresholds) -> Self {
use crate::pipeline::conversational::types::TrendDirection;
Self {
metrics_window: Arc::new(RwLock::new(VecDeque::with_capacity(window_size))),
window_size,
thresholds: thresholds.clone(),
advanced_analysis_enabled: true,
historical_metrics: Arc::new(RwLock::new(VecDeque::with_capacity(1000))),
performance_baselines: Arc::new(RwLock::new(None)),
overall_quality: QualityMeasurement {
timestamp: Instant::now(),
smoothness: 0.8,
naturalness: 0.8,
responsiveness: 0.8,
coherence: 0.8,
latency_ms: 100.0,
chunk_consistency: 0.8,
score: 0.8,
confidence: 0.7,
trend: TrendDirection::Stable,
},
streaming_quality: StreamingQuality::default(),
advanced_metrics: AdvancedQualityMetrics::default(),
optimization_recommendations: Vec::new(),
quality_thresholds: thresholds,
assessment_metadata: std::collections::HashMap::new(),
}
}
    /// Shared handle to the sliding window of per-chunk measurements.
    pub fn metrics_window(&self) -> &Arc<RwLock<VecDeque<QualityMeasurement>>> {
        &self.metrics_window
    }
    /// Maximum number of measurements retained in the sliding window.
    pub fn window_size(&self) -> usize {
        self.window_size
    }
    /// Quality thresholds this analyzer evaluates against.
    pub fn thresholds(&self) -> &QualityThresholds {
        &self.thresholds
    }
pub async fn analyze_chunk_quality(
&self,
chunk: &StreamChunk,
delivery_time: Duration,
) -> QualityMeasurement {
let smoothness = self.calculate_smoothness(chunk, delivery_time).await;
let naturalness = self.calculate_naturalness(chunk).await;
let responsiveness = self.calculate_responsiveness(delivery_time);
let coherence = self.calculate_coherence(chunk).await;
let chunk_consistency = self.calculate_chunk_consistency(chunk).await;
let score =
smoothness * 0.25 + naturalness * 0.25 + responsiveness * 0.25 + coherence * 0.25;
let measurement = QualityMeasurement {
timestamp: Instant::now(),
smoothness,
naturalness,
responsiveness,
coherence,
latency_ms: delivery_time.as_millis() as f64,
chunk_consistency,
score,
confidence: (score * 0.8 + chunk_consistency * 0.2).min(1.0).max(0.0),
trend: TrendDirection::Stable,
};
let mut window = self.metrics_window.write().await;
window.push_back(measurement.clone());
if window.len() > self.window_size {
window.pop_front();
}
measurement
}
pub async fn calculate_overall_quality(&self) -> StreamingQuality {
let window = self.metrics_window.read().await;
if window.is_empty() {
return StreamingQuality::default();
}
let count = window.len() as f32;
let smoothness = window.iter().map(|m| m.smoothness).sum::<f32>() / count;
let naturalness = window.iter().map(|m| m.naturalness).sum::<f32>() / count;
let responsiveness = window.iter().map(|m| m.responsiveness).sum::<f32>() / count;
let coherence = window.iter().map(|m| m.coherence).sum::<f32>() / count;
let overall_quality = (smoothness + naturalness + responsiveness + coherence) / 4.0;
let quality = StreamingQuality {
smoothness,
naturalness,
responsiveness,
coherence,
overall_quality,
chunk_consistency: 0.8,
flow_smoothness: 0.8,
timing_accuracy: 0.8,
buffer_efficiency: 0.8,
};
let mut historical = self.historical_metrics.write().await;
historical.push_back(quality.clone());
if historical.len() > 1000 {
historical.pop_front();
}
quality
}
pub async fn calculate_advanced_metrics(&self) -> AdvancedQualityMetrics {
if !self.advanced_analysis_enabled {
return AdvancedQualityMetrics::default();
}
let window = self.metrics_window.read().await;
let historical = self.historical_metrics.read().await;
AdvancedQualityMetrics {
perceptual_quality: self.calculate_perceptual_quality(&window).await,
statistical_analysis: self.calculate_statistical_analysis(&window).await,
performance_benchmarks: self.calculate_performance_benchmarks(&historical).await,
degradation_indicators: self
.calculate_degradation_indicators(&window, &historical)
.await,
}
}
pub async fn meets_quality_thresholds(&self) -> bool {
let quality = self.calculate_overall_quality().await;
quality.smoothness >= self.thresholds.min_smoothness
&& quality.naturalness >= self.thresholds.min_naturalness
&& quality.responsiveness >= self.thresholds.min_responsiveness
&& quality.coherence >= self.thresholds.min_coherence
&& quality.overall_quality >= self.thresholds.min_overall_quality
}
pub async fn get_quality_trends(&self) -> QualityTrends {
let window = self.metrics_window.read().await;
if window.len() < 10 {
return QualityTrends::default();
}
let recent = &window.as_slices().0[window.len() - 5..];
let earlier = &window.as_slices().0[window.len() - 10..window.len() - 5];
let recent_avg = recent
.iter()
.map(|m| (m.smoothness + m.naturalness + m.responsiveness + m.coherence) / 4.0)
.sum::<f32>()
/ recent.len() as f32;
let earlier_avg = earlier
.iter()
.map(|m| (m.smoothness + m.naturalness + m.responsiveness + m.coherence) / 4.0)
.sum::<f32>()
/ earlier.len() as f32;
let overall_trend = self.calculate_trend_direction(recent_avg, earlier_avg);
let smoothness_trend = self.calculate_metric_trend(recent, earlier, |m| m.smoothness);
let naturalness_trend = self.calculate_metric_trend(recent, earlier, |m| m.naturalness);
let responsiveness_trend =
self.calculate_metric_trend(recent, earlier, |m| m.responsiveness);
let coherence_trend = self.calculate_metric_trend(recent, earlier, |m| m.coherence);
QualityTrends {
overall_trend,
smoothness_trend,
naturalness_trend,
responsiveness_trend,
coherence_trend,
}
}
pub async fn generate_optimization_recommendations(&self) -> Vec<OptimizationRecommendation> {
let quality = self.calculate_overall_quality().await;
let trends = self.get_quality_trends().await;
let mut recommendations = Vec::new();
if quality.smoothness < self.thresholds.min_smoothness {
recommendations.push(OptimizationRecommendation {
optimization_type: OptimizationType::ChunkSizeAdjustment,
recommendation_type: OptimizationType::ChunkSizeAdjustment,
priority: 0.8,
expected_improvement: 0.15,
complexity: ComplexityLevel::Low,
description: "Adjust chunk sizes to improve smoothness - consider smaller, more consistent chunks".to_string(),
affected_areas: vec![QualityArea::Smoothness],
});
}
if quality.responsiveness < self.thresholds.min_responsiveness {
recommendations.push(OptimizationRecommendation {
optimization_type: OptimizationType::TimingOptimization,
recommendation_type: OptimizationType::TimingOptimization,
priority: 0.9,
expected_improvement: 0.2,
complexity: ComplexityLevel::Medium,
description:
"Optimize timing algorithms to reduce latency and improve responsiveness"
.to_string(),
affected_areas: vec![QualityArea::Responsiveness],
});
}
if quality.naturalness < self.thresholds.min_naturalness {
recommendations.push(OptimizationRecommendation {
optimization_type: OptimizationType::ModelParameterTuning,
recommendation_type: OptimizationType::ModelParameterTuning,
priority: 0.7,
expected_improvement: 0.1,
complexity: ComplexityLevel::High,
description:
"Fine-tune model parameters to improve naturalness of generated content"
.to_string(),
affected_areas: vec![QualityArea::Naturalness],
});
}
if quality.coherence < self.thresholds.min_coherence {
recommendations.push(OptimizationRecommendation {
optimization_type: OptimizationType::PipelineReorganization,
recommendation_type: OptimizationType::PipelineReorganization,
priority: 0.6,
expected_improvement: 0.12,
complexity: ComplexityLevel::High,
description:
"Reorganize processing pipeline to maintain better coherence across chunks"
.to_string(),
affected_areas: vec![QualityArea::Coherence],
});
}
if trends.overall_trend == TrendDirection::Declining {
recommendations.push(OptimizationRecommendation {
optimization_type: OptimizationType::InfrastructureUpgrade,
recommendation_type: OptimizationType::InfrastructureUpgrade,
priority: 0.5,
expected_improvement: 0.25,
complexity: ComplexityLevel::High,
description:
"Consider infrastructure upgrades to address declining performance trends"
.to_string(),
affected_areas: vec![QualityArea::OverallPerformance],
});
}
recommendations.sort_by(|a, b| {
b.priority.partial_cmp(&a.priority).unwrap_or(std::cmp::Ordering::Equal)
});
recommendations
}
pub async fn set_performance_baseline(&self, baseline: StreamingQuality) {
let mut baselines = self.performance_baselines.write().await;
*baselines = Some(baseline);
}
pub async fn reset(&self) {
let mut window = self.metrics_window.write().await;
window.clear();
let mut historical = self.historical_metrics.write().await;
historical.clear();
}
    /// Scores delivery smoothness for one chunk from timing variance, chunk
    /// length, and how far delivery time exceeds the 100 ms target.
    /// Result is clamped to `[0, 1]`.
    async fn calculate_smoothness(&self, chunk: &StreamChunk, delivery_time: Duration) -> f32 {
        let mut smoothness = 0.8;
        let window = self.metrics_window.read().await;
        if window.len() > 1 {
            let timing_variance = self.calculate_timing_variance(&window);
            // NOTE(review): with zero variance this factor is 2.0, pushing
            // the score above 1.0 until the final clamp — confirm the boost
            // is intentional and not a misplaced parenthesis.
            smoothness *= (1.0 - timing_variance.min(0.5)) * 2.0;
        }
        // Penalize very short or very long chunks.
        let length_factor = self.calculate_length_consistency_factor(chunk);
        smoothness *= length_factor;
        let target_time_ms = 100.0;
        let actual_time_ms = delivery_time.as_millis() as f64;
        // Up to 1.5x the target latency is tolerated without penalty;
        // beyond that the factor decays, floored at 0.3.
        let timing_factor = if actual_time_ms <= target_time_ms * 1.5 {
            1.0
        } else {
            (target_time_ms / actual_time_ms).max(0.3) as f32
        };
        smoothness *= timing_factor;
        // Clamp to the normalized [0, 1] range.
        smoothness.max(0.0).min(1.0)
    }
async fn calculate_naturalness(&self, chunk: &StreamChunk) -> f32 {
let content = &chunk.content;
let mut naturalness = 0.8;
naturalness *= self.analyze_linguistic_patterns(content);
naturalness *= self.analyze_content_flow(chunk).await;
naturalness *= self.analyze_punctuation_structure(content);
naturalness.max(0.0).min(1.0)
}
fn calculate_responsiveness(&self, delivery_time: Duration) -> f32 {
let target_time_ms = 100.0;
let actual_time_ms = delivery_time.as_millis() as f64;
if actual_time_ms <= target_time_ms {
1.0
} else if actual_time_ms <= target_time_ms * 2.0 {
(target_time_ms / actual_time_ms).max(0.5) as f32
} else {
(target_time_ms / actual_time_ms).max(0.1) as f32
}
}
async fn calculate_coherence(&self, chunk: &StreamChunk) -> f32 {
let mut coherence = 0.8;
coherence *= self.analyze_content_coherence(chunk);
coherence *= self.analyze_context_consistency(chunk).await;
coherence *= self.analyze_semantic_coherence(chunk);
coherence.max(0.0).min(1.0)
}
    /// Consistency of recent chunk sizes; 1.0 means perfectly uniform.
    ///
    /// NOTE(review): `sizes` maps every retained measurement to the length
    /// of the *current* chunk, so the variance is always zero and the
    /// result is always 1.0 once two measurements exist.
    /// `QualityMeasurement` would need to record each chunk's size for
    /// this to measure real variation — flagging rather than changing
    /// behavior here.
    async fn calculate_chunk_consistency(&self, chunk: &StreamChunk) -> f32 {
        let window = self.metrics_window.read().await;
        // Too little history to judge consistency.
        if window.len() < 2 {
            return 1.0;
        }
        let recent_window: Vec<_> = window.iter().rev().take(10).collect();
        let sizes: Vec<usize> = recent_window.iter().map(|_| chunk.content.len()).collect();
        if sizes.is_empty() {
            return 1.0;
        }
        let mean_size = sizes.iter().sum::<usize>() as f32 / sizes.len() as f32;
        let variance = sizes
            .iter()
            .map(|&size| {
                let diff = size as f32 - mean_size;
                diff * diff
            })
            .sum::<f32>()
            / sizes.len() as f32;
        // Inverse of relative variance; the +1 guards division by zero.
        let consistency = 1.0 / (1.0 + variance / (mean_size * mean_size + 1.0));
        consistency.max(0.0).min(1.0)
    }
async fn calculate_perceptual_quality(
&self,
window: &VecDeque<QualityMeasurement>,
) -> PerceptualQuality {
if window.is_empty() {
return PerceptualQuality::default();
}
let count = window.len() as f32;
PerceptualQuality {
fluency: window.iter().map(|m| m.smoothness * m.coherence).sum::<f32>() / count,
engagement: window.iter().map(|m| m.naturalness * 0.9).sum::<f32>() / count,
clarity: window.iter().map(|m| m.coherence * 0.95).sum::<f32>() / count,
emotional_tone: window.iter().map(|m| m.naturalness * 0.8).sum::<f32>() / count,
conversation_flow: window
.iter()
.map(|m| (m.smoothness + m.responsiveness) / 2.0)
.sum::<f32>()
/ count,
user_experience_score: window
.iter()
.map(|m| (m.smoothness + m.naturalness + m.responsiveness) / 3.0)
.sum::<f32>()
/ count,
cognitive_load: window.iter().map(|m| (1.0 - m.coherence) * 0.5).sum::<f32>() / count,
attention_retention: window.iter().map(|m| m.responsiveness * m.coherence).sum::<f32>()
/ count,
engagement_flow: window
.iter()
.map(|m| (m.naturalness + m.responsiveness) / 2.0)
.sum::<f32>()
/ count,
}
}
    /// Builds descriptive statistics (means, deviations, percentiles,
    /// correlations) over the measurement window.
    async fn calculate_statistical_analysis(
        &self,
        window: &VecDeque<QualityMeasurement>,
    ) -> StatisticalAnalysis {
        if window.is_empty() {
            return StatisticalAnalysis::default();
        }
        // Per-dimension series extracted once and reused below.
        let smoothness_values: Vec<f32> = window.iter().map(|m| m.smoothness).collect();
        let naturalness_values: Vec<f32> = window.iter().map(|m| m.naturalness).collect();
        let responsiveness_values: Vec<f32> = window.iter().map(|m| m.responsiveness).collect();
        let coherence_values: Vec<f32> = window.iter().map(|m| m.coherence).collect();
        // NOTE(review): measurements carry no character count, so this
        // derives one from latency (10 chars per ms) — a placeholder proxy;
        // confirm before treating `total_characters` as real text volume.
        let total_chars: usize = window.iter().map(|m| (m.latency_ms * 10.0) as usize).sum();
        let chunk_count = window.len();
        let avg_size = if chunk_count > 0 { total_chars as f32 / chunk_count as f32 } else { 0.0 };
        StatisticalAnalysis {
            mean_scores: QualityScores {
                smoothness: self.calculate_mean(&smoothness_values),
                naturalness: self.calculate_mean(&naturalness_values),
                responsiveness: self.calculate_mean(&responsiveness_values),
                coherence: self.calculate_mean(&coherence_values),
            },
            std_deviation: QualityScores {
                smoothness: self.calculate_std_dev(&smoothness_values),
                naturalness: self.calculate_std_dev(&naturalness_values),
                responsiveness: self.calculate_std_dev(&responsiveness_values),
                coherence: self.calculate_std_dev(&coherence_values),
            },
            variance: QualityScores {
                smoothness: self.calculate_variance(&smoothness_values),
                naturalness: self.calculate_variance(&naturalness_values),
                responsiveness: self.calculate_variance(&responsiveness_values),
                coherence: self.calculate_variance(&coherence_values),
            },
            percentiles: self.calculate_percentiles(
                &smoothness_values,
                &naturalness_values,
                &responsiveness_values,
                &coherence_values,
            ),
            correlations: self.calculate_correlations(window),
            chunk_count,
            total_characters: total_chars,
            average_chunk_size: avg_size,
            // NOTE(review): this is the coefficient of variation of
            // smoothness (std-dev / mean, +0.001 to avoid dividing by
            // zero), not a true distribution skewness.
            distribution_skew: self.calculate_std_dev(&smoothness_values)
                / (self.calculate_mean(&smoothness_values) + 0.001),
        }
    }
async fn calculate_performance_benchmarks(
&self,
historical: &VecDeque<StreamingQuality>,
) -> PerformanceBenchmarks {
let baselines = self.performance_baselines.read().await;
let window = self.metrics_window.read().await;
if let Some(baseline) = baselines.as_ref() {
if let Some(current) = historical.back() {
let target_comparison = self.compare_to_baseline(current, baseline);
let historical_comparison = self.compare_to_historical(current, historical);
let latency_values: Vec<f32> = window.iter().map(|m| m.latency_ms as f32).collect();
let p50 = self.calculate_percentile_value(&latency_values, 0.50);
let p95 = self.calculate_percentile_value(&latency_values, 0.95);
let p99 = self.calculate_percentile_value(&latency_values, 0.99);
let time_window_secs = if window.len() > 1 {
let back_timestamp = window
.back()
.expect("window.back() guaranteed by len() > 1 check")
.timestamp;
let front_timestamp = window
.front()
.expect("window.front() guaranteed by len() > 1 check")
.timestamp;
let elapsed = back_timestamp.duration_since(front_timestamp);
elapsed.as_secs_f32().max(1.0)
} else {
1.0
};
let measurements_per_sec = window.len() as f32 / time_window_secs;
let throughput_mbps = (measurements_per_sec * 100.0) / 1_000_000.0;
let avg_latency = latency_values.iter().sum::<f32>() / latency_values.len() as f32;
let avg_quality = window.iter().map(|m| m.score).sum::<f32>() / window.len() as f32;
let resource_efficiency = if avg_latency > 0.0 {
(avg_quality * 100.0 / avg_latency).min(1.0).max(0.0)
} else {
avg_quality
};
let quality_variance = {
let qualities: Vec<f32> = window.iter().map(|m| m.score).collect();
let mean = avg_quality;
let variance = qualities.iter().map(|&q| (q - mean).powi(2)).sum::<f32>()
/ qualities.len() as f32;
variance
};
let scalability_factor = (1.0 - quality_variance).max(0.0).min(1.0);
PerformanceBenchmarks {
target_comparison,
historical_comparison,
peer_comparison: None, performance_percentile: self
.calculate_performance_percentile(current, historical),
latency_percentiles: (p50, p95, p99),
throughput_mbps,
resource_efficiency,
scalability_factor,
}
} else {
PerformanceBenchmarks::default()
}
} else {
PerformanceBenchmarks::default()
}
}
async fn calculate_degradation_indicators(
&self,
window: &VecDeque<QualityMeasurement>,
historical: &VecDeque<StreamingQuality>,
) -> DegradationIndicators {
if historical.len() < 10 {
return DegradationIndicators::default();
}
let recent_quality =
historical.iter().rev().take(5).map(|q| q.overall_quality).sum::<f32>() / 5.0;
let earlier_quality =
historical.iter().rev().skip(5).take(5).map(|q| q.overall_quality).sum::<f32>() / 5.0;
let is_degrading = recent_quality < earlier_quality - 0.05;
let degradation_rate = if is_degrading {
(earlier_quality - recent_quality) / 5.0 } else {
0.0
};
let critical_areas = self.identify_critical_areas(window).await;
let recommended_actions = if is_degrading {
self.generate_optimization_recommendations().await
} else {
Vec::new()
};
let buffer_overflows = if window.len() >= self.window_size {
if window.len() as f32 / self.window_size as f32 > 0.95 {
1
} else {
0
}
} else {
0
};
let window_vec: Vec<&QualityMeasurement> = window.iter().collect();
let recovery_events = window_vec
.windows(2)
.filter(|pair| {
pair[0].score < 0.7 && pair[1].score >= 0.7
})
.count();
DegradationIndicators {
is_degrading,
degradation_rate,
time_to_threshold_breach: self.estimate_time_to_threshold_breach(degradation_rate),
critical_areas,
recommended_actions,
buffer_overflows,
quality_drops: window.iter().filter(|m| m.score < 0.7).count(),
recovery_events,
timing_violations: window.iter().filter(|m| m.latency_ms > 200.0).count(),
}
}
fn calculate_trend_direction(&self, recent: f32, earlier: f32) -> TrendDirection {
let threshold = 0.05;
if recent > earlier + threshold {
TrendDirection::Improving
} else if recent < earlier - threshold {
TrendDirection::Declining
} else {
TrendDirection::Stable
}
}
fn calculate_metric_trend<F>(
&self,
recent: &[QualityMeasurement],
earlier: &[QualityMeasurement],
extractor: F,
) -> TrendDirection
where
F: Fn(&QualityMeasurement) -> f32,
{
let recent_avg = recent.iter().map(&extractor).sum::<f32>() / recent.len() as f32;
let earlier_avg = earlier.iter().map(&extractor).sum::<f32>() / earlier.len() as f32;
self.calculate_trend_direction(recent_avg, earlier_avg)
}
fn calculate_timing_variance(&self, window: &VecDeque<QualityMeasurement>) -> f32 {
if window.len() < 2 {
return 0.0;
}
let latencies: Vec<f64> = window.iter().map(|m| m.latency_ms).collect();
let mean = latencies.iter().sum::<f64>() / latencies.len() as f64;
let variance =
latencies.iter().map(|&l| (l - mean).powi(2)).sum::<f64>() / latencies.len() as f64;
(variance.sqrt() / mean.max(1.0)) as f32
}
fn calculate_length_consistency_factor(&self, chunk: &StreamChunk) -> f32 {
let length = chunk.content.len();
if length < 5 {
0.6
} else if length > 200 {
0.8
} else {
1.0
}
}
fn analyze_linguistic_patterns(&self, content: &str) -> f32 {
let mut score: f32 = 1.0;
if content.contains("um") || content.contains("uh") || content.contains("er") {
score *= 1.1; }
if let Some(first_char) = content.chars().next() {
if first_char.is_lowercase() && content.len() > 1 {
score *= 0.9;
}
}
let words: Vec<&str> = content.split_whitespace().collect();
if words.len() > 1 {
let mut repeated = 0;
for i in 1..words.len() {
if words[i] == words[i - 1] {
repeated += 1;
}
}
if repeated > words.len() / 3 {
score *= 0.7;
}
}
score.max(0.0_f32).min(1.0_f32)
}
async fn analyze_content_flow(&self, _chunk: &StreamChunk) -> f32 {
0.9
}
fn analyze_punctuation_structure(&self, content: &str) -> f32 {
let mut score: f32 = 1.0;
if content.trim().ends_with(['.', '!', '?']) {
score *= 1.1;
}
let comma_count = content.matches(',').count();
let word_count = content.split_whitespace().count();
if word_count > 10 && comma_count == 0 {
score *= 0.9; }
score.max(0.0_f32).min(1.0_f32)
}
fn analyze_content_coherence(&self, chunk: &StreamChunk) -> f32 {
let content = &chunk.content.trim();
if content.is_empty() {
return 0.0;
}
let mut coherence = 0.9;
if matches!(chunk.chunk_type, ChunkType::Sentence) && !content.ends_with(['.', '!', '?']) {
coherence *= 0.8;
}
if chunk.content.starts_with(' ') || chunk.content.ends_with(' ') {
coherence *= 0.95; }
coherence
}
async fn analyze_context_consistency(&self, _chunk: &StreamChunk) -> f32 {
0.9
}
fn analyze_semantic_coherence(&self, _chunk: &StreamChunk) -> f32 {
0.9
}
fn calculate_mean(&self, values: &[f32]) -> f32 {
if values.is_empty() {
0.0
} else {
values.iter().sum::<f32>() / values.len() as f32
}
}
fn calculate_std_dev(&self, values: &[f32]) -> f32 {
if values.len() < 2 {
return 0.0;
}
let mean = self.calculate_mean(values);
let variance =
values.iter().map(|&x| (x - mean).powi(2)).sum::<f32>() / values.len() as f32;
variance.sqrt()
}
fn calculate_variance(&self, values: &[f32]) -> f32 {
self.calculate_std_dev(values).powi(2)
}
fn calculate_percentile_value(&self, values: &[f32], percentile: f32) -> f32 {
if values.is_empty() {
return 0.0;
}
let mut sorted = values.to_vec();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
let index = ((percentile * (sorted.len() - 1) as f32) as usize).min(sorted.len() - 1);
sorted[index]
}
fn calculate_percentiles(
&self,
smoothness: &[f32],
naturalness: &[f32],
responsiveness: &[f32],
coherence: &[f32],
) -> QualityPercentiles {
QualityPercentiles {
p25: QualityScores {
smoothness: self.percentile(smoothness, 0.25),
naturalness: self.percentile(naturalness, 0.25),
responsiveness: self.percentile(responsiveness, 0.25),
coherence: self.percentile(coherence, 0.25),
},
p50: QualityScores {
smoothness: self.percentile(smoothness, 0.50),
naturalness: self.percentile(naturalness, 0.50),
responsiveness: self.percentile(responsiveness, 0.50),
coherence: self.percentile(coherence, 0.50),
},
p75: QualityScores {
smoothness: self.percentile(smoothness, 0.75),
naturalness: self.percentile(naturalness, 0.75),
responsiveness: self.percentile(responsiveness, 0.75),
coherence: self.percentile(coherence, 0.75),
},
p90: QualityScores {
smoothness: self.percentile(smoothness, 0.90),
naturalness: self.percentile(naturalness, 0.90),
responsiveness: self.percentile(responsiveness, 0.90),
coherence: self.percentile(coherence, 0.90),
},
p95: QualityScores {
smoothness: self.percentile(smoothness, 0.95),
naturalness: self.percentile(naturalness, 0.95),
responsiveness: self.percentile(responsiveness, 0.95),
coherence: self.percentile(coherence, 0.95),
},
}
}
fn percentile(&self, values: &[f32], p: f32) -> f32 {
if values.is_empty() {
return 0.0;
}
let mut sorted = values.to_vec();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
let index = ((sorted.len() - 1) as f32 * p) as usize;
sorted[index]
}
fn calculate_correlations(&self, window: &VecDeque<QualityMeasurement>) -> QualityCorrelations {
if window.len() < 2 {
return QualityCorrelations::default();
}
let smoothness: Vec<f32> = window.iter().map(|m| m.smoothness).collect();
let naturalness: Vec<f32> = window.iter().map(|m| m.naturalness).collect();
let responsiveness: Vec<f32> = window.iter().map(|m| m.responsiveness).collect();
let coherence: Vec<f32> = window.iter().map(|m| m.coherence).collect();
let latency: Vec<f32> = window.iter().map(|m| m.latency_ms as f32).collect();
let consistency: Vec<f32> = window.iter().map(|m| m.chunk_consistency).collect();
QualityCorrelations {
smoothness_naturalness: self.correlation(&smoothness, &naturalness),
responsiveness_coherence: self.correlation(&responsiveness, &coherence),
latency_quality: -self.correlation(&latency, &smoothness), consistency_smoothness: self.correlation(&consistency, &smoothness),
}
}
fn correlation(&self, x: &[f32], y: &[f32]) -> f32 {
if x.len() != y.len() || x.len() < 2 {
return 0.0;
}
let mean_x = self.calculate_mean(x);
let mean_y = self.calculate_mean(y);
let numerator: f32 =
x.iter().zip(y.iter()).map(|(&xi, &yi)| (xi - mean_x) * (yi - mean_y)).sum();
let sum_sq_x: f32 = x.iter().map(|&xi| (xi - mean_x).powi(2)).sum();
let sum_sq_y: f32 = y.iter().map(|&yi| (yi - mean_y).powi(2)).sum();
let denominator = (sum_sq_x * sum_sq_y).sqrt();
if denominator == 0.0 {
0.0
} else {
numerator / denominator
}
}
fn compare_to_baseline(
&self,
current: &StreamingQuality,
baseline: &StreamingQuality,
) -> BenchmarkComparison {
let relative_performance = (current.overall_quality - baseline.overall_quality)
/ baseline.overall_quality.max(0.1);
BenchmarkComparison {
relative_performance: relative_performance.max(-1.0).min(1.0),
improvement_potential: (1.0 - current.overall_quality).max(0.0),
confidence_level: 0.8, }
}
fn compare_to_historical(
&self,
current: &StreamingQuality,
historical: &VecDeque<StreamingQuality>,
) -> BenchmarkComparison {
if historical.len() < 5 {
return BenchmarkComparison::default();
}
let historical_avg =
historical.iter().map(|q| q.overall_quality).sum::<f32>() / historical.len() as f32;
let relative_performance =
(current.overall_quality - historical_avg) / historical_avg.max(0.1);
BenchmarkComparison {
relative_performance: relative_performance.max(-1.0).min(1.0),
improvement_potential: (1.0 - current.overall_quality).max(0.0),
confidence_level: 0.9,
}
}
/// Empirical percentile of the current sample within the historical
/// window: the fraction of past samples whose overall quality the
/// current measurement strictly exceeds, in [0.0, 1.0].
///
/// Returns 0.5 (median) when there is no history to rank against.
fn calculate_performance_percentile(
    &self,
    current: &StreamingQuality,
    historical: &VecDeque<StreamingQuality>,
) -> f32 {
    if historical.is_empty() {
        return 0.5;
    }
    // Number of historical samples the current measurement outperforms.
    let outperformed = historical
        .iter()
        .filter(|past| past.overall_quality < current.overall_quality)
        .count() as f32;
    outperformed / historical.len() as f32
}
/// Collect the quality dimensions of the most recent measurement that
/// fall below their configured minimums.
///
/// Only the newest window entry is inspected; an empty window yields an
/// empty list. Chunk consistency is compared against a hard-coded 0.7
/// floor (no dedicated threshold field exists for it).
async fn identify_critical_areas(
    &self,
    window: &VecDeque<QualityMeasurement>,
) -> Vec<QualityArea> {
    let latest = match window.back() {
        Some(measurement) => measurement,
        None => return Vec::new(),
    };
    // (below-threshold?, affected area) pairs, in reporting order.
    let checks = [
        (latest.smoothness < self.thresholds.min_smoothness, QualityArea::Smoothness),
        (latest.naturalness < self.thresholds.min_naturalness, QualityArea::Naturalness),
        (
            latest.responsiveness < self.thresholds.min_responsiveness,
            QualityArea::Responsiveness,
        ),
        (latest.coherence < self.thresholds.min_coherence, QualityArea::Coherence),
        (latest.chunk_consistency < 0.7, QualityArea::Consistency),
    ];
    checks.into_iter().filter_map(|(breached, area)| breached.then_some(area)).collect()
}
/// Rough projection of how long until quality falls below the minimum
/// overall threshold, assuming a constant degradation rate per period.
///
/// Returns `None` when quality is stable or improving (rate <= 0).
/// Each projected period is treated as 60 seconds.
fn estimate_time_to_threshold_breach(&self, degradation_rate: f32) -> Option<Duration> {
    if degradation_rate <= 0.0 {
        return None;
    }
    // NOTE(review): this measures headroom from the threshold value itself,
    // not from the current quality level — a conservative estimate; confirm
    // against the caller's intent.
    let time_periods = (self.thresholds.min_overall_quality / degradation_rate) as u64;
    // The float->int cast saturates at u64::MAX for tiny positive rates;
    // saturating_mul prevents the subsequent `* 60` from overflowing
    // (which would panic in debug builds).
    Some(Duration::from_secs(time_periods.saturating_mul(60)))
}
}
impl Default for PerceptualQuality {
    /// Optimistic starting point: strong (0.8) scores across the board,
    /// except cognitive load, which defaults low (less load is better).
    fn default() -> Self {
        Self {
            user_experience_score: 0.8,
            fluency: 0.8,
            clarity: 0.8,
            engagement: 0.8,
            engagement_flow: 0.8,
            emotional_tone: 0.8,
            conversation_flow: 0.8,
            attention_retention: 0.8,
            cognitive_load: 0.3,
        }
    }
}
impl Default for StatisticalAnalysis {
    /// Empty analysis: zeroed counters and neutral score aggregates.
    fn default() -> Self {
        Self {
            chunk_count: 0,
            total_characters: 0,
            average_chunk_size: 0.0,
            distribution_skew: 0.0,
            mean_scores: QualityScores::default(),
            std_deviation: QualityScores::default(),
            variance: QualityScores::default(),
            percentiles: QualityPercentiles::default(),
            correlations: QualityCorrelations::default(),
        }
    }
}
impl Default for QualityScores {
    /// All four quality dimensions start at the nominal 0.8 baseline.
    fn default() -> Self {
        let baseline = 0.8;
        Self {
            smoothness: baseline,
            naturalness: baseline,
            responsiveness: baseline,
            coherence: baseline,
        }
    }
}
impl Default for QualityCorrelations {
    /// No observed relationship between any metric pair yet.
    fn default() -> Self {
        Self {
            consistency_smoothness: 0.0,
            latency_quality: 0.0,
            smoothness_naturalness: 0.0,
            responsiveness_coherence: 0.0,
        }
    }
}
impl Default for PerformanceBenchmarks {
    /// Neutral benchmark state: median percentile ranking, default
    /// comparisons, and no peer data.
    fn default() -> Self {
        Self {
            performance_percentile: 0.5,
            target_comparison: BenchmarkComparison::default(),
            historical_comparison: BenchmarkComparison::default(),
            peer_comparison: None,
            // presumably (p50, p95, p99) in milliseconds — verify against callers
            latency_percentiles: (100.0, 200.0, 300.0),
            throughput_mbps: 1.0,
            resource_efficiency: 0.8,
            scalability_factor: 1.0,
        }
    }
}
impl Default for BenchmarkComparison {
    /// On-par performance with modest improvement headroom and low
    /// confidence (no data backing the comparison yet).
    fn default() -> Self {
        Self {
            confidence_level: 0.5,
            relative_performance: 0.0,
            improvement_potential: 0.2,
        }
    }
}
impl Default for DegradationIndicators {
    /// Healthy stream: no degradation detected, no pending actions, and
    /// every event counter zeroed.
    fn default() -> Self {
        Self {
            is_degrading: false,
            degradation_rate: 0.0,
            time_to_threshold_breach: None,
            critical_areas: Vec::new(),
            recommended_actions: Vec::new(),
            buffer_overflows: 0,
            quality_drops: 0,
            timing_violations: 0,
            recovery_events: 0,
        }
    }
}
impl Default for QualityAnalyzer {
fn default() -> Self {
Self::new()
}
}
/// Async facade over the streaming quality-analysis pipeline.
///
/// Implemented below for `QualityAnalyzer`, where each method delegates
/// to an inherent method of the corresponding name.
#[async_trait]
pub trait QualityAnalysis {
    /// Score a single delivered chunk given how long delivery took.
    async fn analyze_quality(
        &self,
        chunk: &StreamChunk,
        delivery_time: Duration,
    ) -> QualityMeasurement;
    /// Aggregate quality over the accumulated measurement window.
    async fn get_overall_quality(&self) -> StreamingQuality;
    /// Whether current quality satisfies the configured thresholds.
    async fn meets_thresholds(&self) -> bool;
    /// Per-dimension trend directions for the quality metrics.
    async fn get_trends(&self) -> QualityTrends;
    /// Actionable suggestions for improving streaming quality.
    async fn get_recommendations(&self) -> Vec<OptimizationRecommendation>;
}
// Trait implementation: every method is a thin delegation to the
// analyzer's inherent method of the matching name; no extra logic here.
#[async_trait]
impl QualityAnalysis for QualityAnalyzer {
    async fn analyze_quality(
        &self,
        chunk: &StreamChunk,
        delivery_time: Duration,
    ) -> QualityMeasurement {
        self.analyze_chunk_quality(chunk, delivery_time).await
    }
    async fn get_overall_quality(&self) -> StreamingQuality {
        self.calculate_overall_quality().await
    }
    async fn meets_thresholds(&self) -> bool {
        self.meets_quality_thresholds().await
    }
    async fn get_trends(&self) -> QualityTrends {
        self.get_quality_trends().await
    }
    async fn get_recommendations(&self) -> Vec<OptimizationRecommendation> {
        self.generate_optimization_recommendations().await
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    /// Build a content chunk with the given text and complexity score
    /// for feeding into the analyzer under test.
    fn make_chunk(content: &str, complexity: f32) -> StreamChunk {
        StreamChunk {
            content: content.to_string(),
            index: 0,
            chunk_type: ChunkType::Content,
            timing: ChunkTiming::default(),
            metadata: ChunkMetadata::with_complexity(complexity),
        }
    }

    // --- Default-value invariants: every score must lie in [0.0, 1.0]. ---

    #[test]
    fn test_streaming_quality_default_values_in_range() {
        let quality = StreamingQuality::default();
        assert!(
            quality.smoothness >= 0.0 && quality.smoothness <= 1.0,
            "smoothness must be in [0.0, 1.0]"
        );
        assert!(
            quality.naturalness >= 0.0 && quality.naturalness <= 1.0,
            "naturalness must be in [0.0, 1.0]"
        );
        assert!(
            quality.responsiveness >= 0.0 && quality.responsiveness <= 1.0,
            "responsiveness must be in [0.0, 1.0]"
        );
        assert!(
            quality.coherence >= 0.0 && quality.coherence <= 1.0,
            "coherence must be in [0.0, 1.0]"
        );
        assert!(
            quality.overall_quality >= 0.0 && quality.overall_quality <= 1.0,
            "overall_quality must be in [0.0, 1.0]"
        );
    }

    #[test]
    fn test_streaming_quality_default_chunk_consistency_in_range() {
        let quality = StreamingQuality::default();
        assert!(quality.chunk_consistency >= 0.0 && quality.chunk_consistency <= 1.0);
        assert!(quality.flow_smoothness >= 0.0 && quality.flow_smoothness <= 1.0);
        assert!(quality.timing_accuracy >= 0.0 && quality.timing_accuracy <= 1.0);
        assert!(quality.buffer_efficiency >= 0.0 && quality.buffer_efficiency <= 1.0);
    }

    // --- Threshold configuration sanity: defaults must be ordered and positive. ---

    #[test]
    fn test_quality_thresholds_default_min_overall_quality_in_range() {
        let thresholds = QualityThresholds::default();
        assert!(
            thresholds.min_overall_quality >= 0.0 && thresholds.min_overall_quality <= 1.0,
            "min_overall_quality must be in [0.0, 1.0]"
        );
    }

    #[test]
    fn test_quality_thresholds_minimum_acceptable_less_than_target() {
        let thresholds = QualityThresholds::default();
        assert!(
            thresholds.minimum_acceptable <= thresholds.target_quality,
            "minimum_acceptable ({}) should be <= target_quality ({})",
            thresholds.minimum_acceptable,
            thresholds.target_quality
        );
    }

    #[test]
    fn test_quality_thresholds_target_less_than_excellent() {
        let thresholds = QualityThresholds::default();
        assert!(
            thresholds.target_quality <= thresholds.excellent_threshold,
            "target_quality ({}) should be <= excellent_threshold ({})",
            thresholds.target_quality,
            thresholds.excellent_threshold
        );
    }

    #[test]
    fn test_quality_thresholds_max_latency_positive() {
        let thresholds = QualityThresholds::default();
        assert!(
            thresholds.max_latency_ms > 0.0,
            "max_latency_ms should be positive, got {}",
            thresholds.max_latency_ms
        );
    }

    // --- Constructor behavior: new() and default() agree on window size. ---

    #[test]
    fn test_quality_analyzer_new() {
        let analyzer = QualityAnalyzer::new();
        assert_eq!(
            analyzer.window_size(),
            100,
            "default window size should be 100"
        );
    }

    #[test]
    fn test_quality_analyzer_default() {
        let analyzer = QualityAnalyzer::default();
        assert_eq!(analyzer.window_size(), 100);
    }

    // --- Chunk analysis: every produced score stays normalized. ---

    #[tokio::test]
    async fn test_quality_analyzer_analyze_chunk_quality_returns_measurement() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Hello world, this is a test sentence.", 0.5);
        let delivery_time = Duration::from_millis(80);
        let measurement = analyzer.analyze_chunk_quality(&chunk, delivery_time).await;
        assert!(
            measurement.smoothness >= 0.0 && measurement.smoothness <= 1.0,
            "smoothness must be in [0.0, 1.0]"
        );
        assert!(
            measurement.naturalness >= 0.0 && measurement.naturalness <= 1.0,
            "naturalness must be in [0.0, 1.0]"
        );
        assert!(
            measurement.responsiveness >= 0.0 && measurement.responsiveness <= 1.0,
            "responsiveness must be in [0.0, 1.0]"
        );
        assert!(
            measurement.coherence >= 0.0 && measurement.coherence <= 1.0,
            "coherence must be in [0.0, 1.0]"
        );
    }

    #[tokio::test]
    async fn test_quality_analyzer_score_normalized() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Test content for score normalization check.", 0.4);
        let delivery_time = Duration::from_millis(50);
        let measurement = analyzer.analyze_chunk_quality(&chunk, delivery_time).await;
        assert!(
            measurement.score >= 0.0 && measurement.score <= 1.0,
            "overall score must be normalized in [0.0, 1.0], got {}",
            measurement.score
        );
    }

    // Monotonicity: slower delivery must never score higher responsiveness.
    #[tokio::test]
    async fn test_quality_analyzer_high_latency_lowers_responsiveness() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Test content for latency measurement.", 0.5);
        let fast_delivery = Duration::from_millis(50);
        let slow_delivery = Duration::from_millis(500);
        let fast_measurement = analyzer.analyze_chunk_quality(&chunk, fast_delivery).await;
        let slow_measurement = analyzer.analyze_chunk_quality(&chunk, slow_delivery).await;
        assert!(
            fast_measurement.responsiveness >= slow_measurement.responsiveness,
            "faster delivery should produce >= responsiveness score: {} >= {}",
            fast_measurement.responsiveness,
            slow_measurement.responsiveness
        );
    }

    #[tokio::test]
    async fn test_quality_analyzer_calculate_overall_quality_bounded() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Some streaming content for quality check.", 0.5);
        // Seed the window with a few measurements before aggregating.
        for _ in 0..5 {
            analyzer.analyze_chunk_quality(&chunk, Duration::from_millis(100)).await;
        }
        let quality = analyzer.calculate_overall_quality().await;
        assert!(
            quality.overall_quality >= 0.0 && quality.overall_quality <= 1.0,
            "overall_quality must be in [0.0, 1.0]"
        );
        assert!(quality.smoothness >= 0.0 && quality.smoothness <= 1.0);
    }

    // Smoke test: only checks the call completes; either bool is acceptable
    // on a fresh analyzer.
    #[tokio::test]
    async fn test_quality_analyzer_meets_quality_thresholds_initially() {
        let analyzer = QualityAnalyzer::new();
        let meets = analyzer.meets_quality_thresholds().await;
        let _ = meets;
    }

    #[tokio::test]
    async fn test_quality_analyzer_quality_trends_stable_initially() {
        let analyzer = QualityAnalyzer::new();
        let trends = analyzer.get_quality_trends().await;
        // discriminant comparison checks the variant without requiring
        // TrendDirection to implement PartialEq.
        assert_eq!(
            std::mem::discriminant(&trends.overall_trend),
            std::mem::discriminant(&TrendDirection::Stable),
            "initial trend should be Stable"
        );
    }

    // Smoke test: recommendations call must not panic on an empty window.
    #[tokio::test]
    async fn test_quality_analyzer_get_optimization_recommendations() {
        let analyzer = QualityAnalyzer::new();
        let recommendations = analyzer.generate_optimization_recommendations().await;
        let _ = recommendations.len();
    }

    // --- Measurement window: accumulation and capacity enforcement. ---

    #[tokio::test]
    async fn test_quality_analyzer_accumulates_window() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Accumulate measurements in the quality window", 0.5);
        for i in 0..10 {
            let delivery_time = Duration::from_millis(50 + i * 10);
            analyzer.analyze_chunk_quality(&chunk, delivery_time).await;
        }
        let window = analyzer.metrics_window().read().await;
        assert_eq!(
            window.len(),
            10,
            "window should contain exactly 10 measurements"
        );
    }

    #[tokio::test]
    async fn test_quality_analyzer_window_respects_capacity() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Testing window capacity limit", 0.5);
        // Push more measurements (150) than the window capacity (100).
        for i in 0..150u64 {
            let delivery_time = Duration::from_millis(50 + i % 100);
            analyzer.analyze_chunk_quality(&chunk, delivery_time).await;
        }
        let window = analyzer.metrics_window().read().await;
        assert!(
            window.len() <= analyzer.window_size(),
            "window should not exceed capacity: {} <= {}",
            window.len(),
            analyzer.window_size()
        );
    }

    // --- Default invariants for the auxiliary report types. ---

    #[test]
    fn test_perceptual_quality_default_values_in_range() {
        let pq = PerceptualQuality::default();
        assert!(pq.fluency >= 0.0 && pq.fluency <= 1.0);
        assert!(pq.engagement >= 0.0 && pq.engagement <= 1.0);
        assert!(pq.clarity >= 0.0 && pq.clarity <= 1.0);
        assert!(pq.user_experience_score >= 0.0 && pq.user_experience_score <= 1.0);
        assert!(pq.cognitive_load >= 0.0 && pq.cognitive_load <= 1.0);
    }

    #[test]
    fn test_statistical_analysis_default_chunk_count_zero() {
        let stats = StatisticalAnalysis::default();
        assert_eq!(stats.chunk_count, 0);
        assert_eq!(stats.total_characters, 0);
    }

    #[test]
    fn test_degradation_indicators_default_not_degrading() {
        let indicators = DegradationIndicators::default();
        assert!(!indicators.is_degrading, "default should not be degrading");
        assert_eq!(indicators.degradation_rate, 0.0);
        assert!(indicators.time_to_threshold_breach.is_none());
    }

    // --- Trait facade: each QualityAnalysis method delegates correctly. ---

    #[tokio::test]
    async fn test_quality_analysis_trait_analyze_quality() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Trait delegation test content", 0.5);
        let measurement = analyzer.analyze_quality(&chunk, Duration::from_millis(75)).await;
        assert!(measurement.score >= 0.0 && measurement.score <= 1.0);
    }

    #[tokio::test]
    async fn test_quality_analysis_trait_get_overall_quality() {
        let analyzer = QualityAnalyzer::new();
        let quality = analyzer.get_overall_quality().await;
        assert!(quality.overall_quality >= 0.0 && quality.overall_quality <= 1.0);
    }

    // Smoke test: delegation completes without panic.
    #[tokio::test]
    async fn test_quality_analysis_trait_meets_thresholds() {
        let analyzer = QualityAnalyzer::new();
        let result = analyzer.meets_thresholds().await;
        let _ = result;
    }

    // Smoke test: delegation completes without panic.
    #[tokio::test]
    async fn test_quality_analysis_trait_get_trends() {
        let analyzer = QualityAnalyzer::new();
        let trends = analyzer.get_trends().await;
        let _ = trends;
    }

    // Smoke test: delegation completes without panic.
    #[tokio::test]
    async fn test_quality_analysis_trait_get_recommendations() {
        let analyzer = QualityAnalyzer::new();
        let recs = analyzer.get_recommendations().await;
        let _ = recs.len();
    }

    // --- Edge cases: extreme delivery times must not escape [0.0, 1.0]. ---

    #[tokio::test]
    async fn test_quality_score_very_slow_delivery_still_bounded() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Edge case: extremely slow delivery", 0.9);
        let measurement = analyzer.analyze_chunk_quality(&chunk, Duration::from_secs(10)).await;
        assert!(
            measurement.responsiveness >= 0.0 && measurement.responsiveness <= 1.0,
            "responsiveness must stay in [0.0, 1.0] even with very slow delivery"
        );
        assert!(
            measurement.score >= 0.0 && measurement.score <= 1.0,
            "overall score must stay in [0.0, 1.0] even with very slow delivery"
        );
    }

    #[tokio::test]
    async fn test_quality_score_instant_delivery_bounded() {
        let analyzer = QualityAnalyzer::new();
        let chunk = make_chunk("Edge case: instant delivery", 0.1);
        let measurement = analyzer.analyze_chunk_quality(&chunk, Duration::from_millis(1)).await;
        assert!(
            measurement.responsiveness >= 0.0 && measurement.responsiveness <= 1.0,
            "responsiveness must be in [0.0, 1.0] for instant delivery"
        );
    }
}