//! # `VoiRS` Evaluation
//!
//! Comprehensive quality evaluation and assessment framework for the `VoiRS` ecosystem.
//! This crate provides tools for evaluating speech synthesis quality, pronunciation accuracy,
//! and comparative analysis between different models or systems.
//!
//! ## Features
//!
//! - **Quality Evaluation**: Objective and subjective quality metrics
//! - **Pronunciation Assessment**: Phoneme-level accuracy scoring
//! - **Comparative Analysis**: Side-by-side evaluation of different systems
//! - **Perceptual Metrics**: Human-perception-aligned quality measures
//! - **Automated Scoring**: ML-based quality prediction
//!
//! ## Quick Start
//!
//! ```rust
//! use voirs_evaluation::quality::QualityEvaluator;
//! use voirs_evaluation::traits::QualityEvaluator as QualityEvaluatorTrait;
//! use voirs_sdk::AudioBuffer;
//!
//! # #[tokio::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create quality evaluator
//!     let evaluator = QualityEvaluator::new().await?;
//!
//!     // Create test audio buffers
//!     let generated = AudioBuffer::new(vec![0.1; 16000], 16000, 1);
//!     let reference = AudioBuffer::new(vec![0.12; 16000], 16000, 1);
//!
//!     // Evaluate quality
//!     let quality = evaluator.evaluate_quality(&generated, Some(&reference), None).await?;
//!     println!("Quality score: {:.2}", quality.overall_score);
//!
//! #   Ok(())
//! # }
//! ```

#![allow(missing_docs)]
#![warn(clippy::all)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::unused_async)] // Many async functions are part of API design
#![allow(clippy::cast_precision_loss)] // Acceptable for audio/signal processing
#![allow(clippy::cast_possible_truncation)] // Controlled in audio processing
#![allow(clippy::cast_sign_loss)] // Validated in implementation
#![allow(clippy::cast_lossless)] // Explicit casts for clarity
#![allow(clippy::unused_self)] // Trait implementations require &self
#![allow(clippy::must_use_candidate)] // Would require extensive API changes
#![allow(clippy::missing_errors_doc)] // Errors are self-explanatory
#![allow(clippy::missing_panics_doc)] // Panics are documented where critical
#![allow(clippy::uninlined_format_args)] // Older format syntax used consistently
#![allow(clippy::similar_names)] // Domain-specific naming (e.g., mfcc1, mfcc2)
#![allow(clippy::unnecessary_wraps)] // API consistency requires Result types
#![allow(clippy::format_push_string)] // String building in formatters
#![allow(clippy::manual_clamp)] // Explicit bounds checking for clarity
#![allow(clippy::doc_markdown)] // Technical terms don't need backticks
#![allow(clippy::return_self_not_must_use)] // Builder pattern convenience
#![allow(clippy::if_not_else)] // Conditional logic clarity
#![allow(clippy::redundant_closure_for_method_calls)] // Explicit closures for readability
#![allow(clippy::match_same_arms)] // Explicit matching for different cases
#![allow(clippy::inefficient_to_string)] // Minimal performance impact
#![allow(clippy::needless_pass_by_value)] // API design choices
#![allow(clippy::too_many_lines)] // Complex evaluation algorithms
#![allow(clippy::struct_excessive_bools)] // Configuration structs need flags
#![allow(clippy::needless_range_loop)] // Explicit indexing for clarity
#![allow(clippy::wildcard_imports)] // Prelude and common imports
#![allow(clippy::single_char_add_str)] // Minor performance impact
#![allow(clippy::map_unwrap_or)] // Explicit error handling
#![allow(clippy::excessive_precision)] // Scientific/audio constants
#![allow(clippy::cast_possible_wrap)] // Controlled integer conversions
#![allow(clippy::cloned_instead_of_copied)] // Generic over Copy/Clone
#![allow(clippy::useless_vec)] // Vector literals for test data
#![allow(clippy::ptr_as_ptr)] // FFI and C-compatible code
#![allow(clippy::manual_let_else)] // Explicit error handling preferred
#![allow(clippy::unnecessary_cast)] // Explicit types for clarity
#![allow(clippy::trivially_copy_pass_by_ref)] // Trait method signatures
#![allow(clippy::items_after_statements)] // Logical grouping of code
#![allow(clippy::too_many_arguments)] // Complex evaluation functions
#![allow(clippy::new_without_default)] // Constructors with validation
#![allow(clippy::needless_borrow)] // Explicit borrowing for clarity
#![allow(clippy::derivable_impls)] // Explicit default implementations
#![allow(clippy::clone_on_copy)] // Explicit cloning in generic code
#![allow(clippy::useless_format)] // Format strings for consistency
#![allow(clippy::unwrap_or_default)] // Explicit defaults preferred
#![allow(clippy::single_match_else)] // Explicit match arms
#![allow(clippy::vec_init_then_push)] // Clear vector building
#![allow(clippy::unnecessary_mut_passed)] // Mutable references for clarity
#![allow(clippy::manual_range_contains)] // Explicit bounds checking
#![allow(clippy::len_zero)] // Explicit length checks
#![allow(clippy::float_cmp)] // Acceptable for test assertions
#![allow(clippy::range_plus_one)] // Inclusive range clarity
#![allow(clippy::manual_string_new)] // Explicit string creation
#![allow(clippy::should_implement_trait)] // Custom trait implementations
#![allow(clippy::let_and_return)] // Named intermediate values
#![allow(clippy::type_complexity)] // Necessary for complex types
#![allow(clippy::collapsible_else_if)] // Explicit conditional logic
#![allow(clippy::collapsible_if)] // Clear condition separation
#![allow(clippy::collapsible_match)] // Explicit pattern matching
#![allow(clippy::single_char_pattern)] // Character patterns for clarity
#![allow(clippy::needless_borrows_for_generic_args)] // Explicit borrowing
#![allow(clippy::default_trait_access)] // Explicit Default::default()
#![allow(clippy::empty_line_after_doc_comments)] // Documentation formatting
#![allow(clippy::bool_to_int_with_if)] // Explicit boolean conversion
#![allow(clippy::manual_ok_err)] // Explicit Result construction
#![allow(clippy::match_like_matches_macro)] // Explicit matching
#![allow(clippy::needless_continue)] // Loop control clarity
#![allow(clippy::explicit_iter_loop)] // Explicit iteration
#![allow(clippy::semicolon_if_nothing_returned)] // Expression clarity
#![allow(clippy::unnecessary_map_or)] // Explicit Option handling
#![allow(clippy::ref_option)] // FFI compatibility
#![allow(clippy::used_underscore_binding)] // Prefixed variables for clarity
#![allow(clippy::ip_constant)] // Test and example IP addresses
#![allow(clippy::for_kv_map)] // Explicit iteration patterns
#![allow(clippy::assigning_clones)] // Explicit clone operations
#![allow(clippy::manual_map)] // Explicit mapping logic
#![allow(clippy::manual_flatten)] // Explicit flattening
#![allow(clippy::await_holding_lock)] // Controlled lock scope
#![allow(clippy::borrowed_box)] // API compatibility
#![allow(clippy::unnecessary_literal_bound)] // Explicit type bounds
#![allow(clippy::borrow_as_ptr)] // Pointer conversions
#![allow(clippy::case_sensitive_file_extension_comparisons)] // Platform compatibility
#![allow(clippy::comparison_chain)] // Explicit comparisons
#![allow(clippy::format_collect)] // String building
#![allow(clippy::if_same_then_else)] // Conditional clarity
#![allow(clippy::implicit_saturating_sub)] // Explicit arithmetic
#![allow(clippy::iter_kv_map)] // Iterator patterns
#![allow(clippy::match_result_ok)] // Explicit Result handling
#![allow(clippy::match_wildcard_for_single_variants)] // Exhaustive matching
#![allow(clippy::missing_const_for_thread_local)] // Runtime initialization
#![allow(clippy::mixed_attributes_style)] // Attribute formatting
#![allow(clippy::stable_sort_primitive)] // Sort algorithm choice
#![allow(clippy::struct_field_names)] // Descriptive field names
#![allow(clippy::unnecessary_debug_formatting)] // Debug trait usage
#![allow(clippy::useless_asref)] // Explicit references
#![allow(clippy::useless_conversion)] // Type clarity

// Re-export core VoiRS types
pub use voirs_recognizer::traits::{PhonemeAlignment, Transcript};
pub use voirs_sdk::{AudioBuffer, LanguageCode, Phoneme, VoirsError};

// Public API modules
pub mod accuracy_benchmarks;
pub mod advanced_preprocessing;
pub mod audio;
/// Audit trail system for compliance and security monitoring
pub mod audit;
pub mod automated_benchmarks;
pub mod benchmark_export;
pub mod benchmark_runner;
pub mod benchmarks;
/// Advanced result caching system with multiple backends
pub mod caching;
/// Commercial tool comparison framework for speech evaluation systems
pub mod commercial_tool_comparison;
pub mod comparison;
pub mod compliance;
/// Compliance testing suite for standards validation
pub mod compliance_testing;
/// Context-aware evaluation system for speech synthesis
pub mod context_aware;
/// Conversational quality assessment for dialogue systems
pub mod conversational;
/// C++ header-only interface for evaluation framework
pub mod cpp_bindings;
/// Cross-language evaluation accuracy validation framework
pub mod cross_language_validation;
/// Critical Success Factors (CSF) validation framework
pub mod csf_validation;
/// Advanced data quality validation and dataset management utilities
pub mod data_quality_validation;
/// Data versioning system for benchmark and evaluation results
pub mod data_versioning;
pub mod dataset_management;
/// Deep learning-based evaluation metrics with neural MOS prediction
pub mod deep_learning_metrics;
pub mod distributed;
/// Automated documentation generation for evaluation results
pub mod doc_generation;
/// Enterprise security framework with RBAC, encryption, and compliance
pub mod enterprise_security;
/// Enhanced error message generation utilities
pub mod error_enhancement;
/// Fairness-aware evaluation for bias detection and demographic parity
pub mod fairness;
/// Federated evaluation system for distributed processing
pub mod federated;
pub mod fuzzing;
/// GraphQL API for complex evaluation queries
pub mod graphql;
/// Ground truth dataset management for evaluation validation
pub mod ground_truth_dataset;
pub mod integration;
/// Kubernetes deployment configuration for distributed evaluation
pub mod kubernetes;
/// Enhanced logging and debugging utilities
pub mod logging;
/// MATLAB/Octave bindings for evaluation framework
pub mod matlab_bindings;
/// Metric reliability and reproducibility testing framework
pub mod metric_reliability_testing;
/// Metrics comparison and regression detection
pub mod metrics_comparison;
/// Metrics explainability system for interpretable evaluation
pub mod metrics_explainability;
/// Multi-turn dialogue evaluation for extended conversations
pub mod multi_turn_dialogue;
/// Multi-region deployment configuration for global distribution
pub mod multiregion;
/// Node.js/JavaScript bindings for evaluation framework
pub mod nodejs_bindings;
/// Monitoring and observability framework with Prometheus and tracing
pub mod observability;
pub mod perceptual;
pub mod performance;
/// Performance enhancement utilities for faster evaluation
pub mod performance_enhancements;
pub mod performance_monitor;
pub mod platform;
/// Plugin system for custom evaluation metrics
pub mod plugins;
/// Numerical precision utilities for high-accuracy calculations
pub mod precision;
/// Privacy-preserving evaluation framework with differential privacy
pub mod privacy;
pub mod pronunciation;
/// Protocol documentation and compliance validation utilities
pub mod protocol_documentation;
pub mod quality;
/// Quality gate validation system for automated quality assurance
pub mod quality_gates;
/// R statistical analysis integration (optional, requires R installation)
#[cfg(feature = "r-integration")]
pub mod r_integration;
/// R package creation foundation for VoiRS evaluation
#[cfg(feature = "r-integration")]
pub mod r_package_foundation;
/// Role-Based Access Control (RBAC) for enterprise security
pub mod rbac;
pub mod regression_detector;
pub mod regression_testing;
/// Reproducibility guarantees system for deterministic evaluation
pub mod reproducibility;
/// REST API interface for evaluation services
pub mod rest_api;
/// Semantic similarity evaluation for speech content analysis
pub mod semantic_similarity;
/// Industry standards compliance module (ANSI, ISO/IEC, AES, ITU-T)
pub mod standards;
pub mod statistical;
/// Enhanced statistical analysis utilities
pub mod statistical_enhancements;
/// Task-oriented dialogue evaluation for goal-driven interactions
pub mod task_oriented;
pub mod traits;
/// User experience (UX) evaluation for usability and satisfaction
pub mod user_experience;
pub mod validation;
/// Validation certificate generator for certified evaluation results
pub mod validation_certificates;
/// WebSocket interface for real-time evaluation services
pub mod websocket;
/// Evaluation workflow system for automated pipelines
pub mod workflows;

// Python bindings (optional, enabled with "python" feature)
#[cfg(feature = "python")]
pub mod python;

// Re-export performance optimizations
pub use performance::{multi_gpu, LRUCache, PersistentCache, SlidingWindowProcessor};

// Re-export all public types from traits
pub use traits::*;

// Note: Feature module types are not glob re-exported to avoid ambiguity.
// Import from specific modules: voirs_evaluation::audio::*, voirs_evaluation::perceptual::*, etc.
// Or use the prelude: use voirs_evaluation::prelude::*;

// Re-export R integration when feature is enabled
#[cfg(feature = "r-integration")]
pub use r_integration::*;

// Re-export Python bindings when feature is enabled
#[cfg(feature = "python")]
pub use python::*;

/// Version information
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Convenient prelude for common imports
pub mod prelude {
    //! Prelude module for convenient imports
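    //!
    //! A minimal usage sketch: a single glob import brings in the common traits
    //! and evaluator types re-exported below.
    //!
    //! ```
    //! use voirs_evaluation::prelude::*;
    //! ```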

    pub use crate::traits::{
        ComparativeEvaluator, ComparisonMetric, ComparisonResult, EvaluationResult,
        PronunciationEvaluator, PronunciationMetric, PronunciationScore,
        QualityEvaluator as QualityEvaluatorTrait, QualityMetric, QualityScore,
        SelfEvaluationResult, SelfEvaluator,
    };

    pub use crate::audio::{
        AudioFormat, AudioLoader, LoadOptions, StreamingConfig, StreamingEvaluator,
    };
    pub use crate::comparison::ComparativeEvaluatorImpl;
    pub use crate::compliance::{
        ComplianceChecker, ComplianceConfig, ComplianceResult, ComplianceStatus,
    };
    pub use crate::integration::{EcosystemConfig, EcosystemEvaluator, EcosystemResults};
    pub use crate::perceptual::{
        EnhancedMultiListenerSimulator, IntelligibilityMonitor, MultiListenerConfig,
    };
    pub use crate::performance_enhancements::{CacheStats, OptimizedQualityEvaluator};
    pub use crate::platform::{DeploymentConfig, PlatformCompatibility, PlatformInfo};
    pub use crate::plugins::{
        EvaluationContext, ExampleMetricPlugin, MetricPlugin, MetricResult, PluginConfig,
        PluginError, PluginInfo, PluginManager,
    };
    pub use crate::pronunciation::PronunciationEvaluatorImpl;
    pub use crate::quality::{
        AdvancedSpectralAnalysis, AgeGroup, ChildrenEvaluationConfig, ChildrenEvaluationResult,
        ChildrenSpeechEvaluator, CochlearImplantStrategy, CulturalRegion, ElderlyAgeGroup,
        ElderlyPathologicalConfig, ElderlyPathologicalEvaluator, ElderlyPathologicalResult,
        EmotionType, EmotionalEvaluationConfig, EmotionalSpeechEvaluationResult,
        EmotionalSpeechEvaluator, ExpressionStyle, HearingAidType, ModelArchitecture, NeuralConfig,
        NeuralEvaluator, NeuralQualityAssessment, PathologicalCondition, PersonalityTrait,
        PsychoacousticAnalysis, PsychoacousticConfig, PsychoacousticEvaluator, QualityEvaluator,
        SeverityLevel, SingingEvaluationConfig, SingingEvaluationResult, SingingEvaluator,
        SpectralAnalysisConfig, SpectralAnalyzer,
    };
    pub use crate::validation::{ValidationConfig, ValidationFramework, ValidationResult};
    pub use crate::websocket::{
        RealtimeAnalysis, SessionConfig, WebSocketConfig, WebSocketError, WebSocketMessage,
        WebSocketSessionManager,
    };

    // Re-export R integration when feature is enabled
    #[cfg(feature = "r-integration")]
    pub use crate::r_integration::{
        RAnovaResult, RArimaModel, RDataFrame, RGamModel, RKmeansResult, RLinearModel,
        RLogisticModel, RPcaResult, RRandomForestModel, RSession, RSurvivalModel, RTestResult,
        RTimeSeriesResult, RValue,
    };

    // Re-export SDK types
    pub use voirs_recognizer::traits::{PhonemeAlignment, Transcript};
    pub use voirs_sdk::{AudioBuffer, LanguageCode, Phoneme, VoirsError};

    // Re-export async trait
    pub use async_trait::async_trait;
}

// ============================================================================
// Error Types
// ============================================================================

/// Evaluation-specific error types
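///
/// # Examples
///
/// A minimal sketch of constructing an error and handing it to code that expects a
/// [`VoirsError`], via the `From` conversion defined below in this file:
///
/// ```
/// use voirs_evaluation::{EvaluationError, VoirsError};
///
/// let err = EvaluationError::InvalidInput {
///     message: "audio buffer is empty".to_string(),
/// };
/// // Invalid input is surfaced to the SDK as a configuration error.
/// let sdk_err: VoirsError = err.into();
/// assert!(matches!(sdk_err, VoirsError::ConfigError { .. }));
/// ```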
#[derive(Debug, thiserror::Error)]
pub enum EvaluationError {
    /// Quality evaluation failed
    #[error("Quality evaluation failed: {message}")]
    QualityEvaluationError {
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Pronunciation evaluation failed
    #[error("Pronunciation evaluation failed: {message}")]
    PronunciationEvaluationError {
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Comparison evaluation failed
    #[error("Comparison evaluation failed: {message}")]
    ComparisonError {
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Metric calculation failed
    #[error("Metric calculation failed: {metric} - {message}")]
    MetricCalculationError {
        /// Metric name
        metric: String,
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Audio processing error
    #[error("Audio processing error: {message}")]
    AudioProcessingError {
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// General processing error
    #[error("Processing error: {message}")]
    ProcessingError {
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Configuration error
    #[error("Configuration error: {message}")]
    ConfigurationError {
        /// Error message
        message: String,
    },

    /// Model error
    #[error("Model error: {message}")]
    ModelError {
        /// Error message
        message: String,
        /// Source error
        #[source]
        source: Option<Box<dyn std::error::Error + Send + Sync>>,
    },

    /// Invalid input
    #[error("Invalid input: {message}")]
    InvalidInput {
        /// Error message
        message: String,
    },

    /// Feature not supported
    #[error("Feature not supported: {feature}")]
    FeatureNotSupported {
        /// Feature name
        feature: String,
    },

    /// I/O error
    #[error("I/O error: {0}")]
    Io(String),

    /// Other error
    #[error("Error: {0}")]
    Other(String),
}

impl From<EvaluationError> for VoirsError {
    fn from(err: EvaluationError) -> Self {
        match err {
            EvaluationError::QualityEvaluationError { message, source } => {
                VoirsError::ModelError {
                    model_type: voirs_sdk::error::ModelType::Vocoder, // Use closest type
                    message,
                    source,
                }
            }
            EvaluationError::PronunciationEvaluationError { message, source } => {
                VoirsError::ModelError {
                    model_type: voirs_sdk::error::ModelType::ASR,
                    message,
                    source,
                }
            }
            EvaluationError::ComparisonError { message, source: _ } => VoirsError::AudioError {
                message,
                buffer_info: None,
            },
            EvaluationError::MetricCalculationError {
                metric,
                message,
                source: _,
            } => VoirsError::AudioError {
                message: format!("Metric calculation failed: {metric} - {message}"),
                buffer_info: None,
            },
            EvaluationError::AudioProcessingError { message, source: _ } => {
                VoirsError::AudioError {
                    message,
                    buffer_info: None,
                }
            }
            EvaluationError::ConfigurationError { message } => VoirsError::ConfigError {
                field: "evaluation".to_string(),
                message,
            },
            EvaluationError::ModelError { message, source } => VoirsError::ModelError {
                model_type: voirs_sdk::error::ModelType::Vocoder,
                message,
                source,
            },
            EvaluationError::InvalidInput { message } => VoirsError::ConfigError {
                field: "input".to_string(),
                message: format!("Invalid input: {message}"),
            },
            EvaluationError::FeatureNotSupported { feature } => VoirsError::ModelError {
                model_type: voirs_sdk::error::ModelType::Vocoder,
                message: format!("Feature not supported: {feature}"),
                source: None,
            },
            EvaluationError::ProcessingError { message, source: _ } => VoirsError::AudioError {
                message,
                buffer_info: None,
            },
            EvaluationError::Io(msg) => VoirsError::IoError {
                path: std::path::PathBuf::from("unknown"),
                operation: voirs_sdk::error::IoOperation::Read,
                source: std::io::Error::new(std::io::ErrorKind::Other, msg),
            },
            EvaluationError::Other(msg) => VoirsError::InternalError {
                component: "evaluation".to_string(),
                message: msg,
            },
        }
    }
}

impl From<VoirsError> for EvaluationError {
    fn from(err: VoirsError) -> Self {
        match err {
            VoirsError::ModelError {
                model_type: _,
                message,
                source,
            } => EvaluationError::ModelError { message, source },
            VoirsError::AudioError {
                message,
                buffer_info: _,
            } => EvaluationError::AudioProcessingError {
                message,
                source: None,
            },
            VoirsError::ConfigError { field: _, message } => {
                EvaluationError::ConfigurationError { message }
            }
            VoirsError::G2pError { message, .. } => EvaluationError::ModelError {
                message,
                source: None,
            },
            VoirsError::NetworkError { message, .. } => EvaluationError::ModelError {
                message: format!("Network error: {message}"),
                source: None,
            },
            VoirsError::IoError {
                path, operation, ..
            } => EvaluationError::ModelError {
                message: format!("IO error: {} on {}", operation, path.display()),
                source: None,
            },
            VoirsError::DataValidationFailed { data_type, reason } => {
                EvaluationError::InvalidInput {
                    message: format!("Validation failed for {data_type}: {reason}"),
                }
            }
            VoirsError::TextPreprocessingError { message, .. } => {
                EvaluationError::AudioProcessingError {
                    message: format!("Text preprocessing error: {message}"),
                    source: None,
                }
            }
            VoirsError::NotImplemented { feature } => {
                EvaluationError::FeatureNotSupported { feature }
            }
            VoirsError::ResourceExhausted { resource, details } => EvaluationError::ModelError {
                message: format!("Resource exhausted: {resource}: {details}"),
                source: None,
            },
            VoirsError::InternalError { component, message } => EvaluationError::ModelError {
                message: format!("Internal error in {component}: {message}"),
                source: None,
            },
            _ => EvaluationError::ModelError {
                message: format!("Unknown error: {err}"),
                source: None,
            },
        }
    }
}

impl From<scirs2_fft::error::FFTError> for EvaluationError {
    fn from(err: scirs2_fft::error::FFTError) -> Self {
        EvaluationError::AudioProcessingError {
            message: format!("FFT computation error: {err}"),
            source: Some(Box::new(err)),
        }
    }
}

// ============================================================================
// Utility Functions
// ============================================================================

/// Create a default quality evaluation configuration
#[must_use]
pub fn default_quality_config() -> QualityEvaluationConfig {
    QualityEvaluationConfig::default()
}

/// Create a default pronunciation evaluation configuration
#[must_use]
pub fn default_pronunciation_config() -> PronunciationEvaluationConfig {
    PronunciationEvaluationConfig::default()
}

/// Create a default comparison configuration
#[must_use]
pub fn default_comparison_config() -> ComparisonConfig {
    ComparisonConfig::default()
}

/// Validate that two audio buffers are compatible for evaluation (matching sample rate and channel count)
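///
/// # Examples
///
/// A minimal sketch with short, synthetic buffers (the sample values are
/// placeholders; only the format matters here):
///
/// ```
/// use voirs_evaluation::{validate_audio_compatibility, AudioBuffer};
///
/// let a = AudioBuffer::new(vec![0.0; 160], 16000, 1);
/// let b = AudioBuffer::new(vec![0.0; 160], 16000, 1);
/// assert!(validate_audio_compatibility(&a, &b).is_ok());
///
/// // A differing sample rate is rejected with `EvaluationError::InvalidInput`.
/// let c = AudioBuffer::new(vec![0.0; 220], 22050, 1);
/// assert!(validate_audio_compatibility(&a, &c).is_err());
/// ```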
pub fn validate_audio_compatibility(
    audio1: &AudioBuffer,
    audio2: &AudioBuffer,
) -> Result<(), EvaluationError> {
    if audio1.sample_rate() != audio2.sample_rate() {
        return Err(EvaluationError::InvalidInput {
            message: format!(
                "Sample rate mismatch: {} vs {}",
                audio1.sample_rate(),
                audio2.sample_rate()
            ),
        });
    }

    if audio1.channels() != audio2.channels() {
        return Err(EvaluationError::InvalidInput {
            message: format!(
                "Channel count mismatch: {} vs {}",
                audio1.channels(),
                audio2.channels()
            ),
        });
    }

    Ok(())
}

/// Calculate the Pearson correlation coefficient between two score vectors
///
/// Returns 0.0 if the slices differ in length, are empty, or have zero variance.
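///
/// # Examples
///
/// A minimal example, mirroring the unit tests at the bottom of this file:
/// perfectly linearly related scores give a coefficient of 1.0.
///
/// ```
/// use voirs_evaluation::calculate_correlation;
///
/// let a = [1.0, 2.0, 3.0, 4.0];
/// let b = [2.0, 4.0, 6.0, 8.0];
/// assert!((calculate_correlation(&a, &b) - 1.0).abs() < 1e-6);
/// ```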
#[must_use]
pub fn calculate_correlation(scores1: &[f32], scores2: &[f32]) -> f32 {
    if scores1.len() != scores2.len() || scores1.is_empty() {
        return 0.0;
    }

    let n = scores1.len() as f32;
    let mean1 = scores1.iter().sum::<f32>() / n;
    let mean2 = scores2.iter().sum::<f32>() / n;

    let mut numerator = 0.0;
    let mut sum_sq1 = 0.0;
    let mut sum_sq2 = 0.0;

    for (&s1, &s2) in scores1.iter().zip(scores2.iter()) {
        let diff1 = s1 - mean1;
        let diff2 = s2 - mean2;
        numerator += diff1 * diff2;
        sum_sq1 += diff1 * diff1;
        sum_sq2 += diff2 * diff2;
    }

    let denominator = (sum_sq1 * sum_sq2).sqrt();
    if denominator > 0.0 {
        numerator / denominator
    } else {
        0.0
    }
}

/// Convert quality score to human-readable label
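///
/// # Examples
///
/// A quick illustration of the thresholds used in the match below:
///
/// ```
/// use voirs_evaluation::quality_score_to_label;
///
/// assert_eq!(quality_score_to_label(0.92), "Excellent");
/// assert_eq!(quality_score_to_label(0.55), "Very Poor");
/// ```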
#[must_use]
pub fn quality_score_to_label(score: f32) -> &'static str {
    match score {
        s if s >= 0.9 => "Excellent",
        s if s >= 0.8 => "Good",
        s if s >= 0.7 => "Fair",
        s if s >= 0.6 => "Poor",
        _ => "Very Poor",
    }
}

/// Convert pronunciation score to human-readable label
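///
/// # Examples
///
/// A quick illustration of the thresholds used in the match below:
///
/// ```
/// use voirs_evaluation::pronunciation_score_to_label;
///
/// assert_eq!(pronunciation_score_to_label(0.96), "Native-like");
/// assert_eq!(pronunciation_score_to_label(0.70), "Acceptable");
/// ```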
#[must_use]
pub fn pronunciation_score_to_label(score: f32) -> &'static str {
    match score {
        s if s >= 0.95 => "Native-like",
        s if s >= 0.85 => "Very Good",
        s if s >= 0.75 => "Good",
        s if s >= 0.65 => "Acceptable",
        s if s >= 0.5 => "Needs Improvement",
        _ => "Poor",
    }
}

/// Normalize scores to the 0-1 range in place using min-max scaling
///
/// Leaves the slice unchanged if it is empty or all values are equal.
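///
/// # Examples
///
/// A minimal example with exactly representable values:
///
/// ```
/// use voirs_evaluation::normalize_scores;
///
/// let mut scores = vec![2.0_f32, 4.0, 6.0];
/// normalize_scores(&mut scores);
/// assert_eq!(scores, vec![0.0_f32, 0.5, 1.0]);
/// ```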
pub fn normalize_scores(scores: &mut [f32]) {
    if scores.is_empty() {
        return;
    }

    let min_score = scores.iter().fold(f32::INFINITY, |a, &b| a.min(b));
    let max_score = scores.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));

    if max_score > min_score {
        let range = max_score - min_score;
        for score in scores {
            *score = (*score - min_score) / range;
        }
    }
}

/// Calculate the weighted average of scores
///
/// Returns 0.0 if the slices differ in length, are empty, or the weights sum to zero.
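///
/// # Examples
///
/// A minimal example: a 3:1 weight ratio pulls the average toward the first score.
///
/// ```
/// use voirs_evaluation::weighted_average;
///
/// let scores = [0.8, 0.6];
/// let weights = [3.0, 1.0];
/// assert!((weighted_average(&scores, &weights) - 0.75).abs() < 1e-6);
/// ```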
#[must_use]
pub fn weighted_average(scores: &[f32], weights: &[f32]) -> f32 {
    if scores.len() != weights.len() || scores.is_empty() {
        return 0.0;
    }

    let weighted_sum: f32 = scores.iter().zip(weights.iter()).map(|(s, w)| s * w).sum();
    let weight_sum: f32 = weights.iter().sum();

    if weight_sum > 0.0 {
        weighted_sum / weight_sum
    } else {
        0.0
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_version() {
        // VERSION is a const string literal, so this checks it has content
        assert!(!VERSION.is_empty());
    }

    #[test]
    fn test_audio_compatibility_validation() {
        let audio1 = AudioBuffer::new(vec![0.1, 0.2, 0.3], 16000, 1);
        let audio2 = AudioBuffer::new(vec![0.4, 0.5, 0.6], 16000, 1);

        // Should be compatible
        assert!(validate_audio_compatibility(&audio1, &audio2).is_ok());

        // Different sample rates
        let audio3 = AudioBuffer::new(vec![0.1, 0.2, 0.3], 22050, 1);
        assert!(validate_audio_compatibility(&audio1, &audio3).is_err());

        // Different channel counts
        let audio4 = AudioBuffer::new(vec![0.1, 0.2, 0.3, 0.4], 16000, 2);
        assert!(validate_audio_compatibility(&audio1, &audio4).is_err());
    }

    #[test]
    fn test_correlation_calculation() {
        let scores1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let scores2 = vec![2.0, 4.0, 6.0, 8.0, 10.0];

        let correlation = calculate_correlation(&scores1, &scores2);
        assert!((correlation - 1.0).abs() < 0.001); // Perfect correlation

        let scores3 = vec![5.0, 4.0, 3.0, 2.0, 1.0];
        let correlation_neg = calculate_correlation(&scores1, &scores3);
        assert!((correlation_neg + 1.0).abs() < 0.001); // Perfect negative correlation
    }

    #[test]
    fn test_quality_score_labels() {
        assert_eq!(quality_score_to_label(0.95), "Excellent");
        assert_eq!(quality_score_to_label(0.85), "Good");
        assert_eq!(quality_score_to_label(0.75), "Fair");
        assert_eq!(quality_score_to_label(0.65), "Poor");
        assert_eq!(quality_score_to_label(0.45), "Very Poor");
    }

    #[test]
    fn test_pronunciation_score_labels() {
        assert_eq!(pronunciation_score_to_label(0.97), "Native-like");
        assert_eq!(pronunciation_score_to_label(0.87), "Very Good");
        assert_eq!(pronunciation_score_to_label(0.77), "Good");
        assert_eq!(pronunciation_score_to_label(0.67), "Acceptable");
        assert_eq!(pronunciation_score_to_label(0.57), "Needs Improvement");
        assert_eq!(pronunciation_score_to_label(0.37), "Poor");
    }

    #[test]
    fn test_score_normalization() {
        let mut scores = vec![10.0, 20.0, 30.0, 40.0, 50.0];
        normalize_scores(&mut scores);

        assert!((scores[0] - 0.0).abs() < 0.001);
        assert!((scores[4] - 1.0).abs() < 0.001);
        assert!(scores.iter().all(|&s| (0.0..=1.0).contains(&s)));
    }

    #[test]
    fn test_weighted_average() {
        let scores = vec![0.8, 0.6, 0.9];
        let weights = vec![0.5, 0.3, 0.2];

        let avg = weighted_average(&scores, &weights);
        let expected = (0.8 * 0.5 + 0.6 * 0.3 + 0.9 * 0.2) / (0.5 + 0.3 + 0.2);
        assert!((avg - expected).abs() < 0.001);
    }

    #[test]
    fn test_default_configs() {
        let quality_config = default_quality_config();
        assert!(quality_config.objective_metrics);

        let pronunciation_config = default_pronunciation_config();
        assert!(pronunciation_config.phoneme_level_scoring);

        let comparison_config = default_comparison_config();
        assert!(comparison_config.enable_statistical_analysis);
    }
}