1#![allow(clippy::cast_precision_loss)] #![allow(clippy::cast_possible_truncation)] #![allow(clippy::cast_sign_loss)] #![allow(clippy::missing_errors_doc)] #![allow(clippy::missing_panics_doc)] #![allow(clippy::unused_self)] #![allow(clippy::must_use_candidate)] #![allow(clippy::doc_markdown)] #![allow(clippy::unnecessary_wraps)] #![allow(clippy::float_cmp)] #![allow(clippy::match_same_arms)] #![allow(clippy::module_name_repetitions)] #![allow(clippy::struct_excessive_bools)] #![allow(clippy::too_many_lines)] #![allow(clippy::needless_pass_by_value)] #![allow(clippy::similar_names)] #![allow(clippy::unused_async)] #![allow(clippy::needless_range_loop)] #![allow(clippy::uninlined_format_args)] #![allow(clippy::manual_clamp)] #![allow(clippy::return_self_not_must_use)] #![allow(clippy::cast_possible_wrap)] #![allow(clippy::cast_lossless)] #![allow(clippy::wildcard_imports)] #![allow(clippy::format_push_string)] #![allow(clippy::redundant_closure_for_method_calls)] #![deny(unsafe_code)]
34#![warn(missing_docs)]
35
36pub mod audio_libraries_update;
37pub mod audio_quality_research;
38pub mod buffer_pool;
39pub mod cache;
40pub mod cloud_scaling;
41pub mod communication;
42pub mod compression_research;
43pub mod config;
44pub mod core;
45pub mod diagnostics;
46pub mod fallback;
47pub mod format;
48pub mod gaming;
49pub mod simd_audio;
50
51#[cfg(feature = "iot")]
52pub mod iot;
53
54pub mod ml_frameworks;
55pub mod mobile;
56pub mod models;
57pub mod monitoring;
58pub mod multi_target;
59pub mod neural_vocoding;
60pub mod optimizations;
61pub mod pipeline_optimization;
62pub mod platform_libraries;
63pub mod processing;
64pub mod profiling;
65pub mod quality;
66pub mod realtime;
67pub mod realtime_libraries;
68pub mod realtime_ml;
69pub mod recognition;
70pub mod scalability;
71pub mod streaming;
72pub mod streaming_platforms;
73pub mod style_consistency;
74pub mod style_transfer;
75pub mod thread_safety;
76pub mod transforms;
77pub mod types;
78pub mod webrtc_integration;
79pub mod zero_shot;
80
81#[cfg(feature = "acoustic-integration")]
82pub mod acoustic;
83
84#[cfg(feature = "cloning-integration")]
85pub mod cloning;
86
87#[cfg(feature = "emotion-integration")]
88pub mod emotion;
89
90#[cfg(feature = "spatial-integration")]
91pub mod spatial;
92
93#[cfg(feature = "wasm")]
94pub mod wasm;
95
96pub use audio_libraries_update::{
98 AudioLibrariesUpdater, AudioLibraryInfo, CompatibilityRisk, CompatibilityTestResult,
99 LibraryVersionAnalysis, MigrationEffort, MigrationGuide, PerformanceImpact, SecuritySeverity,
100 SecurityVulnerability, UpdatePriority, UpdateResult,
101};
102pub use audio_quality_research::{
103 AnalysisStatistics, AudioQualityResearcher, ComprehensiveQualityAnalysis,
104 HarmonicDistortionAnalysis, MultidimensionalQuality, NeuralQualityModel,
105 PsychoacousticAnalysis, ResearchConfig, ResearchCriticalBandAnalysis, SpectralQualityAnalysis,
106 TemporalQualityAnalysis, TonalityAnalysis,
107};
108pub use cache::{
109 CacheConfig, CacheItemType, CachePolicy, CachePriority, CacheStatistics, CachedData,
110 CachedItem, ConversionCacheSystem, LruCache, PerformanceMetrics,
111};
112pub use cloud_scaling::{
113 CloudNode, CloudScalingConfig, CloudScalingController, ClusterMetrics,
114 DistributedConversionRequest, DistributedConversionResult, LoadBalancingStrategy,
115 NodeCapabilities, NodeResourceUsage, NodeStatus, RequestPriority, RetryConfig,
116 ScalingAction as CloudScalingAction, ScalingDecision,
117};
118pub use compression_research::{
119 CompressedAudio, CompressionAlgorithm, CompressionConfig, CompressionParameters,
120 CompressionResearcher, CompressionStats, CompressionTarget, PredictionAnalyzer,
121 PsychoacousticAnalyzer, TonalityDetector, VectorQuantizer,
122};
123pub use config::{ConversionConfig, ConversionConfigBuilder};
124pub use core::{VoiceConverter, VoiceConverterBuilder};
125pub use diagnostics::{
126 DiagnosticAnalysis, DiagnosticSystem, HealthAssessment, IdentifiedIssue, IssueCategory,
127 IssueSeverity, Recommendation, ReportType,
128};
129pub use fallback::{
130 DegradationConfig, FailureType, FallbackContext, GracefulDegradationController,
131 QualityThresholds,
132};
133pub use format::{
134 AudioData, AudioFormat, AudioFormatType, AudioReader, AudioWriter, FormatConverter,
135 FormatDetector, FormatQuality,
136};
137pub use gaming::{
138 BevyIntegration, CustomIntegration, GameAudioConfig, GameEngine, GameEngineIntegration,
139 GamePerformanceConstraints, GamePerformanceMetrics, GamePerformanceMonitor, GameVoiceMode,
140 GameVoiceProcessor, GameVoiceSession, GodotIntegration, ThreadPriority, UnityIntegration,
141 UnrealIntegration,
142};
143
144#[cfg(feature = "iot")]
145pub use iot::{
146 IoTConversionConfig, IoTConversionStatistics, IoTDeviceStatus, IoTPlatform, IoTPowerMode,
147 IoTProcessingMode, IoTVoiceConverter, ResourceConstraints, ResourceUsage as IoTResourceUsage,
148};
149
150#[cfg(feature = "acoustic-integration")]
151pub use acoustic::{
152 AcousticConversionAdapter, AcousticConversionContext, AcousticConversionResult,
153 AcousticFeatureConfig, AcousticFeatures, AcousticState, FormantFrequencies, HarmonicFeatures,
154 TemporalFeatures, WindowType,
155};
156
157#[cfg(feature = "cloning-integration")]
158pub use cloning::{
159 CloningConversionAdapter, CloningConversionResult, CloningIntegration,
160 CloningIntegrationConfig, TargetSpeakerInfo,
161};
162
163#[cfg(feature = "emotion-integration")]
164pub use emotion::{EmotionConversionAdapter, EmotionParameters};
165
166pub use ml_frameworks::{
167 ActivationFunction, ConvLayerConfig, DevicePreference, InferenceMetrics, LayerSpec,
168 MLFramework, MLFrameworkConfig, MLFrameworkManager, MLInferenceSession, MLModelMetadata,
169 MemoryConfig, MemoryUsageStats, ModelArchitecture, ModelCapabilities, ModelOptimization,
170 PerformanceConfig, QuantizationPrecision, RnnType, TensorDataType, TensorSpec,
171};
172pub use mobile::{
173 MobileConversionConfig, MobileConversionStatistics, MobileDeviceInfo, MobilePlatform,
174 MobileVoiceConverter, NeonOptimizer, PowerMode, ThermalState,
175};
176pub use models::{ConversionModel, ModelType};
177pub use monitoring::{
178 AlertSeverity, AlertType, MonitorConfig, QualityDashboard, QualityEvent, QualityMonitor,
179 SessionDashboard, SystemOverview,
180};
181pub use multi_target::{
182 MultiTargetConversionRequest, MultiTargetConversionResult, MultiTargetConverter,
183 MultiTargetProcessingStats, NamedTarget, ProcessingMode,
184};
185pub use neural_vocoding::{
186 ActivationType, AlgorithmBenchmark, AlgorithmPerformance, AttentionConfig,
187 AudioProcessingParams, NeuralArchitectureConfig, NeuralVocoder, NeuralVocodingConfig,
188 NeuralVocodingMetrics, VocodingAlgorithm, VocodingQuality,
189};
190pub use optimizations::{AudioBufferPool, ConversionPerformanceMonitor, SmallAudioOptimizer};
191pub use pipeline_optimization::{
192 AlgorithmVariant, OptimizationStatistics, OptimizedConversionPlan, OptimizedPipeline,
193 OptimizedPipelineConfig,
194};
195pub use platform_libraries::{
196 CpuFeatures, OptimizationLevel, PlatformConfig, PlatformOptimizer, PlatformStats,
197 TargetPlatform,
198};
199pub use processing::{AudioBuffer, ProcessingPipeline};
200pub use profiling::{
201 BottleneckAnalyzer, BottleneckInfo, BottleneckThresholds, BottleneckType, ConversionProfiler,
202 CpuAnalysis, CpuData, CpuSample, GlobalMetrics, MemoryAnalysis, MemoryData, MemorySample,
203 PerformanceSummary, ProfilingConfig, ProfilingReport, ProfilingSession, SessionInfo,
204 StageTimingInfo, TimingBreakdown, TimingData,
205};
206pub use quality::{
207 AdaptiveQualityController, ArtifactDetector, CriticalBandAnalysis, DetailedQualityMetrics,
208 DetectedArtifacts, LoudnessAnalysis, MaskingAnalysis, ObjectiveQualityMetrics,
209 PerceptualOptimizationParams, PerceptualOptimizationResult, PerceptualOptimizer,
210 QualityAssessment, QualityMetricsSystem, QualityTargetMeasurement, QualityTargetsAchievement,
211 QualityTargetsConfig, QualityTargetsStatistics, QualityTargetsSystem,
212};
213pub use realtime::{RealtimeConfig, RealtimeConverter};
214pub use realtime_libraries::{
215 AudioBackend, BackendCapabilities, RealtimeBuffer, RealtimeConfig as RealtimeLibraryConfig,
216 RealtimeLibraryManager, RealtimeStats,
217};
218pub use realtime_ml::{
219 AdaptiveOptimizationState, BufferStrategy, CacheEvictionPolicy, CacheOptimizationConfig,
220 ModelAdaptationConfig, OptimizationSnapshot, OptimizationStrategy, ParallelProcessingConfig,
221 PerformanceSample, QuantizationLevel, RealtimeMLConfig, RealtimeMLOptimizer, RealtimeMetrics,
222 ResourceUsage as RealtimeMLResourceUsage, StreamingOptimizationConfig,
223};
224pub use recognition::{
225 ASRConfig, ASREngine, ASRTranscription, PhonemeAlignment, RecognitionGuidedConverter,
226 RecognitionGuidedResult, RecognitionStats, SpeechGuidedParams, WordTimestamp,
227};
228pub use scalability::{
229 MemoryEfficiencyMetrics, MemoryTracker, ResourceAllocationStrategy, ResourceMonitor,
230 ResourceUsageMetrics, ScalabilityConfig, ScalabilityMetrics, ScalabilityTargets,
231 ScalableConverter, ScalingAction, ScalingActionType, ScalingController, ScalingThresholds,
232 ThroughputMetrics, ThroughputSample,
233};
234#[cfg(feature = "spatial-integration")]
235pub use spatial::{
236 AmbisonicsOutput, BinauralAudioOutput, HrtfMetadata, SpatialAudioOutput,
237 SpatialConversionAdapter, SpatialDirection, SpatialPosition, SpatialVoiceSource,
238};
239pub use streaming::{StreamProcessor, StreamingConverter};
240pub use streaming_platforms::{
241 AdaptationDirection, AdaptationEvent, BandwidthAdaptationState, DiscordIntegration,
242 FacebookIntegration, OBSIntegration, PlatformIntegration, RTMPIntegration, StreamConfig,
243 StreamPerformanceMetrics, StreamPerformanceMonitor, StreamProcessor as StreamPlatformProcessor,
244 StreamQuality, StreamSession, StreamVoiceMode, StreamingConstraints, StreamingPlatform,
245 StreamlabsIntegration, TikTokIntegration, TwitchIntegration, XSplitIntegration,
246 YouTubeIntegration,
247};
248pub use style_consistency::{
249 ConsistencyThresholds, PreservationMode, StyleAdaptationSettings, StyleConsistencyConfig,
250 StyleConsistencyEngine, StyleElement,
251};
252pub use style_transfer::{
253 StyleCharacteristics, StyleTransferConfig, StyleTransferMethod, StyleTransferSystem,
254};
255pub use thread_safety::{
256 AllocationInfo, AllocationTracker, AllocationType, BoundsViolation, BufferSafetyMonitor,
257 ConcurrentConversionManager, ConcurrentConversionMetrics, LeakSeverity, MemoryLeak,
258 MemorySafetyAuditor, MemorySafetyConfig, MemorySafetyReport, MemorySafetyStatus,
259 ModelAccessStats, ModelUsageInfo, OperationGuard, OperationInfo, OperationState,
260 OperationStatus, ReferenceTracker, RiskLevel, ThreadSafeModelManager, UnsafeOperation,
261 UnsafeOperationType, ViolationSeverity, ViolationType,
262};
263pub use transforms::{
264 AgeTransform, ChannelStrategy, GenderTransform, MultiChannelAudio, MultiChannelConfig,
265 MultiChannelPitchTransform, MultiChannelTransform, PitchTransform, SpeedTransform, Transform,
266 VoiceMorpher,
267};
268pub use types::{
269 ConversionRequest, ConversionResult, ConversionTarget, ConversionType, VoiceCharacteristics,
270};
271pub use webrtc_integration::{
272 ConversionMode, NetworkConditions, QualityMode, VoiceConversionConfig, WebRTCAudioConfig,
273 WebRTCProcessingStatistics, WebRTCVoiceProcessor,
274};
275pub use zero_shot::{
276 ReferenceVoiceDatabase, SpeakerEmbedding, UniversalVoiceModel, ZeroShotConfig,
277 ZeroShotConverter,
278};
279
280#[cfg(feature = "wasm")]
281pub use wasm::{
282 BrowserCapabilities, ConversionParameters, WasmConversionConfig, WasmConversionStatistics,
283 WasmSupportLevel, WasmVoiceConverter, WebAudioNodeType,
284};
285
/// Convenient crate-wide result type using [`Error`] as the error variant.
pub type Result<T> = std::result::Result<T, Error>;
288
/// Creates a configuration [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_config {
    ($msg:expr) => {
        $crate::Error::config($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::config(format!($fmt, $($arg)+))
    };
}
296
/// Creates a processing [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_processing {
    ($msg:expr) => {
        $crate::Error::processing($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::processing(format!($fmt, $($arg)+))
    };
}
304
/// Creates a model [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_model {
    ($msg:expr) => {
        $crate::Error::model($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::model(format!($fmt, $($arg)+))
    };
}
312
/// Creates an audio [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_audio {
    ($msg:expr) => {
        $crate::Error::audio($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::audio(format!($fmt, $($arg)+))
    };
}
320
/// Creates a real-time processing [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_realtime {
    ($msg:expr) => {
        $crate::Error::realtime($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::realtime(format!($fmt, $($arg)+))
    };
}
328
/// Creates a streaming [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_streaming {
    ($msg:expr) => {
        $crate::Error::streaming($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::streaming(format!($fmt, $($arg)+))
    };
}
336
/// Creates a buffer [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_buffer {
    ($msg:expr) => {
        $crate::Error::buffer($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::buffer(format!($fmt, $($arg)+))
    };
}
344
/// Creates a transform [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_transform {
    ($msg:expr) => {
        $crate::Error::transform($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::transform(format!($fmt, $($arg)+))
    };
}
352
/// Creates a validation [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_validation {
    ($msg:expr) => {
        $crate::Error::validation($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::validation(format!($fmt, $($arg)+))
    };
}
360
/// Creates a runtime [`Error`] from a message.
///
/// Accepts either a single expression convertible via `to_string()` or a
/// `format!`-style format string with arguments.
#[macro_export]
macro_rules! error_runtime {
    ($msg:expr) => {
        $crate::Error::runtime($msg.to_string())
    };
    ($fmt:expr, $($arg:tt)+) => {
        $crate::Error::runtime(format!($fmt, $($arg)+))
    };
}
368
/// Unified error type for the voice conversion crate.
///
/// Structured variants carry an optional [`ErrorContext`], variant-specific
/// diagnostic info, and a boxed list of recovery suggestions. The final three
/// variants wrap external error types via `#[from]` conversions; those wrapped
/// sources are not `Clone`, which is why [`Error`] has a manual `Clone` impl.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// Invalid or inconsistent configuration.
    #[error("Configuration error: {message}")]
    Config {
        /// Human-readable description of the problem.
        message: String,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure during a named processing operation.
    #[error("Processing error in {operation}: {message}")]
    Processing {
        /// Name of the operation that failed.
        operation: String,
        /// Human-readable description of the problem.
        message: String,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure related to a conversion model.
    #[error("Model error ({model_type}): {message}")]
    Model {
        /// Kind of model involved (free-form label).
        model_type: String,
        /// Human-readable description of the problem.
        message: String,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure related to audio data or formats.
    #[error("Audio error: {message}")]
    Audio {
        /// Human-readable description of the problem.
        message: String,
        /// Optional details about the offending audio (rate, channels, ...).
        audio_info: Option<Box<AudioErrorInfo>>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure in the real-time processing path.
    #[error("Real-time processing error: {message}")]
    Realtime {
        /// Human-readable description of the problem.
        message: String,
        /// Optional performance snapshot (latency, CPU, memory) at failure time.
        performance_context: Option<Box<PerformanceErrorInfo>>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure in streaming conversion.
    #[error("Streaming error: {message}")]
    Streaming {
        /// Human-readable description of the problem.
        message: String,
        /// Optional details about the affected stream.
        stream_info: Option<Box<StreamErrorInfo>>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure related to buffer management.
    #[error("Buffer error: {message}")]
    Buffer {
        /// Human-readable description of the problem.
        message: String,
        /// Optional details about the affected buffer.
        buffer_info: Option<Box<BufferErrorInfo>>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Failure in an audio transform.
    #[error("Transform error ({transform_type}): {message}")]
    Transform {
        /// Kind of transform involved (free-form label).
        transform_type: String,
        /// Human-readable description of the problem.
        message: String,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Input validation failure.
    #[error("Validation error: {message}")]
    Validation {
        /// Human-readable description of the problem.
        message: String,
        /// Name of the offending field, if known.
        field: Option<String>,
        /// Expected value or shape, if known.
        expected: Option<String>,
        /// Actual value encountered, if known.
        actual: Option<String>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Generic runtime failure not covered by a more specific variant.
    #[error("Runtime error: {message}")]
    Runtime {
        /// Human-readable description of the problem.
        message: String,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Detected memory-safety violation (leak, overflow, use-after-free, ...).
    #[error("Memory safety error: {message}")]
    MemorySafety {
        /// Human-readable description of the problem.
        message: String,
        /// Optional details about the violation.
        safety_info: Option<Box<MemorySafetyErrorInfo>>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Detected thread-safety violation (deadlock, race, ...).
    #[error("Thread safety error: {message}")]
    ThreadSafety {
        /// Human-readable description of the problem.
        message: String,
        /// Optional details about the violation.
        thread_info: Option<Box<ThreadSafetyErrorInfo>>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// A system resource has been exhausted.
    #[error("Resource exhaustion: {resource_type}")]
    ResourceExhaustion {
        /// Kind of resource that ran out (free-form label).
        resource_type: String,
        /// Current usage level, if measurable.
        current_usage: Option<u64>,
        /// Configured limit for the resource, if known.
        limit: Option<u64>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// An operation exceeded its allotted time budget.
    #[error("Operation timeout: {operation} exceeded {timeout_ms}ms")]
    Timeout {
        /// Name of the operation that timed out.
        operation: String,
        /// Timeout budget, in milliseconds.
        timeout_ms: u64,
        /// Time actually elapsed, in milliseconds, if measured.
        elapsed_ms: Option<u64>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Version or feature incompatibility.
    #[error("Compatibility error: {message}")]
    Compatibility {
        /// Human-readable description of the problem.
        message: String,
        /// Version that would be required, if known.
        required_version: Option<String>,
        /// Version currently in use, if known.
        current_version: Option<String>,
        /// Optional structured context describing where the error occurred.
        context: Option<Box<ErrorContext>>,
        /// Suggested actions for recovering from this error.
        recovery_suggestions: Box<Vec<String>>,
    },

    /// Wrapped standard-library I/O error.
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    /// Wrapped JSON (de)serialization error.
    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),

    /// Wrapped error from the Candle ML framework.
    #[error("ML framework error: {0}")]
    Candle(#[from] candle_core::Error),
}
585
/// Structured context describing where and when an error occurred.
#[derive(Debug, Clone)]
pub struct ErrorContext {
    /// Name of the operation that was in progress.
    pub operation: String,
    /// Code location associated with the error (free-form label).
    pub location: String,
    /// Identifier of the thread on which the error occurred, if known.
    pub thread_id: Option<String>,
    /// Wall-clock time at which the error was captured.
    pub timestamp: std::time::SystemTime,
    /// Arbitrary extra key/value details.
    pub additional_info: std::collections::HashMap<String, String>,
}
600
/// Details about the audio involved in an [`Error::Audio`].
#[derive(Debug, Clone)]
pub struct AudioErrorInfo {
    /// Sample rate of the audio in Hz, if known.
    pub sample_rate: Option<u32>,
    /// Number of audio channels, if known.
    pub channels: Option<u32>,
    /// Audio format label, if known.
    pub format: Option<String>,
    /// Duration of the audio in seconds, if known.
    pub duration_seconds: Option<f32>,
    /// Size of the audio buffer, if known.
    pub buffer_size: Option<usize>,
}
615
/// Performance snapshot attached to an [`Error::Realtime`].
#[derive(Debug, Clone)]
pub struct PerformanceErrorInfo {
    /// Observed processing latency in milliseconds, if measured.
    pub current_latency_ms: Option<f32>,
    /// Target latency budget in milliseconds, if configured.
    pub target_latency_ms: Option<f32>,
    /// CPU usage as a percentage, if measured.
    pub cpu_usage_percent: Option<f32>,
    /// Memory usage in bytes, if measured.
    pub memory_usage_bytes: Option<u64>,
    /// Number of items pending in the processing queue, if known.
    pub queue_size: Option<usize>,
}
630
/// Details about the stream involved in an [`Error::Streaming`].
#[derive(Debug, Clone)]
pub struct StreamErrorInfo {
    /// Identifier of the affected stream, if known.
    pub stream_id: Option<String>,
    /// State of the stream at failure time (free-form label), if known.
    pub stream_state: Option<String>,
    /// Fill level of the stream buffer, if known.
    pub buffer_level: Option<usize>,
    /// Number of samples dropped so far, if counted.
    pub dropped_samples: Option<u64>,
    /// Position within the stream, if known.
    pub stream_position: Option<u64>,
}
645
/// Details about the buffer involved in an [`Error::Buffer`].
#[derive(Debug, Clone)]
pub struct BufferErrorInfo {
    /// Identifier of the affected buffer, if known.
    pub buffer_id: Option<String>,
    /// Total size of the buffer, if known.
    pub buffer_size: Option<usize>,
    /// Space remaining in the buffer, if known.
    pub available_space: Option<usize>,
    /// Kind of buffer (free-form label), if known.
    pub buffer_type: Option<String>,
    /// Access pattern in use (free-form label), if known.
    pub access_pattern: Option<String>,
}
660
/// Details about the violation behind an [`Error::MemorySafety`].
#[derive(Debug, Clone)]
pub struct MemorySafetyErrorInfo {
    /// Kind of violation, e.g. "memory_leak", "buffer_overflow",
    /// "use_after_free" (see `Error::memory_safety_with_info`).
    pub violation_type: String,
    /// Memory address involved, if captured (rendered as a string).
    pub memory_address: Option<String>,
    /// Size of the allocation in bytes, if known.
    pub allocation_size: Option<u64>,
    /// Age of the allocation in milliseconds, if tracked.
    pub allocation_age_ms: Option<u64>,
    /// Thread that performed the allocation, if known.
    pub allocating_thread: Option<String>,
}
675
/// Details about the violation behind an [`Error::ThreadSafety`].
#[derive(Debug, Clone)]
pub struct ThreadSafetyErrorInfo {
    /// Kind of violation, e.g. "deadlock", "race_condition", "data_race"
    /// (see `Error::thread_safety_with_info`).
    pub violation_type: String,
    /// Threads involved in the violation.
    pub thread_ids: Vec<String>,
    /// Name of the contended resource, if known.
    pub resource_name: Option<String>,
    /// State of the relevant lock(s), if known.
    pub lock_state: Option<String>,
    /// Additional deadlock diagnostics, if available.
    pub deadlock_info: Option<String>,
}
690
691impl Clone for Error {
693 fn clone(&self) -> Self {
694 match self {
695 Error::Config {
696 message,
697 context,
698 recovery_suggestions,
699 } => Error::Config {
700 message: message.clone(),
701 context: context.as_ref().map(|c| Box::new((**c).clone())),
702 recovery_suggestions: recovery_suggestions.clone(),
703 },
704 Error::Processing {
705 operation,
706 message,
707 context,
708 recovery_suggestions,
709 } => Error::Processing {
710 operation: operation.clone(),
711 message: message.clone(),
712 context: context.as_ref().map(|c| Box::new((**c).clone())),
713 recovery_suggestions: recovery_suggestions.clone(),
714 },
715 Error::Model {
716 model_type,
717 message,
718 context,
719 recovery_suggestions,
720 } => Error::Model {
721 model_type: model_type.clone(),
722 message: message.clone(),
723 context: context.as_ref().map(|c| Box::new((**c).clone())),
724 recovery_suggestions: recovery_suggestions.clone(),
725 },
726 Error::Audio {
727 message,
728 audio_info,
729 context,
730 recovery_suggestions,
731 } => Error::Audio {
732 message: message.clone(),
733 audio_info: audio_info.as_ref().map(|a| Box::new((**a).clone())),
734 context: context.as_ref().map(|c| Box::new((**c).clone())),
735 recovery_suggestions: recovery_suggestions.clone(),
736 },
737 Error::Realtime {
738 message,
739 performance_context,
740 context,
741 recovery_suggestions,
742 } => Error::Realtime {
743 message: message.clone(),
744 performance_context: performance_context
745 .as_ref()
746 .map(|p| Box::new((**p).clone())),
747 context: context.as_ref().map(|c| Box::new((**c).clone())),
748 recovery_suggestions: recovery_suggestions.clone(),
749 },
750 Error::Streaming {
751 message,
752 stream_info,
753 context,
754 recovery_suggestions,
755 } => Error::Streaming {
756 message: message.clone(),
757 stream_info: stream_info.as_ref().map(|s| Box::new((**s).clone())),
758 context: context.as_ref().map(|c| Box::new((**c).clone())),
759 recovery_suggestions: recovery_suggestions.clone(),
760 },
761 Error::Buffer {
762 message,
763 buffer_info,
764 context,
765 recovery_suggestions,
766 } => Error::Buffer {
767 message: message.clone(),
768 buffer_info: buffer_info.as_ref().map(|b| Box::new((**b).clone())),
769 context: context.as_ref().map(|c| Box::new((**c).clone())),
770 recovery_suggestions: recovery_suggestions.clone(),
771 },
772 Error::Transform {
773 transform_type,
774 message,
775 context,
776 recovery_suggestions,
777 } => Error::Transform {
778 transform_type: transform_type.clone(),
779 message: message.clone(),
780 context: context.as_ref().map(|c| Box::new((**c).clone())),
781 recovery_suggestions: recovery_suggestions.clone(),
782 },
783 Error::Validation {
784 message,
785 field,
786 expected,
787 actual,
788 context,
789 recovery_suggestions,
790 } => Error::Validation {
791 message: message.clone(),
792 field: field.clone(),
793 expected: expected.clone(),
794 actual: actual.clone(),
795 context: context.as_ref().map(|c| Box::new((**c).clone())),
796 recovery_suggestions: recovery_suggestions.clone(),
797 },
798 Error::Runtime {
799 message,
800 context,
801 recovery_suggestions,
802 } => Error::Runtime {
803 message: message.clone(),
804 context: context.as_ref().map(|c| Box::new((**c).clone())),
805 recovery_suggestions: recovery_suggestions.clone(),
806 },
807 Error::MemorySafety {
808 message,
809 safety_info,
810 context,
811 recovery_suggestions,
812 } => Error::MemorySafety {
813 message: message.clone(),
814 safety_info: safety_info.as_ref().map(|s| Box::new((**s).clone())),
815 context: context.as_ref().map(|c| Box::new((**c).clone())),
816 recovery_suggestions: recovery_suggestions.clone(),
817 },
818 Error::ThreadSafety {
819 message,
820 thread_info,
821 context,
822 recovery_suggestions,
823 } => Error::ThreadSafety {
824 message: message.clone(),
825 thread_info: thread_info.as_ref().map(|t| Box::new((**t).clone())),
826 context: context.as_ref().map(|c| Box::new((**c).clone())),
827 recovery_suggestions: recovery_suggestions.clone(),
828 },
829 Error::ResourceExhaustion {
830 resource_type,
831 current_usage,
832 limit,
833 context,
834 recovery_suggestions,
835 } => Error::ResourceExhaustion {
836 resource_type: resource_type.clone(),
837 current_usage: *current_usage,
838 limit: *limit,
839 context: context.as_ref().map(|c| Box::new((**c).clone())),
840 recovery_suggestions: recovery_suggestions.clone(),
841 },
842 Error::Timeout {
843 operation,
844 timeout_ms,
845 elapsed_ms,
846 context,
847 recovery_suggestions,
848 } => Error::Timeout {
849 operation: operation.clone(),
850 timeout_ms: *timeout_ms,
851 elapsed_ms: *elapsed_ms,
852 context: context.as_ref().map(|c| Box::new((**c).clone())),
853 recovery_suggestions: recovery_suggestions.clone(),
854 },
855 Error::Compatibility {
856 message,
857 required_version,
858 current_version,
859 context,
860 recovery_suggestions,
861 } => Error::Compatibility {
862 message: message.clone(),
863 required_version: required_version.clone(),
864 current_version: current_version.clone(),
865 context: context.as_ref().map(|c| Box::new((**c).clone())),
866 recovery_suggestions: recovery_suggestions.clone(),
867 },
868 Error::Io(e) => Error::Config {
869 message: format!("I/O error: {e}"),
870 context: None,
871 recovery_suggestions: Box::new(vec![
872 "Check file permissions".to_string(),
873 "Verify file path exists".to_string(),
874 ]),
875 },
876 Error::Serialization(e) => Error::Config {
877 message: format!("Serialization error: {e}"),
878 context: None,
879 recovery_suggestions: Box::new(vec![
880 "Check data format".to_string(),
881 "Verify JSON structure".to_string(),
882 ]),
883 },
884 Error::Candle(e) => Error::Model {
885 model_type: "ML Framework".to_string(),
886 message: format!("ML framework error: {e}"),
887 context: None,
888 recovery_suggestions: Box::new(vec![
889 "Check model file".to_string(),
890 "Verify input dimensions".to_string(),
891 ]),
892 },
893 }
894 }
895}
896
897impl Error {
    /// Creates a configuration error with a default recovery suggestion.
    pub fn config(message: String) -> Self {
        Self::Config {
            message,
            context: None,
            recovery_suggestions: Box::new(vec!["Check configuration parameters".to_string()]),
        }
    }
906
    /// Creates a configuration error carrying explicit [`ErrorContext`].
    ///
    /// No default recovery suggestions are attached.
    pub fn config_with_context(message: String, context: ErrorContext) -> Self {
        Self::Config {
            message,
            context: Some(Box::new(context)),
            recovery_suggestions: Box::new(vec![]),
        }
    }
915
    /// Creates a processing error with a generic operation name and a default
    /// recovery suggestion.
    pub fn processing(message: String) -> Self {
        Self::Processing {
            operation: "processing".to_string(),
            message,
            context: None,
            recovery_suggestions: Box::new(vec!["Review processing parameters".to_string()]),
        }
    }
925
    /// Creates a model error with an unknown model type and a default
    /// recovery suggestion.
    pub fn model(message: String) -> Self {
        Self::Model {
            model_type: "unknown".to_string(),
            message,
            context: None,
            recovery_suggestions: Box::new(vec!["Check model configuration".to_string()]),
        }
    }
935
    /// Creates an audio error without audio details and with a default
    /// recovery suggestion.
    pub fn audio(message: String) -> Self {
        Self::Audio {
            message,
            audio_info: None,
            context: None,
            recovery_suggestions: Box::new(vec!["Check audio format".to_string()]),
        }
    }
945
    /// Creates a real-time error without a performance snapshot and with a
    /// default recovery suggestion.
    pub fn realtime(message: String) -> Self {
        Self::Realtime {
            message,
            performance_context: None,
            context: None,
            recovery_suggestions: Box::new(vec!["Reduce processing load".to_string()]),
        }
    }
955
    /// Creates a streaming error without stream details and with a default
    /// recovery suggestion.
    pub fn streaming(message: String) -> Self {
        Self::Streaming {
            message,
            stream_info: None,
            context: None,
            recovery_suggestions: Box::new(vec!["Check stream configuration".to_string()]),
        }
    }
965
    /// Creates a buffer error without buffer details and with a default
    /// recovery suggestion.
    pub fn buffer(message: String) -> Self {
        Self::Buffer {
            message,
            buffer_info: None,
            context: None,
            recovery_suggestions: Box::new(vec!["Check buffer size and usage".to_string()]),
        }
    }
975
    /// Creates a transform error with an unknown transform type and a default
    /// recovery suggestion.
    pub fn transform(message: String) -> Self {
        Self::Transform {
            transform_type: "unknown".to_string(),
            message,
            context: None,
            recovery_suggestions: Box::new(vec!["Check transform parameters".to_string()]),
        }
    }
985
    /// Creates a validation error without field/expected/actual details and
    /// with a default recovery suggestion. For a richer version see
    /// `validation_detailed`.
    pub fn validation(message: String) -> Self {
        Self::Validation {
            message,
            field: None,
            expected: None,
            actual: None,
            context: None,
            recovery_suggestions: Box::new(vec!["Check input parameters".to_string()]),
        }
    }
997
    /// Creates a runtime error with a default recovery suggestion.
    pub fn runtime(message: String) -> Self {
        Self::Runtime {
            message,
            context: None,
            recovery_suggestions: Box::new(vec!["Check system resources".to_string()]),
        }
    }
1006
    /// Creates a processing error for a named operation carrying explicit
    /// [`ErrorContext`].
    ///
    /// No default recovery suggestions are attached.
    pub fn processing_with_context(
        operation: String,
        message: String,
        context: ErrorContext,
    ) -> Self {
        Self::Processing {
            operation,
            message,
            context: Some(Box::new(context)),
            recovery_suggestions: Box::new(vec![]),
        }
    }
1020
1021 pub fn validation_detailed(
1023 message: String,
1024 field: Option<String>,
1025 expected: Option<String>,
1026 actual: Option<String>,
1027 ) -> Self {
1028 let mut recovery_suggestions = vec!["Check input parameters".to_string()];
1029
1030 if let (Some(exp), Some(act)) = (&expected, &actual) {
1031 recovery_suggestions.push(format!("Expected {exp}, got {act}"));
1032 }
1033
1034 if let Some(field_name) = &field {
1035 recovery_suggestions.push(format!("Verify {field_name} field is correctly set"));
1036 }
1037
1038 Self::Validation {
1039 message,
1040 field,
1041 expected,
1042 actual,
1043 context: None,
1044 recovery_suggestions: Box::new(recovery_suggestions),
1045 }
1046 }
1047
1048 pub fn audio_with_info(message: String, audio_info: AudioErrorInfo) -> Self {
1050 let mut recovery_suggestions = vec!["Check audio format compatibility".to_string()];
1051
1052 if let Some(sr) = audio_info.sample_rate {
1053 if sr < 16000 || sr > 48000 {
1054 recovery_suggestions.push("Use supported sample rate (16kHz-48kHz)".to_string());
1055 }
1056 }
1057
1058 if let Some(channels) = audio_info.channels {
1059 if channels > 2 {
1060 recovery_suggestions.push("Convert to mono or stereo audio".to_string());
1061 }
1062 }
1063
1064 Self::Audio {
1065 message,
1066 audio_info: Some(Box::new(audio_info)),
1067 context: None,
1068 recovery_suggestions: Box::new(recovery_suggestions),
1069 }
1070 }
1071
1072 pub fn realtime_with_performance(
1074 message: String,
1075 performance_context: PerformanceErrorInfo,
1076 ) -> Self {
1077 let mut recovery_suggestions = vec!["Reduce processing load".to_string()];
1078
1079 if let Some(latency) = performance_context.current_latency_ms {
1080 if latency > 100.0 {
1081 recovery_suggestions
1082 .push("Optimize processing pipeline for lower latency".to_string());
1083 }
1084 }
1085
1086 if let Some(cpu) = performance_context.cpu_usage_percent {
1087 if cpu > 80.0 {
1088 recovery_suggestions.push("Reduce CPU-intensive operations".to_string());
1089 }
1090 }
1091
1092 Self::Realtime {
1093 message,
1094 performance_context: Some(Box::new(performance_context)),
1095 context: None,
1096 recovery_suggestions: Box::new(recovery_suggestions),
1097 }
1098 }
1099
1100 pub fn memory_safety_with_info(message: String, safety_info: MemorySafetyErrorInfo) -> Self {
1102 let mut recovery_suggestions = vec!["Run memory audit".to_string()];
1103
1104 match safety_info.violation_type.as_str() {
1105 "memory_leak" => recovery_suggestions.push("Check for unclosed resources".to_string()),
1106 "buffer_overflow" => recovery_suggestions.push("Validate buffer bounds".to_string()),
1107 "use_after_free" => recovery_suggestions.push("Check object lifecycle".to_string()),
1108 _ => recovery_suggestions.push("Review memory usage patterns".to_string()),
1109 }
1110
1111 Self::MemorySafety {
1112 message,
1113 safety_info: Some(Box::new(safety_info)),
1114 context: None,
1115 recovery_suggestions: Box::new(recovery_suggestions),
1116 }
1117 }
1118
1119 pub fn thread_safety_with_info(message: String, thread_info: ThreadSafetyErrorInfo) -> Self {
1121 let mut recovery_suggestions = vec!["Review thread synchronization".to_string()];
1122
1123 match thread_info.violation_type.as_str() {
1124 "deadlock" => recovery_suggestions.push("Check lock ordering".to_string()),
1125 "race_condition" => recovery_suggestions.push("Add proper synchronization".to_string()),
1126 "data_race" => recovery_suggestions.push("Use atomic operations or locks".to_string()),
1127 _ => recovery_suggestions.push("Review concurrent access patterns".to_string()),
1128 }
1129
1130 Self::ThreadSafety {
1131 message,
1132 thread_info: Some(Box::new(thread_info)),
1133 context: None,
1134 recovery_suggestions: Box::new(recovery_suggestions),
1135 }
1136 }
1137
1138 pub fn recovery_suggestions(&self) -> &[String] {
1140 match self {
1141 Error::Config {
1142 recovery_suggestions,
1143 ..
1144 } => recovery_suggestions,
1145 Error::Processing {
1146 recovery_suggestions,
1147 ..
1148 } => recovery_suggestions,
1149 Error::Model {
1150 recovery_suggestions,
1151 ..
1152 } => recovery_suggestions,
1153 Error::Audio {
1154 recovery_suggestions,
1155 ..
1156 } => recovery_suggestions,
1157 Error::Realtime {
1158 recovery_suggestions,
1159 ..
1160 } => recovery_suggestions,
1161 Error::Streaming {
1162 recovery_suggestions,
1163 ..
1164 } => recovery_suggestions,
1165 Error::Buffer {
1166 recovery_suggestions,
1167 ..
1168 } => recovery_suggestions,
1169 Error::Transform {
1170 recovery_suggestions,
1171 ..
1172 } => recovery_suggestions,
1173 Error::Validation {
1174 recovery_suggestions,
1175 ..
1176 } => recovery_suggestions,
1177 Error::Runtime {
1178 recovery_suggestions,
1179 ..
1180 } => recovery_suggestions,
1181 Error::MemorySafety {
1182 recovery_suggestions,
1183 ..
1184 } => recovery_suggestions,
1185 Error::ThreadSafety {
1186 recovery_suggestions,
1187 ..
1188 } => recovery_suggestions,
1189 Error::ResourceExhaustion {
1190 recovery_suggestions,
1191 ..
1192 } => recovery_suggestions,
1193 Error::Timeout {
1194 recovery_suggestions,
1195 ..
1196 } => recovery_suggestions,
1197 Error::Compatibility {
1198 recovery_suggestions,
1199 ..
1200 } => recovery_suggestions,
1201 Error::Io(_) => &[],
1202 Error::Serialization(_) => &[],
1203 Error::Candle(_) => &[],
1204 }
1205 }
1206
1207 pub fn context(&self) -> Option<&ErrorContext> {
1209 match self {
1210 Error::Config { context, .. } => context.as_ref().map(|c| &**c),
1211 Error::Processing { context, .. } => context.as_ref().map(|c| &**c),
1212 Error::Model { context, .. } => context.as_ref().map(|c| &**c),
1213 Error::Audio { context, .. } => context.as_ref().map(|c| &**c),
1214 Error::Realtime { context, .. } => context.as_ref().map(|c| &**c),
1215 Error::Streaming { context, .. } => context.as_ref().map(|c| &**c),
1216 Error::Buffer { context, .. } => context.as_ref().map(|c| &**c),
1217 Error::Transform { context, .. } => context.as_ref().map(|c| &**c),
1218 Error::Validation { context, .. } => context.as_ref().map(|c| &**c),
1219 Error::Runtime { context, .. } => context.as_ref().map(|c| &**c),
1220 Error::MemorySafety { context, .. } => context.as_ref().map(|c| &**c),
1221 Error::ThreadSafety { context, .. } => context.as_ref().map(|c| &**c),
1222 Error::ResourceExhaustion { context, .. } => context.as_ref().map(|c| &**c),
1223 Error::Timeout { context, .. } => context.as_ref().map(|c| &**c),
1224 Error::Compatibility { context, .. } => context.as_ref().map(|c| &**c),
1225 Error::Io(_) => None,
1226 Error::Serialization(_) => None,
1227 Error::Candle(_) => None,
1228 }
1229 }
1230
1231 pub fn severity(&self) -> ErrorSeverity {
1233 match self {
1234 Error::Config { .. } => ErrorSeverity::Medium,
1235 Error::Processing { .. } => ErrorSeverity::Medium,
1236 Error::Model { .. } => ErrorSeverity::High,
1237 Error::Audio { .. } => ErrorSeverity::Medium,
1238 Error::Realtime { .. } => ErrorSeverity::High,
1239 Error::Streaming { .. } => ErrorSeverity::Medium,
1240 Error::Buffer { .. } => ErrorSeverity::Medium,
1241 Error::Transform { .. } => ErrorSeverity::Medium,
1242 Error::Validation { .. } => ErrorSeverity::Low,
1243 Error::Runtime { .. } => ErrorSeverity::High,
1244 Error::MemorySafety { .. } => ErrorSeverity::Critical,
1245 Error::ThreadSafety { .. } => ErrorSeverity::Critical,
1246 Error::ResourceExhaustion { .. } => ErrorSeverity::High,
1247 Error::Timeout { .. } => ErrorSeverity::Medium,
1248 Error::Compatibility { .. } => ErrorSeverity::Low,
1249 Error::Io(_) => ErrorSeverity::Medium,
1250 Error::Serialization(_) => ErrorSeverity::Low,
1251 Error::Candle(_) => ErrorSeverity::High,
1252 }
1253 }
1254}
1255
/// Coarse severity classification for [`Error`] values.
///
/// Variants are declared from least to most severe, so the derived `Ord`
/// allows severities to be compared directly (e.g. `sev >= ErrorSeverity::High`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ErrorSeverity {
    /// Minor issue; processing can usually continue unaffected.
    Low,
    /// Noticeable problem that may degrade results but is recoverable.
    Medium,
    /// Serious failure that likely aborts the current operation.
    High,
    /// Most severe bucket (used for memory/thread-safety violations);
    /// requires immediate attention.
    Critical,
}
1268
1269impl ErrorContext {
1270 pub fn new(operation: String, location: String) -> Self {
1272 Self {
1273 operation,
1274 location,
1275 thread_id: Some(format!("{:?}", std::thread::current().id())),
1276 timestamp: std::time::SystemTime::now(),
1277 additional_info: std::collections::HashMap::new(),
1278 }
1279 }
1280
1281 pub fn with_info(mut self, key: String, value: String) -> Self {
1283 self.additional_info.insert(key, value);
1284 self
1285 }
1286}
1287
/// Convenience re-exports of the crate's most commonly used public types.
///
/// Glob-import this module to bring the whole conversion API surface into
/// scope. Integration-specific items at the bottom are only re-exported when
/// the corresponding Cargo feature is enabled.
pub mod prelude {
    pub use crate::{
        audio_libraries_update::{
            AudioLibrariesUpdater, AudioLibraryInfo, CompatibilityRisk, CompatibilityTestResult,
            LibraryVersionAnalysis, MigrationEffort, MigrationGuide, PerformanceImpact,
            SecuritySeverity, SecurityVulnerability, UpdatePriority, UpdateResult,
        },
        audio_quality_research::{
            AnalysisStatistics, AudioQualityResearcher, ComprehensiveQualityAnalysis,
            HarmonicDistortionAnalysis, MultidimensionalQuality, NeuralQualityModel,
            PsychoacousticAnalysis, ResearchConfig, ResearchCriticalBandAnalysis,
            SpectralQualityAnalysis, TemporalQualityAnalysis, TonalityAnalysis,
        },
        cache::{CacheConfig, CacheItemType, CachePolicy, CachePriority, ConversionCacheSystem},
        cloud_scaling::{
            CloudNode, CloudScalingConfig, CloudScalingController, ClusterMetrics,
            DistributedConversionRequest, DistributedConversionResult, LoadBalancingStrategy,
            NodeCapabilities, NodeResourceUsage, NodeStatus, RequestPriority, RetryConfig,
            ScalingAction as CloudScalingAction, ScalingDecision,
        },
        compression_research::{
            CompressedAudio, CompressionAlgorithm, CompressionConfig, CompressionParameters,
            CompressionResearcher, CompressionStats, CompressionTarget, PredictionAnalyzer,
            PsychoacousticAnalyzer, TonalityDetector, VectorQuantizer,
        },
        config::{ConversionConfig, ConversionConfigBuilder},
        core::{VoiceConverter, VoiceConverterBuilder},
        diagnostics::{
            DiagnosticAnalysis, DiagnosticSystem, HealthAssessment, IdentifiedIssue, IssueCategory,
            IssueSeverity, Recommendation, ReportType,
        },
        fallback::{
            DegradationConfig, FailureType, FallbackContext, GracefulDegradationController,
            QualityThresholds,
        },
        gaming::{
            BevyIntegration, CustomIntegration, GameAudioConfig, GameEngine, GameEngineIntegration,
            GamePerformanceConstraints, GamePerformanceMetrics, GamePerformanceMonitor,
            GameVoiceMode, GameVoiceProcessor, GameVoiceSession, GodotIntegration, ThreadPriority,
            UnityIntegration, UnrealIntegration,
        },
        ml_frameworks::{
            ActivationFunction, ConvLayerConfig, DevicePreference, InferenceMetrics, LayerSpec,
            MLFramework, MLFrameworkConfig, MLFrameworkManager, MLInferenceSession,
            MLModelMetadata, MemoryConfig, MemoryUsageStats, ModelArchitecture, ModelCapabilities,
            ModelOptimization, PerformanceConfig, QuantizationPrecision, RnnType, TensorDataType,
            TensorSpec,
        },
        mobile::{
            MobileConversionConfig, MobileConversionStatistics, MobileDeviceInfo, MobilePlatform,
            MobileVoiceConverter, NeonOptimizer, PowerMode, ThermalState,
        },
        models::{ConversionModel, ModelType},
        monitoring::{
            AlertSeverity, AlertType, MonitorConfig, QualityDashboard, QualityEvent,
            QualityMonitor, SessionDashboard, SystemOverview,
        },
        multi_target::{
            MultiTargetConversionRequest, MultiTargetConversionResult, MultiTargetConverter,
            MultiTargetProcessingStats, NamedTarget, ProcessingMode,
        },
        neural_vocoding::{
            ActivationType, AlgorithmBenchmark, AlgorithmPerformance, AttentionConfig,
            AudioProcessingParams, NeuralArchitectureConfig, NeuralVocoder, NeuralVocodingConfig,
            NeuralVocodingMetrics, VocodingAlgorithm, VocodingQuality,
        },
        optimizations::{AudioBufferPool, ConversionPerformanceMonitor, SmallAudioOptimizer},
        pipeline_optimization::{
            AlgorithmVariant, OptimizationStatistics, OptimizedConversionPlan, OptimizedPipeline,
            OptimizedPipelineConfig,
        },
        platform_libraries::{
            CpuFeatures, OptimizationLevel, PlatformConfig, PlatformOptimizer, PlatformStats,
            TargetPlatform,
        },
        processing::{AudioBuffer, ProcessingPipeline},
        profiling::{
            BottleneckAnalyzer, BottleneckInfo, BottleneckType, ConversionProfiler,
            ProfilingConfig, ProfilingReport, ProfilingSession,
        },
        quality::{
            AdaptiveQualityController, ArtifactDetector, CriticalBandAnalysis, DetectedArtifacts,
            LoudnessAnalysis, MaskingAnalysis, ObjectiveQualityMetrics,
            PerceptualOptimizationParams, PerceptualOptimizationResult, PerceptualOptimizer,
            QualityAssessment, QualityMetricsSystem,
        },
        realtime::{RealtimeConfig, RealtimeConverter},
        // `RealtimeConfig` also exists in `realtime`; the library variant is renamed.
        realtime_libraries::{
            AudioBackend, BackendCapabilities, RealtimeBuffer,
            RealtimeConfig as RealtimeLibraryConfig, RealtimeLibraryManager, RealtimeStats,
        },
        realtime_ml::{
            AdaptiveOptimizationState, BufferStrategy, CacheEvictionPolicy,
            CacheOptimizationConfig, ModelAdaptationConfig, OptimizationSnapshot,
            OptimizationStrategy, ParallelProcessingConfig, PerformanceSample, QuantizationLevel,
            RealtimeMLConfig, RealtimeMLOptimizer, RealtimeMetrics,
            ResourceUsage as RealtimeMLResourceUsage, StreamingOptimizationConfig,
        },
        recognition::{
            ASRConfig, ASREngine, ASRTranscription, PhonemeAlignment, RecognitionGuidedConverter,
            RecognitionGuidedResult, RecognitionStats, SpeechGuidedParams, WordTimestamp,
        },
        scalability::{
            MemoryEfficiencyMetrics, MemoryTracker, ResourceAllocationStrategy, ResourceMonitor,
            ResourceUsageMetrics, ScalabilityConfig, ScalabilityMetrics, ScalabilityTargets,
            ScalableConverter, ScalingAction, ScalingActionType, ScalingController,
            ScalingThresholds, ThroughputMetrics, ThroughputSample,
        },
        streaming::{StreamProcessor, StreamingConverter},
        // `StreamProcessor` also exists in `streaming`; the platform variant is renamed.
        streaming_platforms::{
            AdaptationDirection, AdaptationEvent, BandwidthAdaptationState, DiscordIntegration,
            FacebookIntegration, OBSIntegration, PlatformIntegration, RTMPIntegration,
            StreamConfig, StreamPerformanceMetrics, StreamPerformanceMonitor,
            StreamProcessor as StreamPlatformProcessor, StreamQuality, StreamSession,
            StreamVoiceMode, StreamingConstraints, StreamingPlatform, StreamlabsIntegration,
            TikTokIntegration, TwitchIntegration, XSplitIntegration, YouTubeIntegration,
        },
        style_consistency::{
            ConsistencyThresholds, PreservationMode, StyleAdaptationSettings,
            StyleConsistencyConfig, StyleConsistencyEngine, StyleElement,
        },
        style_transfer::{
            StyleCharacteristics, StyleTransferConfig, StyleTransferMethod, StyleTransferSystem,
        },
        thread_safety::{
            ConcurrentConversionManager, ConcurrentConversionMetrics, ThreadSafeModelManager,
        },
        transforms::{
            AgeTransform, GenderTransform, PitchTransform, SpeedTransform, Transform, VoiceMorpher,
        },
        types::{
            ConversionRequest, ConversionResult, ConversionTarget, ConversionType,
            VoiceCharacteristics,
        },
        webrtc_integration::{
            ConversionMode, NetworkConditions, QualityMode, VoiceConversionConfig,
            WebRTCAudioConfig, WebRTCProcessingStatistics, WebRTCVoiceProcessor,
        },
        // NOTE(review): `zero_shot` is not among the module declarations visible at
        // the top of this file — confirm the module is declared (possibly
        // feature-gated) elsewhere in the crate root.
        zero_shot::{
            ReferenceVoiceDatabase, SpeakerEmbedding, UniversalVoiceModel, ZeroShotConfig,
            ZeroShotConverter,
        },
        Error, Result,
    };

    // --- Feature-gated integration re-exports -------------------------------

    #[cfg(feature = "acoustic-integration")]
    pub use crate::acoustic::{
        AcousticConversionAdapter, AcousticConversionContext, AcousticConversionResult,
        AcousticFeatureConfig, AcousticFeatures, AcousticState, FormantFrequencies,
        HarmonicFeatures, TemporalFeatures, WindowType,
    };

    #[cfg(feature = "cloning-integration")]
    pub use crate::cloning::{
        CloningConversionAdapter, CloningConversionResult, CloningIntegration,
        CloningIntegrationConfig, TargetSpeakerInfo,
    };

    #[cfg(feature = "emotion-integration")]
    pub use crate::emotion::{EmotionConversionAdapter, EmotionParameters};

    #[cfg(feature = "spatial-integration")]
    pub use crate::spatial::{
        AmbisonicsOutput, BinauralAudioOutput, HrtfMetadata, SpatialAudioOutput,
        SpatialConversionAdapter, SpatialDirection, SpatialPosition, SpatialVoiceSource,
    };

    #[cfg(feature = "iot")]
    pub use crate::iot::{
        IoTConversionConfig, IoTConversionStatistics, IoTDeviceStatus, IoTPlatform, IoTPowerMode,
        IoTProcessingMode, IoTVoiceConverter, ResourceConstraints,
        ResourceUsage as IoTResourceUsage,
    };

    #[cfg(feature = "wasm")]
    pub use crate::wasm::{
        BrowserCapabilities, ConversionParameters, WasmConversionConfig, WasmConversionStatistics,
        WasmSupportLevel, WasmVoiceConverter, WebAudioNodeType,
    };
}