1//! # VoiRS Voice Cloning System
2//!
3//! This crate provides comprehensive voice cloning capabilities including few-shot speaker
4//! adaptation, speaker verification, voice similarity measurement, and cross-language cloning.
5//!
6//! ## Features
7//!
8//! - **Few-shot Learning**: Clone voices with as little as 30 seconds of audio
9//! - **Speaker Verification**: Verify speaker identity with high accuracy
10//! - **Cross-lingual Cloning**: Clone voices across different languages
11//! - **Real-time Adaptation**: Adapt speaker characteristics during synthesis
12//! - **Quality Assessment**: Automated quality evaluation and similarity measurement
13//! - **Ethical Safeguards**: Consent management, usage tracking, and authenticity detection
14//! - **Performance Optimization**: SIMD-accelerated operations, GPU support, and quantization
15//!
16//! ## Quick Start
17//!
18//! ### Basic Voice Cloning
19//!
20//! ```rust,no_run
21//! use voirs_cloning::{VoiceCloner, VoiceClonerBuilder, VoiceSample, CloningConfig};
22//!
23//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
24//! // Create a voice cloner with default configuration
25//! let cloner = VoiceClonerBuilder::new()
26//!     .config(CloningConfig::default())
27//!     .build()?;
28//!
29//! // Prepare voice samples (at least 30 seconds recommended)
30//! let audio_data = vec![0.1, -0.1, 0.2, -0.2]; // Your audio samples
31//! let sample = VoiceSample::new("speaker1".to_string(), audio_data, 16000);
32//!
33//! // Voice cloning is performed through speaker embedding and synthesis
34//! println!("Voice cloner ready for synthesis");
35//! # Ok(())
36//! # }
37//! ```
38//!
39//! ### Speaker Verification
40//!
41//! ```rust,no_run
42//! use voirs_cloning::{SpeakerVerifier, VoiceSample};
43//!
44//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
45//! let mut verifier = SpeakerVerifier::new(Default::default())?;
46//!
47//! let reference_sample = VoiceSample::new("speaker1".to_string(), vec![0.1; 16000], 16000);
48//! let test_sample = VoiceSample::new("speaker1".to_string(), vec![0.2; 16000], 16000);
49//!
50//! // Verify speaker by comparing samples
51//! let result = verifier.verify_samples(&reference_sample, &test_sample).await?;
52//! println!("Verification passed: {}", result.verified);
53//! println!("Similarity score: {}", result.score);
54//! # Ok(())
55//! # }
56//! ```
57//!
58//! ### Few-shot Learning
59//!
60//! ```rust,no_run
61//! use voirs_cloning::{FewShotLearner, FewShotConfig, VoiceSample};
62//!
63//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
64//! // Configure few-shot learner for 3-shot learning
65//! let config = FewShotConfig {
66//!     num_shots: 3,
67//!     quality_threshold: 0.5,
68//!     ..Default::default()
69//! };
70//!
71//! let mut learner = FewShotLearner::new(config)?;
72//!
73//! // Prepare 3 samples (30 seconds total recommended)
74//! let samples = vec![
75//!     VoiceSample::new("speaker1".to_string(), vec![0.1; 48000], 16000),
76//!     VoiceSample::new("speaker1".to_string(), vec![0.2; 48000], 16000),
77//!     VoiceSample::new("speaker1".to_string(), vec![0.3; 48000], 16000),
78//! ];
79//!
80//! // Adapt speaker with few-shot learning
81//! let result = learner.adapt_speaker("speaker1", &samples).await?;
82//! println!("Adaptation confidence: {}", result.confidence);
83//! println!("Quality score: {}", result.quality_score);
84//! # Ok(())
85//! # }
86//! ```
87//!
88//! ### Quality Assessment
89//!
90//! ```rust,no_run
91//! use voirs_cloning::{CloningQualityAssessor, VoiceSample};
92//!
93//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
94//! let mut assessor = CloningQualityAssessor::new()?;
95//!
96//! let original = VoiceSample::new("original".to_string(), vec![0.1; 16000], 16000);
97//! let cloned = VoiceSample::new("cloned".to_string(), vec![0.11; 16000], 16000);
98//!
99//! // Assess cloning quality
100//! let metrics = assessor.assess_quality(&original, &cloned).await?;
101//! println!("Overall quality: {}", metrics.overall_score);
102//! println!("Speaker similarity: {}", metrics.speaker_similarity);
103//! println!("Audio quality: {}", metrics.audio_quality);
104//! println!("Naturalness: {}", metrics.naturalness);
105//! # Ok(())
106//! # }
107//! ```
108//!
109//! ## Advanced Features
110//!
111//! ### Cross-lingual Voice Cloning
112//!
113//! ```rust,no_run
114//! use voirs_cloning::{FewShotLearner, FewShotConfig, VoiceSample};
115//!
116//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
117//! let config = FewShotConfig {
118//!     enable_cross_lingual: true,
119//!     ..Default::default()
120//! };
121//!
122//! let mut learner = FewShotLearner::new(config)?;
123//!
124//! let samples_en = vec![/* English samples */];
125//! let result = learner
126//!     .adapt_speaker_cross_lingual("speaker1", &samples_en, "en", "es")
127//!     .await?;
128//! # Ok(())
129//! # }
130//! ```
131//!
132//! ### Voice Morphing
133//!
134//! ```rust,no_run
135//! use voirs_cloning::{VoiceMorpher, VoiceMorphingConfig, VoiceMorphingRequest, MorphingWeight, InterpolationMethod};
136//!
137//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
138//! let morpher = VoiceMorpher::new(VoiceMorphingConfig::default())?;
139//!
140//! // Morph between two speakers with 50/50 blend
141//! let request = VoiceMorphingRequest {
142//!     target_id: "morphed_voice".to_string(),
143//!     speaker_weights: vec![
144//!         MorphingWeight {
145//!             speaker_id: "speaker1".to_string(),
146//!             weight: 0.5,
147//!             quality_boost: 1.0,
148//!             temporal_variation: None,
149//!         },
150//!         MorphingWeight {
151//!             speaker_id: "speaker2".to_string(),
152//!             weight: 0.5,
153//!             quality_boost: 1.0,
154//!             temporal_variation: None,
155//!         },
156//!     ],
157//!     config: VoiceMorphingConfig {
158//!         interpolation_method: InterpolationMethod::Weighted,
159//!         ..Default::default()
160//!     },
161//!     target_characteristics: None,
162//!     morphing_duration: None,
163//! };
164//!
165//! let result = morpher.morph_voices(request).await?;
166//! # Ok(())
167//! # }
168//! ```
169//!
170//! ### Age/Gender Adaptation
171//!
172//! ```rust,no_run
173//! use voirs_cloning::{AgeGenderAdapter, VoiceAdaptationTarget, AgeCategory, GenderCategory, VoiceSample};
174//!
175//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
176//! let mut adapter = AgeGenderAdapter::new();
177//!
178//! let source_samples = vec![VoiceSample::new("speaker1".to_string(), vec![0.1; 16000], 16000)];
179//!
180//! // Train adaptation model to sound younger and more feminine
181//! let target = VoiceAdaptationTarget {
182//!     age: AgeCategory::YoungAdult,
183//!     gender: GenderCategory::Feminine,
184//!     age_intensity: 0.7,
185//!     gender_intensity: 0.7,
186//!     identity_preservation: 0.8,
187//! };
188//!
189//! let model = adapter.train_adaptation_model("speaker1", &source_samples, target).await?;
190//! let result = adapter.adapt_voice(&model, &source_samples).await?;
191//! # Ok(())
192//! # }
193//! ```
194//!
195//! ## Performance Considerations
196//!
197//! ### GPU Acceleration
198//!
199//! Enable GPU acceleration for faster processing:
200//!
201//! ```rust,no_run
202//! use voirs_cloning::{GpuAccelerator, GpuAccelerationConfig};
203//!
204//! # fn example() -> Result<(), Box<dyn std::error::Error>> {
205//! // Create GPU accelerator with default configuration
206//! let config = GpuAccelerationConfig::default();
207//! let accelerator = GpuAccelerator::new(config)?;
208//!
209//! // GPU will be used automatically for supported operations
210//! # Ok(())
211//! # }
212//! ```
213//!
214//! ### Model Quantization
215//!
216//! Reduce memory footprint with quantization:
217//!
218//! ```rust,no_run
219//! use voirs_cloning::{ModelQuantizer, QuantizationConfig, QuantizationPrecision};
220//!
221//! # fn example() -> Result<(), Box<dyn std::error::Error>> {
222//! // Create a CPU device for quantization
223//! let device = candle_core::Device::Cpu;
224//!
225//! let config = QuantizationConfig {
226//!     precision: QuantizationPrecision::Int8,
227//!     ..Default::default()
228//! };
229//!
230//! let quantizer = ModelQuantizer::new(config, device)?;
231//! # Ok(())
232//! # }
233//! ```
234//!
235//! ## Ethical Usage
236//!
237//! ### Consent Management
238//!
239//! ```rust,no_run
240//! use voirs_cloning::ConsentManager;
241//!
242//! # fn example() -> Result<(), Box<dyn std::error::Error>> {
243//! // Create a consent manager for managing voice cloning consent
244//! let manager = ConsentManager::new();
245//!
246//! // The consent manager tracks and verifies consent for voice usage
247//! // See the consent module documentation for complete usage examples
248//! println!("Consent manager initialized");
249//! # Ok(())
250//! # }
251//! ```
252//!
253//! ### Usage Tracking
254//!
255//! ```rust,no_run
256//! use voirs_cloning::{UsageTracker, UsageTrackingConfig, CloningOperationType};
257//!
258//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
259//! let tracker = UsageTracker::new(UsageTrackingConfig::default());
260//!
261//! // Start tracking a voice cloning operation
262//! let operation = tracker.start_operation(
263//!     "user123".to_string(),
264//!     "speaker1".to_string(),
265//!     CloningOperationType::VoiceCloning
266//! ).await?;
267//! println!("Tracking operation: {}", operation.id);
268//! # Ok(())
269//! # }
270//! ```
271//!
272//! ## Architecture
273//!
274//! The voice cloning system is organized into several specialized modules:
275//!
276//! - **core**: Core voice cloning functionality and builder pattern
277//! - **embedding**: Speaker embedding extraction and similarity computation
278//! - **few_shot**: Few-shot learning algorithms for rapid adaptation
279//! - **verification**: Speaker verification and identity validation
280//! - **quality**: Quality assessment and perceptual evaluation
281//! - **consent**: Ethical safeguards and consent management
282//! - **usage_tracking**: Usage monitoring and audit logging
283//! - **authenticity**: Deepfake detection and authenticity validation
284//!
285//! ## Performance Benchmarks
286//!
287//! Run benchmarks to measure performance:
288//!
289//! ```bash
290//! cargo bench --features "acoustic-integration"
291//! ```
292//!
293//! ## Feature Flags
294//!
295//! - `acoustic-integration`: Enable integration with voirs-acoustic
296//! - `g2p-integration`: Enable integration with voirs-g2p
297//! - `gpu`: Enable GPU acceleration support
298//! - `cuda`: Enable CUDA GPU support
299//! - `metal`: Enable Metal GPU support
300//! - `wasm`: Enable WebAssembly support
301//!
302//! ## License
303//!
304//! This crate is part of the VoiRS project. See LICENSE for details.
305
306// Allow pedantic lints that are acceptable for audio/DSP processing code
307#![allow(clippy::cast_precision_loss)] // Acceptable for audio sample conversions
308#![allow(clippy::cast_possible_truncation)] // Controlled truncation in audio processing
309#![allow(clippy::cast_sign_loss)] // Intentional in index calculations
310#![allow(clippy::missing_errors_doc)] // Many internal functions with self-documenting error types
311#![allow(clippy::missing_panics_doc)] // Panics are documented where relevant
312#![allow(clippy::unused_self)] // Some trait implementations require &self for consistency
313#![allow(clippy::must_use_candidate)] // Not all return values need must_use annotation
314#![allow(clippy::doc_markdown)] // Technical terms don't all need backticks
315#![allow(clippy::unnecessary_wraps)] // Result wrappers maintained for API consistency
316#![allow(clippy::float_cmp)] // Exact float comparisons are intentional in some contexts
317#![allow(clippy::match_same_arms)] // Pattern matching clarity sometimes requires duplication
318#![allow(clippy::module_name_repetitions)] // Type names often repeat module names
319#![allow(clippy::struct_excessive_bools)] // Config structs naturally have many boolean flags
320#![allow(clippy::too_many_lines)] // Some functions are inherently complex
321#![allow(clippy::needless_pass_by_value)] // Some functions designed for ownership transfer
322#![allow(clippy::similar_names)] // Many similar variable names in algorithms
323#![allow(clippy::unused_async)] // Public API functions may need async for consistency
324#![allow(clippy::needless_range_loop)] // Range loops sometimes clearer than iterators
325#![allow(clippy::uninlined_format_args)] // Explicit argument names can improve clarity
326#![allow(clippy::manual_clamp)] // Manual clamping sometimes clearer
327#![allow(clippy::return_self_not_must_use)] // Not all builder methods need must_use
328#![allow(clippy::cast_possible_wrap)] // Controlled wrapping in processing code
329#![allow(clippy::cast_lossless)] // Explicit casts preferred for clarity
330#![allow(clippy::wildcard_imports)] // Prelude imports are convenient and standard
331#![allow(clippy::format_push_string)] // Sometimes more readable than alternative
332#![allow(clippy::redundant_closure_for_method_calls)] // Closures sometimes needed for type inference
333#![deny(unsafe_code)]
334
335pub mod ab_testing;
336pub mod adaptation;
337pub mod adversarial_robustness;
338pub mod age_gender_adaptation;
339pub mod api_standards;
340pub mod authenticity;
341pub mod auto_scaling;
342pub mod cloning_wizard;
343pub mod cloud_scaling;
344pub mod config;
345pub mod config_management;
346pub mod consent;
347pub mod consent_crypto;
348pub mod consistency_models;
349pub mod core;
350pub mod deep_mos;
351pub mod edge;
352pub mod embedding;
353pub mod emotion_transfer;
354pub mod enterprise_sso;
355pub mod error_handling;
356pub mod few_shot;
357pub mod flow_matching;
358pub mod gaming_plugins;
359pub mod gpu_acceleration;
360pub mod kernel_fusion;
361pub mod load_balancing;
362pub mod long_term_adaptation;
363pub mod long_term_stability;
364pub mod memory_optimization;
365pub mod misuse_prevention;
366pub mod mobile;
367pub mod model_loading;
368pub mod multimodal;
369pub mod neural_codec;
370pub mod perceptual_evaluation;
371pub mod performance_monitoring;
372pub mod personality;
373pub mod plugins;
374pub mod preprocessing;
375pub mod privacy_protection;
376pub mod qat;
377pub mod quality;
378pub mod quality_visualization;
379pub mod quantization;
380pub mod realtime_streaming;
381pub mod similarity;
382pub mod ssl_verification;
383pub mod storage;
384pub mod streaming_adaptation;
385pub mod thread_safety;
386pub mod types;
387pub mod usage_tracking;
388pub mod utils;
389pub mod verification;
390pub mod visual_editor;
391pub mod vits2;
392pub mod voice_aging;
393pub mod voice_library;
394pub mod voice_morphing;
395pub mod zero_shot;
396
397#[cfg(feature = "acoustic-integration")]
398pub mod acoustic;
399
400pub mod conversion;
401pub mod vocoder;
402
403#[cfg(feature = "wasm")]
404pub mod wasm;
405
406// Re-export main types and traits
407pub use ab_testing::{
408    ABTestConfig, ABTestResults, ABTestingFramework, CriteriaWeights, EvaluationResult,
409    ObjectiveComparisonResults, ObjectiveMetrics, PracticalSignificance, TestConclusion,
410    TestCondition, TestMethodology, TestStatistics, TestStatus, TestStatusType,
411};
412pub use age_gender_adaptation::{
413    AgeCategory, AgeGenderAdaptationConfig, AgeGenderAdaptationResult, AgeGenderAdapter,
414    AgeGenderModel, F0Statistics, GenderCategory, SpectralCharacteristics, VoiceAdaptationTarget,
415    VoiceCharacteristics, VoiceQualityMetrics,
416};
417pub use authenticity::{
418    ArtifactDetection, ArtifactType, AuthenticityConfig, AuthenticityDetector,
419    AuthenticityMetadata, AuthenticityResult, DetectorResult,
420};
421pub use auto_scaling::{
422    AutoScaler, AutoScalingConfig, AutoScalingStats, AutoScalingStrategy, CostImpact,
423    ExpectedImpact, InstanceHealth, InstanceState, PerformanceTier, ScalableGpuInstance,
424    ScalingAction, ScalingDecision, ScalingTrigger, WorkloadPrediction,
425};
426pub use config::{CloningConfig, CloningConfigBuilder};
427pub use config_management::{
428    ConfigChangeEvent, ConfigChangeType, ConfigFileFormat, ConfigManagerSettings, ConfigMetadata,
429    ConfigSnapshot, ConfigSource, Environment, SystemConfiguration, UnifiedConfigManager,
430    ValidationError, ValidationResult, ValidationWarning,
431};
432pub use consent::{
433    ConsentManager, ConsentPermissions, ConsentRecord, ConsentStatistics, ConsentStatus,
434    ConsentType, ConsentUsageContext, ConsentUsageResult, ConsentVerificationMethod,
435    SubjectIdentity, UsageRestrictions,
436};
437pub use core::{
438    AdaptationConfig, RealtimeSynthesisChunk, RealtimeSynthesisConfig, RealtimeSynthesisRequest,
439    RealtimeSynthesisResponse, SpeakerAdaptationResult, StreamSynthesisChunk,
440    StreamSynthesisRequest, StreamingSynthesisConfig, SynthesisConfig, VoiceCloner,
441    VoiceClonerBuilder,
442};
443pub use embedding::{SpeakerEmbedding, SpeakerEmbeddingExtractor};
444pub use emotion_transfer::{
445    EmotionCategory, EmotionTransfer, EmotionTransferConfig, EmotionTransferRequest,
446    EmotionTransferResult, EmotionTransferStatistics, EmotionalCharacteristics, ProsodyFeatures,
447};
448pub use enterprise_sso::{
449    AuthenticationMethod, AuthenticationRequest, AuthenticationResponse, AuthorizationResult,
450    EnterpriseSSOManager, JWTConfig, OAuthProvider, PasswordPolicy, Permission, PermissionScope,
451    RBACManager, Role, SAMLProvider, SSOConfig, UserSession,
452};
453pub use error_handling::{
454    ErrorClassification, ErrorContext, ErrorRecoveryManager, ErrorReport, ErrorReportingConfig,
455    ErrorSeverity, ErrorStatistics, PerformanceImpact, RecoverableError, RecoveryConfig,
456    RecoveryOperation, RecoveryProgress, RecoveryResult, RecoveryState, RecoveryStrategy,
457    RetryConfig,
458};
459pub use few_shot::{
460    DistanceMetric, FewShotConfig, FewShotLearner, FewShotMetrics, FewShotResult,
461    MetaLearningAlgorithm, SampleQuality,
462};
463pub use gaming_plugins::{
464    AudioAttenuation, AudioRolloffType, CombatState, DynamicVoiceCharacteristics, EmotionalState,
465    EnvironmentalFilter, GameContext, GameEngineType, GamePerformanceProfile, GameSession,
466    GameVoiceProfile, GameVoiceResult, GamingPluginConfig, GamingPluginManager, ReverbSettings,
467    SpatialAudioProperties, UnityPlugin, UnrealPlugin, VoiceInstance, VoicePlaybackState,
468    WeatherEffects,
469};
470pub use gpu_acceleration::{
471    GpuAccelerationConfig, GpuAccelerator, GpuDeviceType, GpuMemoryStats, GpuOperationType,
472    GpuPerformanceMetrics, GpuUtils, TensorOperation, TensorOperationResult,
473};
474pub use load_balancing::{
475    GpuAssignment, GpuDeviceInfo, GpuLoadBalancer, LoadBalancingConfig, LoadBalancingStats,
476    LoadBalancingStrategy, PerformancePrediction,
477};
478pub use long_term_adaptation::{
479    AdaptationResult, AdaptationStatistics, AdaptationStrategy, EfficiencyMetrics,
480    FeedbackCategory, FeedbackContext, FeedbackType, LongTermAdaptationConfig,
481    LongTermAdaptationEngine, ProcessingStatistics, RequestMetadata as AdaptationRequestMetadata,
482    UserFeedback,
483};
484pub use long_term_stability::{
485    RiskLevel, StabilityAssessment, StabilityCheckResult, StabilityConclusions,
486    StabilityStatistics, StabilityTestConfig, StabilityTestResults, StabilityValidator,
487};
488pub use memory_optimization::{
489    AlertSeverity, AlertType, AllocationInfo, AllocationType, CacheLimits, CompressedEmbedding,
490    DetailedMemoryStats, GarbageCollectionResult, LeakDetectionConfig, LeakSummary,
491    MemoryAuditReport, MemoryIssue, MemoryIssueType, MemoryLeakDetector, MemoryManager,
492    MemoryOptimizationConfig, MemoryOptimizationRecommendation, MemoryPool, MemoryPoolSizes,
493    MemoryPoolStats, MemoryRecommendation, MemoryStats, OptimizationCategory, OptimizationImpact,
494    PerformanceImpactAnalysis, PooledObject, RecommendationPriority, RecommendationType,
495};
496pub use mobile::{
497    CacheStrategy, MobileCloningConfig, MobileCloningStats, MobileDeviceInfo, MobilePlatform,
498    MobileVoiceCloner, NeonCloningOptimizer, PowerMode, ThermalState,
499};
500pub use model_loading::{
501    LoadingMetrics, LoadingStrategy, MemoryPressureLevel, ModelInterface, ModelLoadingConfig,
502    ModelLoadingManager, ModelMemoryManager, ModelMetadata, ModelPreloader, PreloadPriority,
503    PreloadRequest, UsagePatternAnalyzer,
504};
505pub use multimodal::{
506    AudioVisualAligner, ExpressionAnalysis, FacialGeometry, FacialGeometryAnalyzer, HeadPose,
507    LipFeatures, LipMovementAnalyzer, MultimodalCloneRequest, MultimodalCloner, MultimodalConfig,
508    VisualDataType, VisualFeatureExtractor, VisualFeatures, VisualSample,
509};
510pub use neural_codec::{
511    CodecCompressionRequest, CodecCompressionResult, CodecDecompressionResult, CodecMetadata,
512    CodecPerformanceStats, CodecQualityMetrics, NeuralCodec, NeuralCodecConfig, NeuralCodecManager,
513};
514pub use perceptual_evaluation::{
515    AgeGroup, AudioExperience, EvaluationResponse, EvaluationResults, EvaluationSample,
516    EvaluationScores, EvaluationStudy, ExpertiseLevel, HearingStatus, PerceptualEvaluationConfig,
517    PerceptualEvaluator, StudyResults,
518};
519pub use performance_monitoring::{
520    AdaptationMonitor, PerformanceMeasurement, PerformanceMetrics, PerformanceMonitor,
521    PerformanceStatistics, PerformanceTargets, TargetResults,
522};
523pub use personality::{
524    AnalysisMetadata, ConversationalStyle, LinguisticPreferences, PersonalityComponents,
525    PersonalityProfile, PersonalityTraits, PersonalityTransferConfig, PersonalityTransferEngine,
526    SpeakingPatterns, TransferStats,
527};
528pub use plugins::{
529    CloningPlugin, ExamplePlugin, ParameterConstraints, ParameterType, ParameterValue,
530    PluginCapabilities, PluginConfig, PluginContext, PluginDependency, PluginHealth,
531    PluginHealthStatus, PluginManager, PluginManagerConfig, PluginManifest, PluginMemoryStats,
532    PluginMetrics, PluginOperationMetrics, PluginParameter, PluginPerformanceMetrics,
533    PluginRegistry, PluginValidationResult,
534};
535pub use preprocessing::{AudioPreprocessor, PreprocessingPipeline};
536pub use quality::{CloningQualityAssessor, QualityMetrics};
537pub use quantization::{
538    LayerQuantizationConfig, ModelQuantizer, QuantizationConfig, QuantizationMemoryAnalysis,
539    QuantizationMethod, QuantizationPrecision, QuantizationResult, QuantizationStatsSummary,
540    QuantizedTensor,
541};
542pub use realtime_streaming::{
543    AdaptiveQualityController, AudioChunk, AudioDeviceConfig, AudioInputStream, AudioOutputStream,
544    LatencyMode, NetworkConditions, QualityAdaptationStrategy, RealtimeStreamingEngine,
545    SessionState, StreamingConfig, StreamingMetrics, StreamingSession, StreamingSessionType,
546    VADAlgorithm, VoiceActivityDetector, VoiceProcessingPipeline,
547};
548pub use similarity::{SimilarityMeasurer, SimilarityScore};
549pub use storage::{
550    AccessStats, CompressionAlgorithm, CompressionInfo, CompressionStatistics, HealthIndicators,
551    MaintenanceReport, MaintenanceStatistics, ModelFilter, SpeakerInfo, StorageConfig, StorageInfo,
552    StorageOperation, StorageOperationResult, StorageStatistics, StorageTier, StoredModelMetadata,
553    VoiceCharacteristicsSummary, VoiceModelStorage,
554};
555pub use streaming_adaptation::{
556    AdaptationStep, StreamingAdaptationConfig, StreamingAdaptationManager,
557    StreamingAdaptationManagerStats, StreamingAdaptationResult, StreamingAdaptationSession,
558    StreamingAdaptationStats,
559};
560pub use thread_safety::{
561    CacheStats, ComponentHealthMonitor, ComponentRegistry, ComponentStatus, ModelCache,
562    OperationCoordinator, OperationGuard, OperationState, OperationStatus,
563    PerformanceMetrics as ThreadPerformanceMetrics, ResourceLimits, ResourceMonitor,
564};
565pub use types::{
566    CloningMethod, SpeakerData, SpeakerProfile, VoiceCloneRequest, VoiceCloneResult, VoiceSample,
567};
568pub use usage_tracking::{
569    CloningOperation, CloningOperationType, ComplianceStatus, OperationRecord,
570    OperationRequestMetadata as RequestMetadata, Priority, ResourceUsage, UsageOutcome,
571    UsageRecord, UsageStatistics, UsageStatus, UsageTracker, UsageTrackingConfig, UserContext,
572    UserPreferences,
573};
574pub use verification::{SpeakerVerifier, VerificationResult};
575pub use vits2::{
576    Vits2Cloner, Vits2Config, Vits2PerformanceStats, Vits2QualityMetrics, Vits2SynthesisRequest,
577    Vits2SynthesisResult,
578};
579pub use voice_aging::{
580    AgeTransition, AgingCharacteristics, AgingCurveType, AgingFactors, AgingQuality,
581    AgingStatistics, ArticulatoryAging, FormantAging, ProsodicAging, RespiratoryAging,
582    StabilityFactors, TemporalModel, TransitionType, VariationFactors, VoiceAgingConfig,
583    VoiceAgingEngine, VoiceAgingModel, VoiceAgingResult, VoiceQualityAging,
584};
585pub use voice_morphing::{
586    InterpolationMethod, MorphingWeight, RealtimeMorphingSession, VoiceMorpher,
587    VoiceMorphingConfig, VoiceMorphingRequest, VoiceMorphingResult,
588};
589pub use zero_shot::{
590    ReferenceVoice, ZeroShotCloner, ZeroShotConfig, ZeroShotMethod, ZeroShotResult,
591};
592
593#[cfg(feature = "wasm")]
594pub use wasm::{
595    WasmCloneRequest, WasmCloneResult, WasmCloningConfig, WasmConsentRecord, WasmQualityMetrics,
596    WasmSpeakerProfile, WasmVerificationResult, WasmVoiceCloner, WasmVoiceSample,
597};
598
/// Result type for voice cloning operations.
///
/// Convenient alias used by all fallible public APIs in this crate, with
/// [`Error`] as the unified error type.
pub type Result<T> = std::result::Result<T, Error>;
601
/// Unified error type for voice cloning operations.
///
/// Most variants carry a human-readable message string describing the
/// failure. The last three variants wrap underlying errors from `std::io`,
/// `serde_json`, and `candle_core` via `#[from]`, so `?` converts them
/// automatically.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// Configuration error (invalid or inconsistent settings)
    #[error("Configuration error: {0}")]
    Config(String),

    /// Generic processing error; also the target of the `From<&str>` /
    /// `From<String>` conversions below
    #[error("Processing error: {0}")]
    Processing(String),

    /// Model loading or inference error
    #[error("Model error: {0}")]
    Model(String),

    /// Audio decoding or processing error
    #[error("Audio error: {0}")]
    Audio(String),

    /// Speaker embedding extraction error
    #[error("Embedding error: {0}")]
    Embedding(String),

    /// Speaker verification error
    #[error("Verification error: {0}")]
    Verification(String),

    /// Quality assessment error
    #[error("Quality assessment error: {0}")]
    Quality(String),

    /// Not enough sample data to perform the requested operation
    #[error("Insufficient data: {0}")]
    InsufficientData(String),

    /// Validation error
    #[error("Validation error: {0}")]
    Validation(String),

    /// Invalid input error
    #[error("Invalid input: {0}")]
    InvalidInput(String),

    /// Consent management error
    #[error("Consent error: {0}")]
    Consent(String),

    /// Authentication error
    #[error("Authentication error: {0}")]
    Authentication(String),

    /// Usage tracking error
    #[error("Usage tracking error: {0}")]
    UsageTracking(String),

    /// Ethics and compliance error
    #[error("Ethics violation: {0}")]
    Ethics(String),

    /// Lock acquisition / poisoning error
    #[error("Lock error: {0}")]
    LockError(String),

    /// I/O error (wraps `std::io::Error`)
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    /// Serialization error (wraps `serde_json::Error`)
    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),

    /// Tensor backend error (wraps `candle_core::Error`)
    #[error("Candle error: {0}")]
    Candle(#[from] candle_core::Error),
}
677
678impl From<&str> for Error {
679    fn from(s: &str) -> Self {
680        Error::Processing(s.to_string())
681    }
682}
683
684impl From<String> for Error {
685    fn from(s: String) -> Self {
686        Error::Processing(s)
687    }
688}
689
690/// Prelude module for convenient imports
691pub mod prelude {
692    pub use crate::{
693        adaptation::{AdaptationMethod, SpeakerAdapter},
694        age_gender_adaptation::{
695            AgeCategory, AgeGenderAdaptationConfig, AgeGenderAdaptationResult, AgeGenderAdapter,
696            GenderCategory, VoiceAdaptationTarget, VoiceCharacteristics,
697        },
698        auto_scaling::{
699            AutoScaler, AutoScalingConfig, AutoScalingStats, AutoScalingStrategy, CostImpact,
700            ExpectedImpact, InstanceHealth, InstanceState, PerformanceTier, ScalableGpuInstance,
701            ScalingAction, ScalingDecision, ScalingTrigger, WorkloadPrediction,
702        },
703        config::{CloningConfig, CloningConfigBuilder},
704        consent::{
705            ConsentManager, ConsentPermissions, ConsentRecord, ConsentStatistics, ConsentStatus,
706            ConsentType, ConsentUsageContext, ConsentUsageResult, ConsentVerificationMethod,
707            SubjectIdentity, UsageRestrictions,
708        },
709        core::{
710            AdaptationConfig, RealtimeSynthesisConfig, RealtimeSynthesisRequest,
711            RealtimeSynthesisResponse, SpeakerAdaptationResult, SynthesisConfig, VoiceCloner,
712            VoiceClonerBuilder,
713        },
714        embedding::{SpeakerEmbedding, SpeakerEmbeddingExtractor},
715        emotion_transfer::{
716            EmotionCategory, EmotionTransfer, EmotionTransferConfig, EmotionTransferRequest,
717            EmotionTransferResult, EmotionTransferStatistics, EmotionalCharacteristics,
718            ProsodyFeatures,
719        },
720        error_handling::{
721            ErrorClassification, ErrorContext, ErrorRecoveryManager, ErrorReport, ErrorSeverity,
722            RecoverableError, RecoveryConfig, RecoveryResult, RecoveryStrategy,
723        },
724        few_shot::{
725            DistanceMetric, FewShotConfig, FewShotLearner, FewShotMetrics, FewShotResult,
726            MetaLearningAlgorithm, SampleQuality,
727        },
728        gpu_acceleration::{
729            GpuAccelerationConfig, GpuAccelerator, GpuDeviceType, GpuMemoryStats, GpuOperationType,
730            GpuPerformanceMetrics, GpuUtils, TensorOperation, TensorOperationResult,
731        },
732        load_balancing::{
733            GpuAssignment, GpuDeviceInfo, GpuLoadBalancer, LoadBalancingConfig, LoadBalancingStats,
734            LoadBalancingStrategy, PerformancePrediction,
735        },
736        long_term_stability::{
737            RiskLevel, StabilityAssessment, StabilityCheckResult, StabilityTestConfig,
738            StabilityTestResults, StabilityValidator,
739        },
740        memory_optimization::{
741            CacheLimits, CompressedEmbedding, GarbageCollectionResult, MemoryManager,
742            MemoryOptimizationConfig, MemoryOptimizationRecommendation, MemoryPool,
743            MemoryPoolSizes, MemoryPoolStats, MemoryStats, OptimizationCategory,
744            OptimizationImpact, PooledObject,
745        },
746        mobile::{
747            CacheStrategy, MobileCloningConfig, MobileCloningStats, MobileDeviceInfo,
748            MobilePlatform, MobileVoiceCloner, NeonCloningOptimizer, PowerMode, ThermalState,
749        },
750        neural_codec::{
751            CodecCompressionRequest, CodecCompressionResult, CodecDecompressionResult,
752            CodecMetadata, CodecPerformanceStats, CodecQualityMetrics, NeuralCodec,
753            NeuralCodecConfig, NeuralCodecManager,
754        },
755        perceptual_evaluation::{
756            AgeGroup, AudioExperience, EvaluationResponse, EvaluationResults, EvaluationSample,
757            EvaluationScores, EvaluationStudy, ExpertiseLevel, HearingStatus,
758            PerceptualEvaluationConfig, PerceptualEvaluator, StudyResults,
759        },
760        performance_monitoring::{
761            AdaptationMonitor, PerformanceMeasurement, PerformanceMetrics, PerformanceMonitor,
762            PerformanceStatistics, PerformanceTargets, TargetResults,
763        },
764        personality::{
765            AnalysisMetadata, ConversationalStyle, LinguisticPreferences, PersonalityComponents,
766            PersonalityProfile, PersonalityTraits, PersonalityTransferConfig,
767            PersonalityTransferEngine, SpeakingPatterns, TransferStats,
768        },
769        plugins::{
770            CloningPlugin, ExamplePlugin, PluginCapabilities, PluginConfig, PluginContext,
771            PluginHealth, PluginHealthStatus, PluginManager, PluginManagerConfig,
772            PluginValidationResult,
773        },
774        preprocessing::{AudioPreprocessor, PreprocessingPipeline},
775        quality::{CloningQualityAssessor, QualityMetrics},
776        quantization::{
777            LayerQuantizationConfig, ModelQuantizer, QuantizationConfig,
778            QuantizationMemoryAnalysis, QuantizationMethod, QuantizationPrecision,
779            QuantizationResult, QuantizedTensor,
780        },
781        similarity::{SimilarityMeasurer, SimilarityScore},
782        storage::{
783            CompressionAlgorithm, MaintenanceReport, ModelFilter, SpeakerInfo, StorageConfig,
784            StorageInfo, StorageOperation, StorageOperationResult, StorageStatistics, StorageTier,
785            StoredModelMetadata, VoiceModelStorage,
786        },
787        streaming_adaptation::{
788            AdaptationStep, StreamingAdaptationConfig, StreamingAdaptationManager,
789            StreamingAdaptationManagerStats, StreamingAdaptationResult, StreamingAdaptationSession,
790            StreamingAdaptationStats,
791        },
792        thread_safety::{
793            CacheStats, ComponentHealthMonitor, ComponentRegistry, ComponentStatus, ModelCache,
794            OperationCoordinator, OperationState, OperationStatus,
795            PerformanceMetrics as ThreadPerformanceMetrics, ResourceLimits, ResourceMonitor,
796            UnifiedConfigManager,
797        },
798        types::{
799            CloningMethod, SpeakerData, SpeakerProfile, VoiceCloneRequest, VoiceCloneResult,
800            VoiceSample,
801        },
802        usage_tracking::{
803            CloningOperationType, OperationRecord, ResourceUsage, UsageRecord, UsageStatistics,
804            UsageStatus, UsageTracker, UsageTrackingConfig, UserContext,
805        },
806        verification::{SpeakerVerifier, VerificationResult},
807        vits2::{
808            Vits2Cloner, Vits2Config, Vits2PerformanceStats, Vits2QualityMetrics,
809            Vits2SynthesisRequest, Vits2SynthesisResult,
810        },
811        voice_aging::{
812            AgeTransition, AgingCharacteristics, AgingCurveType, AgingFactors, AgingQuality,
813            VoiceAgingConfig, VoiceAgingEngine, VoiceAgingModel, VoiceAgingResult,
814        },
815        voice_morphing::{
816            InterpolationMethod, MorphingWeight, RealtimeMorphingSession, VoiceMorpher,
817            VoiceMorphingConfig, VoiceMorphingRequest, VoiceMorphingResult,
818        },
819        zero_shot::{
820            ReferenceVoice, ZeroShotCloner, ZeroShotConfig, ZeroShotMethod, ZeroShotResult,
821        },
822        Error, Result,
823    };
824}