// voirs_recognizer/preprocessing/adaptive_algorithms.rs
1//! Adaptive algorithms for dynamic audio processing parameter adjustment
2//!
3//! This module provides intelligent, adaptive processing that automatically
4//! adjusts parameters based on real-time audio characteristics including:
5//! - Adaptive noise suppression based on SNR estimation
6//! - Dynamic gain control with speech/music detection
7//! - Intelligent echo cancellation parameter adjustment
8//! - Adaptive filtering based on audio content analysis
9
10use crate::RecognitionError;
11use std::collections::VecDeque;
12use voirs_sdk::AudioBuffer;
13
/// Audio content type detected by the adaptive system.
///
/// The classification selects which processing profile
/// `determine_parameters` applies to the frame.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AudioContentType {
    /// Predominantly human speech (pitched, harmonic, non-flat spectrum).
    Speech,
    /// Predominantly music (strong harmonicity, stable energy).
    Music,
    /// Predominantly noise (flat spectrum, little pitch, unstable energy).
    Noise,
    /// Mixed or ambiguous content; also the fallback classification.
    Mixed,
    /// Near-silence (very low RMS energy).
    Silence,
}
29
/// Adaptive algorithm configuration.
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Window size for spectral analysis (samples).
    pub analysis_window_size: usize,
    /// Overlap between analysis windows (fraction, 0.0 - 1.0).
    pub window_overlap: f32,
    /// Number of past frames kept for adaptive decisions.
    pub history_length: usize,
    /// Adaptation rate (0.0 - 1.0); higher adapts faster, smooths less.
    pub adaptation_rate: f32,
    /// SNR threshold (dB) below which speech gets aggressive suppression.
    pub snr_threshold: f32,
    /// Speech detection sensitivity (score threshold, 0.0 - 1.0).
    pub speech_detection_sensitivity: f32,
    /// Music detection sensitivity (score threshold, 0.0 - 1.0).
    pub music_detection_sensitivity: f32,
    /// Sample rate (Hz).
    pub sample_rate: u32,
}
51
52impl Default for AdaptiveConfig {
53    fn default() -> Self {
54        Self {
55            analysis_window_size: 2048,
56            window_overlap: 0.5,
57            history_length: 10,
58            adaptation_rate: 0.1,
59            snr_threshold: 10.0,
60            speech_detection_sensitivity: 0.7,
61            music_detection_sensitivity: 0.6,
62            sample_rate: 16000,
63        }
64    }
65}
66
/// Audio analysis features used for adaptive processing.
#[derive(Debug, Clone)]
pub struct AudioFeatures {
    /// Signal-to-noise ratio (dB), derived from the running noise floor.
    pub snr_db: f32,
    /// Zero crossing rate (fraction of sign changes between neighbors).
    pub zero_crossing_rate: f32,
    /// Spectral centroid (Hz).
    pub spectral_centroid: f32,
    /// Spectral rolloff (Hz) — frequency below the 85% energy point.
    pub spectral_rolloff: f32,
    /// Spectral flux (positive magnitude change vs. the previous frame).
    pub spectral_flux: f32,
    /// Harmonic-to-noise ratio (rough estimate, dB).
    pub harmonic_noise_ratio: f32,
    /// Energy (RMS).
    pub energy: f32,
    /// Pitch confidence (peak normalized autocorrelation, 0.0 - 1.0).
    pub pitch_confidence: f32,
    /// Spectral flatness (geometric / arithmetic mean of magnitudes).
    pub spectral_flatness: f32,
    /// Temporal stability (1 − coefficient of variation of chunk energies).
    pub temporal_stability: f32,
}
92
/// Adaptive processing parameters determined by the algorithm.
#[derive(Debug, Clone)]
pub struct AdaptiveParameters {
    /// Noise suppression strength (0.0 - 1.0).
    pub noise_suppression_strength: f32,
    /// AGC target level (dB).
    pub agc_target_level: f32,
    /// AGC attack time (seconds).
    pub agc_attack_time: f32,
    /// AGC release time (seconds).
    pub agc_release_time: f32,
    /// Echo cancellation filter length (taps).
    pub echo_filter_length: usize,
    /// Echo cancellation adaptation rate.
    pub echo_adaptation_rate: f32,
    /// Bandwidth extension strength (0.0 - 1.0).
    pub bandwidth_extension_strength: f32,
    /// Content type classification that selected this profile.
    pub content_type: AudioContentType,
    /// Confidence in the content classification (0.0 - 1.0).
    pub classification_confidence: f32,
}
116
117impl Default for AdaptiveParameters {
118    fn default() -> Self {
119        Self {
120            noise_suppression_strength: 0.5,
121            agc_target_level: -20.0,
122            agc_attack_time: 0.001,
123            agc_release_time: 0.1,
124            echo_filter_length: 1024,
125            echo_adaptation_rate: 0.01,
126            bandwidth_extension_strength: 0.3,
127            content_type: AudioContentType::Mixed,
128            classification_confidence: 0.5,
129        }
130    }
131}
132
/// Adaptive processing statistics.
#[derive(Debug, Clone)]
pub struct AdaptiveStats {
    /// Number of smoothing adaptations performed so far.
    pub adaptations_count: u32,
    /// Average parameter change between consecutive history entries.
    pub avg_adaptation_rate: f32,
    /// Content detection accuracy estimate — a classification-consistency
    /// proxy in 0.0 - 1.0, not a ground-truth accuracy.
    pub detection_accuracy: f32,
    /// Processing overhead of the analysis call (ms).
    pub processing_overhead_ms: f64,
    /// Parameters in effect when this snapshot was taken.
    pub current_parameters: AdaptiveParameters,
}
148
/// Result of adaptive processing: the chosen parameters, the features
/// they were derived from, and bookkeeping statistics.
#[derive(Debug, Clone)]
pub struct AdaptiveResult {
    /// Smoothed adaptive parameters for this frame.
    pub parameters: AdaptiveParameters,
    /// Audio features extracted from the frame.
    pub features: AudioFeatures,
    /// Processing statistics (counts, rates, timing).
    pub stats: AdaptiveStats,
}
160
/// Adaptive algorithm processor.
///
/// Holds the configuration, bounded rolling histories of features,
/// parameters and classifications, and the analysis sub-components.
#[derive(Debug)]
pub struct AdaptiveProcessor {
    config: AdaptiveConfig,
    /// Recent feature snapshots (bounded by `config.history_length`).
    features_history: VecDeque<AudioFeatures>,
    /// Recent smoothed parameter sets (same bound).
    parameters_history: VecDeque<AdaptiveParameters>,
    /// Recent content classifications (same bound).
    content_type_history: VecDeque<AudioContentType>,
    /// Number of smoothing adaptations performed.
    adaptation_count: u32,
    /// Most recently applied (smoothed) parameters.
    last_parameters: AdaptiveParameters,
    noise_estimator: NoiseEstimator,
    pitch_tracker: PitchTracker,
    spectral_analyzer: SpectralAnalyzer,
}
175
176impl AdaptiveProcessor {
177    /// Create a new adaptive processor
178    pub fn new(config: AdaptiveConfig) -> Result<Self, RecognitionError> {
179        let features_history = VecDeque::with_capacity(config.history_length);
180        let parameters_history = VecDeque::with_capacity(config.history_length);
181        let content_type_history = VecDeque::with_capacity(config.history_length);
182
183        let noise_estimator = NoiseEstimator::new(config.sample_rate)?;
184        let pitch_tracker = PitchTracker::new(config.sample_rate)?;
185        let spectral_analyzer =
186            SpectralAnalyzer::new(config.analysis_window_size, config.sample_rate)?;
187
188        Ok(Self {
189            config,
190            features_history,
191            parameters_history,
192            content_type_history,
193            adaptation_count: 0,
194            last_parameters: AdaptiveParameters::default(),
195            noise_estimator,
196            pitch_tracker,
197            spectral_analyzer,
198        })
199    }
200
201    /// Analyze audio and adapt processing parameters
202    pub fn analyze_and_adapt(
203        &mut self,
204        audio: &AudioBuffer,
205    ) -> Result<AdaptiveResult, RecognitionError> {
206        let start_time = std::time::Instant::now();
207
208        // Extract audio features
209        let features = self.extract_features(audio)?;
210
211        // Classify content type
212        let content_type = self.classify_content(&features)?;
213
214        // Determine adaptive parameters
215        let mut parameters = self.determine_parameters(&features, content_type)?;
216
217        // Apply temporal smoothing
218        parameters = self.apply_temporal_smoothing(parameters)?;
219
220        // Update history
221        self.update_history(features.clone(), parameters.clone(), content_type);
222
223        // Calculate statistics
224        let stats = self.calculate_stats(&parameters);
225
226        let processing_time = start_time.elapsed().as_secs_f64() * 1000.0;
227
228        Ok(AdaptiveResult {
229            parameters,
230            features,
231            stats: AdaptiveStats {
232                adaptations_count: self.adaptation_count,
233                avg_adaptation_rate: self.calculate_avg_adaptation_rate(),
234                detection_accuracy: self.calculate_detection_accuracy(),
235                processing_overhead_ms: processing_time,
236                current_parameters: self.last_parameters.clone(),
237            },
238        })
239    }
240
241    /// Extract comprehensive audio features
242    fn extract_features(&mut self, audio: &AudioBuffer) -> Result<AudioFeatures, RecognitionError> {
243        let samples = audio.samples();
244
245        // Basic energy and RMS
246        let energy = self.calculate_energy(samples);
247
248        // Zero crossing rate
249        let zcr = self.calculate_zero_crossing_rate(samples);
250
251        // Noise estimation and SNR
252        let noise_level = self.noise_estimator.estimate(samples)?;
253        let signal_level = energy;
254        let snr_db = if noise_level > 0.0 {
255            20.0 * (signal_level / noise_level).log10()
256        } else {
257            60.0 // Very high SNR if no noise detected
258        };
259
260        // Spectral features
261        let spectral_features = self.spectral_analyzer.analyze(samples)?;
262
263        // Pitch tracking
264        let pitch_features = self.pitch_tracker.track(samples)?;
265
266        // Temporal stability
267        let temporal_stability = self.calculate_temporal_stability(samples);
268
269        Ok(AudioFeatures {
270            snr_db,
271            zero_crossing_rate: zcr,
272            spectral_centroid: spectral_features.centroid,
273            spectral_rolloff: spectral_features.rolloff,
274            spectral_flux: spectral_features.flux,
275            harmonic_noise_ratio: pitch_features.harmonic_noise_ratio,
276            energy,
277            pitch_confidence: pitch_features.confidence,
278            spectral_flatness: spectral_features.flatness,
279            temporal_stability,
280        })
281    }
282
    /// Classify audio content type from extracted features.
    ///
    /// Heuristic weighted scoring: an energy gate short-circuits to
    /// `Silence`; otherwise speech, music and noise scores compete, and
    /// weak or ambiguous evidence falls back to `Mixed`. The exact float
    /// comparisons (`score == max_score`) are intentional: `max_score` is
    /// one of the three scores, so equality identifies the winner.
    fn classify_content(
        &self,
        features: &AudioFeatures,
    ) -> Result<AudioContentType, RecognitionError> {
        // Multi-feature classifier for content type detection

        // Silence detection: very low RMS energy wins outright.
        if features.energy < 0.001 {
            return Ok(AudioContentType::Silence);
        }

        // Speech indicators: pitched, harmonic, moderate zero-crossing
        // rate, non-flat spectrum (each term normalized then weighted).
        let speech_score = (features.pitch_confidence * 0.3)
            + ((features.harmonic_noise_ratio.min(10.0) / 10.0) * 0.2)
            + ((features.zero_crossing_rate.min(0.3) / 0.3) * 0.2)
            + ((1.0 - features.spectral_flatness) * 0.3);

        // Music indicators: strong harmonicity plus temporal stability.
        let music_score = (features.harmonic_noise_ratio.min(20.0) / 20.0 * 0.4)
            + ((1.0 - features.spectral_flatness) * 0.3)
            + (features.temporal_stability * 0.3);

        // Noise indicators: flat spectrum, no pitch, unstable energy.
        let noise_score = (features.spectral_flatness * 0.5)
            + ((1.0 - features.pitch_confidence) * 0.3)
            + ((1.0 - features.temporal_stability) * 0.2);

        // Classification based on scores
        let max_score = speech_score.max(music_score).max(noise_score);

        if max_score < 0.4 {
            // Weak evidence overall -> ambiguous.
            Ok(AudioContentType::Mixed)
        } else if speech_score == max_score
            && speech_score > self.config.speech_detection_sensitivity
        {
            Ok(AudioContentType::Speech)
        } else if music_score == max_score && music_score > self.config.music_detection_sensitivity
        {
            Ok(AudioContentType::Music)
        } else if noise_score == max_score {
            Ok(AudioContentType::Noise)
        } else {
            // A winning score that failed its sensitivity threshold.
            Ok(AudioContentType::Mixed)
        }
    }
329
330    /// Determine optimal parameters based on features and content type
331    fn determine_parameters(
332        &self,
333        features: &AudioFeatures,
334        content_type: AudioContentType,
335    ) -> Result<AdaptiveParameters, RecognitionError> {
336        let mut params = AdaptiveParameters::default();
337        params.content_type = content_type;
338
339        match content_type {
340            AudioContentType::Speech => {
341                // Optimize for speech clarity
342                params.noise_suppression_strength = if features.snr_db < self.config.snr_threshold {
343                    0.8 // Aggressive noise suppression for noisy speech
344                } else {
345                    0.3 // Light noise suppression for clean speech
346                };
347
348                params.agc_target_level = -18.0; // Optimize for speech intelligibility
349                params.agc_attack_time = 0.002; // Fast attack for speech transients
350                params.agc_release_time = 0.05; // Medium release
351                params.echo_filter_length = 2048; // Longer filter for speech
352                params.echo_adaptation_rate = 0.02; // Moderate adaptation
353                params.bandwidth_extension_strength = 0.5; // Enhance speech bandwidth
354                params.classification_confidence = 0.9;
355            }
356
357            AudioContentType::Music => {
358                // Optimize for musical quality
359                params.noise_suppression_strength = 0.2; // Gentle noise suppression
360                params.agc_target_level = -23.0; // Preserve dynamics
361                params.agc_attack_time = 0.005; // Slower attack to preserve transients
362                params.agc_release_time = 0.2; // Longer release for music
363                params.echo_filter_length = 1024; // Shorter filter for music
364                params.echo_adaptation_rate = 0.005; // Slow adaptation
365                params.bandwidth_extension_strength = 0.7; // Enhance full bandwidth
366                params.classification_confidence = 0.8;
367            }
368
369            AudioContentType::Noise => {
370                // Aggressive noise reduction
371                params.noise_suppression_strength = 0.9;
372                params.agc_target_level = -25.0; // Lower target level
373                params.agc_attack_time = 0.001; // Fast attack
374                params.agc_release_time = 0.3; // Slow release
375                params.echo_filter_length = 512; // Short filter
376                params.echo_adaptation_rate = 0.001; // Very slow adaptation
377                params.bandwidth_extension_strength = 0.1; // Minimal enhancement
378                params.classification_confidence = 0.7;
379            }
380
381            AudioContentType::Silence => {
382                // Minimal processing for silence
383                params.noise_suppression_strength = 0.1;
384                params.agc_target_level = -30.0;
385                params.agc_attack_time = 0.01;
386                params.agc_release_time = 0.5;
387                params.echo_filter_length = 256;
388                params.echo_adaptation_rate = 0.0;
389                params.bandwidth_extension_strength = 0.0;
390                params.classification_confidence = 0.95;
391            }
392
393            AudioContentType::Mixed => {
394                // Balanced parameters
395                params.noise_suppression_strength = 0.5;
396                params.agc_target_level = -20.0;
397                params.agc_attack_time = 0.003;
398                params.agc_release_time = 0.1;
399                params.echo_filter_length = 1024;
400                params.echo_adaptation_rate = 0.01;
401                params.bandwidth_extension_strength = 0.4;
402                params.classification_confidence = 0.5;
403            }
404        }
405
406        // Fine-tune based on SNR
407        if features.snr_db < 5.0 {
408            params.noise_suppression_strength = (params.noise_suppression_strength + 0.3).min(1.0);
409        } else if features.snr_db > 20.0 {
410            params.noise_suppression_strength = (params.noise_suppression_strength - 0.2).max(0.0);
411        }
412
413        Ok(params)
414    }
415
416    /// Apply temporal smoothing to prevent parameter oscillation
417    fn apply_temporal_smoothing(
418        &mut self,
419        mut params: AdaptiveParameters,
420    ) -> Result<AdaptiveParameters, RecognitionError> {
421        if self.parameters_history.is_empty() {
422            self.last_parameters = params.clone();
423            return Ok(params);
424        }
425
426        let alpha = self.config.adaptation_rate;
427
428        // Smooth critical parameters
429        params.noise_suppression_strength = alpha * params.noise_suppression_strength
430            + (1.0 - alpha) * self.last_parameters.noise_suppression_strength;
431
432        params.agc_target_level =
433            alpha * params.agc_target_level + (1.0 - alpha) * self.last_parameters.agc_target_level;
434
435        params.bandwidth_extension_strength = alpha * params.bandwidth_extension_strength
436            + (1.0 - alpha) * self.last_parameters.bandwidth_extension_strength;
437
438        self.last_parameters = params.clone();
439        self.adaptation_count += 1;
440
441        Ok(params)
442    }
443
    /// Update processing history with the latest frame's results.
    ///
    /// Each rolling buffer is capped at `config.history_length`: the
    /// oldest entry is evicted before pushing once the cap is reached.
    fn update_history(
        &mut self,
        features: AudioFeatures,
        parameters: AdaptiveParameters,
        content_type: AudioContentType,
    ) {
        // Evict before push so the length never exceeds the cap.
        if self.features_history.len() >= self.config.history_length {
            self.features_history.pop_front();
        }
        if self.parameters_history.len() >= self.config.history_length {
            self.parameters_history.pop_front();
        }
        if self.content_type_history.len() >= self.config.history_length {
            self.content_type_history.pop_front();
        }

        self.features_history.push_back(features);
        self.parameters_history.push_back(parameters);
        self.content_type_history.push_back(content_type);
    }
465
    /// Assemble an [`AdaptiveStats`] snapshot around `parameters`.
    ///
    /// `processing_overhead_ms` is left at 0.0 here; the caller measures
    /// and fills in the actual wall-clock time.
    fn calculate_stats(&self, parameters: &AdaptiveParameters) -> AdaptiveStats {
        AdaptiveStats {
            adaptations_count: self.adaptation_count,
            avg_adaptation_rate: self.calculate_avg_adaptation_rate(),
            detection_accuracy: self.calculate_detection_accuracy(),
            processing_overhead_ms: 0.0, // Will be filled by caller
            current_parameters: parameters.clone(),
        }
    }
476
477    /// Calculate average adaptation rate
478    fn calculate_avg_adaptation_rate(&self) -> f32 {
479        if self.parameters_history.len() < 2 {
480            return 0.0;
481        }
482
483        let mut total_change = 0.0;
484        for i in 1..self.parameters_history.len() {
485            let prev = &self.parameters_history[i - 1];
486            let curr = &self.parameters_history[i];
487
488            let change = (curr.noise_suppression_strength - prev.noise_suppression_strength).abs() +
489                        (curr.agc_target_level - prev.agc_target_level).abs() / 10.0 + // Normalize dB scale
490                        (curr.bandwidth_extension_strength - prev.bandwidth_extension_strength).abs();
491
492            total_change += change;
493        }
494
495        total_change / (self.parameters_history.len() - 1) as f32
496    }
497
498    /// Calculate content detection accuracy estimate
499    fn calculate_detection_accuracy(&self) -> f32 {
500        if self.content_type_history.len() < 3 {
501            return 0.5;
502        }
503
504        // Estimate accuracy based on consistency of classification
505        let mut consistent_count = 0;
506        let window_size = 3;
507
508        for i in window_size..self.content_type_history.len() {
509            let recent: Vec<AudioContentType> = self
510                .content_type_history
511                .range((i - window_size)..i)
512                .copied()
513                .collect();
514            let most_common = Self::most_common_content_type(&recent);
515
516            if recent.iter().all(|&ct| ct == most_common) {
517                consistent_count += 1;
518            }
519        }
520
521        consistent_count as f32 / (self.content_type_history.len() - window_size) as f32
522    }
523
524    /// Find most common content type in slice
525    fn most_common_content_type(types: &[AudioContentType]) -> AudioContentType {
526        let mut counts = [0; 5]; // For 5 content types
527
528        for &content_type in types {
529            let index = match content_type {
530                AudioContentType::Speech => 0,
531                AudioContentType::Music => 1,
532                AudioContentType::Noise => 2,
533                AudioContentType::Mixed => 3,
534                AudioContentType::Silence => 4,
535            };
536            counts[index] += 1;
537        }
538
539        let max_index = counts
540            .iter()
541            .position(|&x| x == *counts.iter().max().unwrap())
542            .unwrap();
543        match max_index {
544            0 => AudioContentType::Speech,
545            1 => AudioContentType::Music,
546            2 => AudioContentType::Noise,
547            3 => AudioContentType::Mixed,
548            4 => AudioContentType::Silence,
549            _ => AudioContentType::Mixed,
550        }
551    }
552
553    /// Calculate basic audio energy
554    fn calculate_energy(&self, samples: &[f32]) -> f32 {
555        let sum_squares: f32 = samples.iter().map(|&x| x * x).sum();
556        (sum_squares / samples.len() as f32).sqrt()
557    }
558
559    /// Calculate zero crossing rate
560    fn calculate_zero_crossing_rate(&self, samples: &[f32]) -> f32 {
561        let mut crossings = 0;
562
563        for i in 1..samples.len() {
564            if (samples[i] >= 0.0) != (samples[i - 1] >= 0.0) {
565                crossings += 1;
566            }
567        }
568
569        crossings as f32 / (samples.len() - 1) as f32
570    }
571
572    /// Calculate temporal stability
573    fn calculate_temporal_stability(&self, samples: &[f32]) -> f32 {
574        if samples.len() < 100 {
575            return 0.5;
576        }
577
578        let chunk_size = samples.len() / 10;
579        let mut chunk_energies = Vec::new();
580
581        for i in 0..10 {
582            let start = i * chunk_size;
583            let end = ((i + 1) * chunk_size).min(samples.len());
584            let chunk = &samples[start..end];
585            let energy = self.calculate_energy(chunk);
586            chunk_energies.push(energy);
587        }
588
589        // Calculate coefficient of variation
590        let mean = chunk_energies.iter().sum::<f32>() / chunk_energies.len() as f32;
591        let variance = chunk_energies
592            .iter()
593            .map(|&x| (x - mean).powi(2))
594            .sum::<f32>()
595            / chunk_energies.len() as f32;
596        let std_dev = variance.sqrt();
597
598        if mean > 0.0 {
599            1.0 - (std_dev / mean).min(1.0) // Higher stability = lower coefficient of variation
600        } else {
601            0.0
602        }
603    }
604
    /// Reset processor state.
    ///
    /// Clears all rolling histories, zeroes the adaptation counter,
    /// restores default parameters, and resets each sub-analyzer.
    ///
    /// # Errors
    ///
    /// Propagates any [`RecognitionError`] from the sub-analyzer resets.
    pub fn reset(&mut self) -> Result<(), RecognitionError> {
        self.features_history.clear();
        self.parameters_history.clear();
        self.content_type_history.clear();
        self.adaptation_count = 0;
        self.last_parameters = AdaptiveParameters::default();
        self.noise_estimator.reset()?;
        self.pitch_tracker.reset()?;
        self.spectral_analyzer.reset()?;
        Ok(())
    }
617
    /// Get the processor's current configuration.
    #[must_use]
    pub fn config(&self) -> &AdaptiveConfig {
        &self.config
    }
623
    /// Get the most recently applied (smoothed) parameters.
    #[must_use]
    pub fn current_parameters(&self) -> &AdaptiveParameters {
        &self.last_parameters
    }
629}
630
/// Noise level estimator tracking a running floor via minimum statistics.
#[derive(Debug)]
struct NoiseEstimator {
    /// Sample rate (Hz); stored but not read by `estimate` itself.
    sample_rate: u32,
    /// Current noise-floor estimate (linear RMS).
    noise_floor: f32,
    /// Exponential smoothing factor for floor updates.
    adaptation_rate: f32,
}
638
639impl NoiseEstimator {
640    fn new(sample_rate: u32) -> Result<Self, RecognitionError> {
641        Ok(Self {
642            sample_rate,
643            noise_floor: 0.001,
644            adaptation_rate: 0.01,
645        })
646    }
647
648    fn estimate(&mut self, samples: &[f32]) -> Result<f32, RecognitionError> {
649        let energy = (samples.iter().map(|&x| x * x).sum::<f32>() / samples.len() as f32).sqrt();
650
651        // Simple noise floor estimation using minimum statistics
652        if energy < self.noise_floor * 2.0 {
653            self.noise_floor =
654                self.adaptation_rate * energy + (1.0 - self.adaptation_rate) * self.noise_floor;
655        }
656
657        Ok(self.noise_floor)
658    }
659
660    fn reset(&mut self) -> Result<(), RecognitionError> {
661        self.noise_floor = 0.001;
662        Ok(())
663    }
664}
665
/// Simple pitch tracker based on normalized autocorrelation.
#[derive(Debug)]
struct PitchTracker {
    /// Sample rate (Hz), used to convert pitch bounds to lag periods.
    sample_rate: u32,
    /// Lowest pitch searched (Hz).
    min_pitch: f32,
    /// Highest pitch searched (Hz).
    max_pitch: f32,
}
673
674impl PitchTracker {
675    fn new(sample_rate: u32) -> Result<Self, RecognitionError> {
676        Ok(Self {
677            sample_rate,
678            min_pitch: 50.0,  // 50 Hz
679            max_pitch: 800.0, // 800 Hz
680        })
681    }
682
683    fn track(&self, samples: &[f32]) -> Result<PitchFeatures, RecognitionError> {
684        // Simplified autocorrelation-based pitch detection
685        let min_period = (self.sample_rate as f32 / self.max_pitch) as usize;
686        let max_period = (self.sample_rate as f32 / self.min_pitch) as usize;
687
688        let mut max_correlation = 0.0;
689        let mut best_period = min_period;
690
691        for period in min_period..=max_period.min(samples.len() / 2) {
692            let mut correlation = 0.0;
693            let mut norm1 = 0.0;
694            let mut norm2 = 0.0;
695
696            for i in 0..(samples.len() - period) {
697                correlation += samples[i] * samples[i + period];
698                norm1 += samples[i] * samples[i];
699                norm2 += samples[i + period] * samples[i + period];
700            }
701
702            if norm1 > 0.0 && norm2 > 0.0 {
703                correlation /= (norm1 * norm2).sqrt();
704                if correlation > max_correlation {
705                    max_correlation = correlation;
706                    best_period = period;
707                }
708            }
709        }
710
711        let pitch_confidence = max_correlation.max(0.0);
712        let fundamental_freq = self.sample_rate as f32 / best_period as f32;
713
714        // Simple harmonic-to-noise ratio estimation
715        let harmonic_noise_ratio = if pitch_confidence > 0.3 {
716            pitch_confidence * 10.0 // Convert to rough HNR in dB
717        } else {
718            0.0
719        };
720
721        Ok(PitchFeatures {
722            confidence: pitch_confidence,
723            harmonic_noise_ratio,
724        })
725    }
726
727    fn reset(&mut self) -> Result<(), RecognitionError> {
728        // No state to reset in this simple implementation
729        Ok(())
730    }
731}
732
/// Pitch tracking features.
#[derive(Debug)]
struct PitchFeatures {
    /// Peak normalized autocorrelation (0.0 - 1.0).
    confidence: f32,
    /// Rough harmonic-to-noise ratio derived from the confidence (dB).
    harmonic_noise_ratio: f32,
}
739
/// Spectral analyzer computing magnitude spectra and shape features.
#[derive(Debug)]
struct SpectralAnalyzer {
    /// Maximum analysis window length (samples).
    fft_size: usize,
    /// Sample rate (Hz) for bin-to-frequency conversion.
    sample_rate: u32,
    /// Previous frame's magnitude spectrum, used for spectral flux;
    /// empty until the first `analyze` call.
    prev_spectrum: Vec<f32>,
}
747
748impl SpectralAnalyzer {
    /// Create an analyzer; `prev_spectrum` starts empty, so the first
    /// frame's spectral flux is reported as 0.
    fn new(fft_size: usize, sample_rate: u32) -> Result<Self, RecognitionError> {
        Ok(Self {
            fft_size,
            sample_rate,
            prev_spectrum: Vec::new(),
        })
    }
756
757    fn analyze(&mut self, samples: &[f32]) -> Result<SpectralFeatures, RecognitionError> {
758        // Simplified spectral analysis
759        let window_size = self.fft_size.min(samples.len());
760        let windowed: Vec<f32> = samples[..window_size]
761            .iter()
762            .enumerate()
763            .map(|(i, &x)| {
764                let window_val = 0.5
765                    * (1.0
766                        - (2.0 * std::f32::consts::PI * i as f32 / (window_size - 1) as f32).cos());
767                x * window_val
768            })
769            .collect();
770
771        // Simple magnitude spectrum computation (placeholder for proper FFT)
772        let mut spectrum = vec![0.0; window_size / 2];
773        for i in 0..spectrum.len() {
774            let mut real_sum = 0.0;
775            let mut imag_sum = 0.0;
776
777            for n in 0..window_size {
778                let angle = -2.0 * std::f32::consts::PI * i as f32 * n as f32 / window_size as f32;
779                real_sum += windowed[n] * angle.cos();
780                imag_sum += windowed[n] * angle.sin();
781            }
782
783            spectrum[i] = (real_sum * real_sum + imag_sum * imag_sum).sqrt();
784        }
785
786        // Calculate spectral features
787        let centroid = self.calculate_spectral_centroid(&spectrum);
788        let rolloff = self.calculate_spectral_rolloff(&spectrum);
789        let flatness = self.calculate_spectral_flatness(&spectrum);
790        let flux = self.calculate_spectral_flux(&spectrum);
791
792        self.prev_spectrum = spectrum;
793
794        Ok(SpectralFeatures {
795            centroid,
796            rolloff,
797            flatness,
798            flux,
799        })
800    }
801
802    fn calculate_spectral_centroid(&self, spectrum: &[f32]) -> f32 {
803        let mut weighted_sum = 0.0;
804        let mut total_magnitude = 0.0;
805
806        for (i, &magnitude) in spectrum.iter().enumerate() {
807            let freq = i as f32 * self.sample_rate as f32 / (2.0 * spectrum.len() as f32);
808            weighted_sum += freq * magnitude;
809            total_magnitude += magnitude;
810        }
811
812        if total_magnitude > 0.0 {
813            weighted_sum / total_magnitude
814        } else {
815            0.0
816        }
817    }
818
819    fn calculate_spectral_rolloff(&self, spectrum: &[f32]) -> f32 {
820        let total_energy: f32 = spectrum.iter().map(|&x| x * x).sum();
821        let threshold = 0.85 * total_energy;
822
823        let mut cumulative_energy = 0.0;
824        for (i, &magnitude) in spectrum.iter().enumerate() {
825            cumulative_energy += magnitude * magnitude;
826            if cumulative_energy >= threshold {
827                return i as f32 * self.sample_rate as f32 / (2.0 * spectrum.len() as f32);
828            }
829        }
830
831        self.sample_rate as f32 / 2.0
832    }
833
    /// Spectral flatness: geometric mean / arithmetic mean of magnitudes.
    ///
    /// Note: zero bins are excluded from the log sum but still counted in
    /// the divisor, biasing the geometric mean low for sparse spectra —
    /// presumably intentional to avoid `ln(0)`; confirm before changing.
    /// An empty spectrum produces NaN means, and the `> 0.0` guard then
    /// returns 0.0, so the result stays finite.
    fn calculate_spectral_flatness(&self, spectrum: &[f32]) -> f32 {
        let geometric_mean = spectrum
            .iter()
            .filter(|&&x| x > 0.0)
            .map(|&x| x.ln())
            .sum::<f32>()
            / spectrum.len() as f32;

        let arithmetic_mean = spectrum.iter().sum::<f32>() / spectrum.len() as f32;

        if arithmetic_mean > 0.0 {
            geometric_mean.exp() / arithmetic_mean
        } else {
            0.0
        }
    }
850
851    fn calculate_spectral_flux(&self, spectrum: &[f32]) -> f32 {
852        if self.prev_spectrum.is_empty() || self.prev_spectrum.len() != spectrum.len() {
853            return 0.0;
854        }
855
856        let mut flux = 0.0;
857        for (curr, prev) in spectrum.iter().zip(self.prev_spectrum.iter()) {
858            let diff = curr - prev;
859            if diff > 0.0 {
860                flux += diff * diff;
861            }
862        }
863
864        flux.sqrt()
865    }
866
    /// Clear the stored spectrum so the next frame's flux restarts at 0.
    fn reset(&mut self) -> Result<(), RecognitionError> {
        self.prev_spectrum.clear();
        Ok(())
    }
871}
872
/// Spectral features for one analysis frame.
#[derive(Debug)]
struct SpectralFeatures {
    /// Magnitude-weighted mean frequency (Hz).
    centroid: f32,
    /// 85% energy rolloff frequency (Hz).
    rolloff: f32,
    /// Geometric/arithmetic mean ratio of magnitudes.
    flatness: f32,
    /// Positive spectral change vs. the previous frame.
    flux: f32,
}
881
882#[cfg(test)]
883mod tests {
884    use super::*;
885
    // Constructing a processor from the default config must succeed.
    #[test]
    fn test_adaptive_processor_creation() {
        let config = AdaptiveConfig::default();
        let processor = AdaptiveProcessor::new(config);
        assert!(processor.is_ok());
    }
892
    // The energy gate must classify very quiet frames as Silence even
    // when other features (flatness, stability) look otherwise active.
    #[test]
    fn test_content_classification() {
        let config = AdaptiveConfig::default();
        let processor = AdaptiveProcessor::new(config).unwrap();

        // Test silence classification
        let silent_features = AudioFeatures {
            snr_db: 40.0,
            zero_crossing_rate: 0.1,
            spectral_centroid: 1000.0,
            spectral_rolloff: 2000.0,
            spectral_flux: 0.1,
            harmonic_noise_ratio: 5.0,
            energy: 0.0001, // Very low energy — below the 0.001 gate
            pitch_confidence: 0.2,
            spectral_flatness: 0.8,
            temporal_stability: 0.9,
        };

        let content_type = processor.classify_content(&silent_features).unwrap();
        assert_eq!(content_type, AudioContentType::Silence);
    }
915
    // End-to-end smoke test: analysis succeeds, timing is recorded, and
    // classification confidence stays within [0, 1].
    #[test]
    fn test_parameter_adaptation() {
        let config = AdaptiveConfig::default();
        let mut processor = AdaptiveProcessor::new(config).unwrap();

        let samples = vec![0.1; 4096];
        let audio = AudioBuffer::mono(samples, 16000);

        let result = processor.analyze_and_adapt(&audio);
        assert!(result.is_ok());

        let result = result.unwrap();
        assert!(result.stats.processing_overhead_ms > 0.0);
        assert!(result.parameters.classification_confidence >= 0.0);
        assert!(result.parameters.classification_confidence <= 1.0);
    }
932
    // The noise floor estimate must stay positive and bounded for a
    // constant low-level input.
    #[test]
    fn test_noise_estimator() {
        let mut estimator = NoiseEstimator::new(16000).unwrap();

        let noisy_samples = vec![0.01; 1000];
        let noise_level = estimator.estimate(&noisy_samples).unwrap();
        assert!(noise_level > 0.0);
        assert!(noise_level < 1.0);
    }
942
    // A pure 440 Hz tone must yield a positive pitch confidence and a
    // non-negative HNR.
    #[test]
    fn test_pitch_tracker() {
        let tracker = PitchTracker::new(16000).unwrap();

        // Generate a simple sine wave at 440 Hz
        let mut samples = Vec::new();
        for i in 0..1000 {
            let t = i as f32 / 16000.0;
            samples.push((2.0 * std::f32::consts::PI * 440.0 * t).sin() * 0.5);
        }

        let features = tracker.track(&samples).unwrap();
        assert!(features.confidence > 0.0);
        assert!(features.harmonic_noise_ratio >= 0.0);
    }
958
    // All spectral features must be non-negative for a constant input.
    #[test]
    fn test_spectral_analyzer() {
        let mut analyzer = SpectralAnalyzer::new(2048, 16000).unwrap();

        let samples = vec![0.1; 2048];
        let features = analyzer.analyze(&samples).unwrap();

        assert!(features.centroid >= 0.0);
        assert!(features.rolloff >= 0.0);
        assert!(features.flatness >= 0.0);
        assert!(features.flux >= 0.0);
    }
971}