use super::buffer::AudioBuffer;
use crate::{error::Result, VoirsError};
use scirs2_core::ndarray::Array1;
use scirs2_core::simd_ops::SimdUnifiedOps;
impl AudioBuffer {
/// Resample audio to `target_rate` using nearest-neighbor sample selection.
///
/// Returns a new buffer; `self` is unchanged. When `target_rate` equals the
/// current rate the buffer is cloned as-is.
///
/// NOTE(review): nearest-neighbor selection introduces aliasing; a
/// band-limited resampler would be higher quality. Interleaved multi-channel
/// data is resampled sample-wise, not frame-wise — confirm acceptable.
pub fn resample(&self, target_rate: u32) -> Result<AudioBuffer> {
if target_rate == self.sample_rate {
return Ok(self.clone());
}
let ratio = target_rate as f32 / self.sample_rate as f32;
let new_length = (self.samples.len() as f32 * ratio) as usize;
let mut resampled = Vec::with_capacity(new_length);
if !self.samples.is_empty() {
let last = self.samples.len() - 1;
for i in 0..new_length {
// Clamp instead of skipping: float rounding can push the source index
// one past the end, which previously dropped the sample and produced
// an output shorter than `new_length`.
let src_index = ((i as f32 / ratio) as usize).min(last);
resampled.push(self.samples[src_index]);
}
}
Ok(AudioBuffer::new(resampled, target_rate, self.channels))
}
/// Apply a gain expressed in decibels, clamping each sample to [-1.0, 1.0].
pub fn apply_gain(&mut self, gain_db: f32) -> Result<()> {
// Convert dB to a linear multiplier: dB = 20 * log10(scale).
let scale = 10.0_f32.powf(gain_db / 20.0);
let use_simd = self.samples.len() > 64 && f32::simd_available();
if use_simd {
// Bulk multiply via SIMD, then clamp while collecting back.
let input = Array1::from_vec(self.samples.clone());
let scaled = f32::simd_scalar_mul(&input.view(), scale);
self.samples = scaled.iter().map(|&v| v.clamp(-1.0, 1.0)).collect();
} else {
for value in self.samples.iter_mut() {
*value = (*value * scale).clamp(-1.0, 1.0);
}
}
self.update_metadata();
Ok(())
}
/// Scale all samples so the peak amplitude becomes `target_peak`.
///
/// Silent buffers (peak == 0) are left untouched and metadata is not
/// refreshed in that case. No clamping is applied, so a `target_peak`
/// above 1.0 can produce out-of-range samples.
pub fn normalize(&mut self, target_peak: f32) -> Result<()> {
// Reuse `peak()` instead of duplicating its SIMD/scalar scan here;
// the two implementations were previously copy-pasted.
let current_peak = self.peak();
if current_peak > 0.0 {
let gain = target_peak / current_peak;
if self.samples.len() > 64 && f32::simd_available() {
let samples_array = Array1::from_vec(self.samples.clone());
let normalized = f32::simd_scalar_mul(&samples_array.view(), gain);
self.samples = normalized.to_vec();
} else {
for sample in &mut self.samples {
*sample *= gain;
}
}
self.update_metadata();
}
Ok(())
}
/// Mix `other` into this buffer with the given linear `gain`, clamping the
/// result to [-1.0, 1.0]. Only the overlapping prefix (the shorter of the
/// two buffers) is mixed; any trailing samples of `self` are untouched.
///
/// # Errors
/// Returns an error when sample rates or channel counts differ.
pub fn mix(&mut self, other: &AudioBuffer, gain: f32) -> Result<()> {
if self.sample_rate != other.sample_rate {
return Err(VoirsError::audio_error(
"Sample rates must match for mixing",
));
}
// Consistency fix: `append` and `crossfade` already reject mismatched
// channel counts; mixing interleaved buffers with different layouts
// would silently sum unrelated channels.
if self.channels != other.channels {
return Err(VoirsError::audio_error(
"Channel counts must match for mixing",
));
}
let mix_length = self.samples.len().min(other.samples.len());
if mix_length > 64 && f32::simd_available() {
// Fused multiply-add in one SIMD pass: self + other * gain.
let self_portion = Array1::from_vec(self.samples[..mix_length].to_vec());
let other_portion = Array1::from_vec(other.samples[..mix_length].to_vec());
let gain_vec = Array1::from_elem(mix_length, gain);
let mixed = f32::simd_fma(
&other_portion.view(),
&gain_vec.view(),
&self_portion.view(),
);
for (i, &sample) in mixed.iter().enumerate() {
self.samples[i] = sample.clamp(-1.0, 1.0);
}
} else {
for i in 0..mix_length {
self.samples[i] = (self.samples[i] + other.samples[i] * gain).clamp(-1.0, 1.0);
}
}
self.update_metadata();
Ok(())
}
/// Concatenate `other` onto the end of this buffer.
///
/// # Errors
/// Returns an error when the sample rate or channel layout differs.
pub fn append(&mut self, other: &AudioBuffer) -> Result<()> {
let compatible =
self.sample_rate == other.sample_rate && self.channels == other.channels;
if !compatible {
return Err(VoirsError::audio_error(
"Sample rate and channels must match for appending",
));
}
self.samples.extend_from_slice(&other.samples);
self.update_metadata();
Ok(())
}
/// Split the buffer at `time_seconds`, returning `(before, after)`.
///
/// # Errors
/// Returns an error if the split point is at or past the end of the audio.
pub fn split(&self, time_seconds: f32) -> Result<(AudioBuffer, AudioBuffer)> {
// Compute the split position in whole frames first, then convert to an
// interleaved sample index. The previous `(t * sr * ch) as usize` could
// truncate to an index that is not a multiple of `channels`, cutting a
// multi-channel frame in half between the two parts.
let split_frame = (time_seconds * self.sample_rate as f32) as usize;
let split_sample = split_frame * self.channels as usize;
if split_sample >= self.samples.len() {
return Err(VoirsError::audio_error("Split time exceeds audio duration"));
}
let first_part = AudioBuffer::new(
self.samples[..split_sample].to_vec(),
self.sample_rate,
self.channels,
);
let second_part = AudioBuffer::new(
self.samples[split_sample..].to_vec(),
self.sample_rate,
self.channels,
);
Ok((first_part, second_part))
}
/// Apply a linear fade-in over the first `duration_seconds` of audio.
/// Durations longer than the buffer are clipped to its length.
pub fn fade_in(&mut self, duration_seconds: f32) -> Result<()> {
let requested =
(duration_seconds * self.sample_rate as f32 * self.channels as f32) as usize;
let count = requested.min(self.samples.len());
// Ramp from 0.0 at the first sample up toward (count - 1) / count.
for (i, sample) in self.samples.iter_mut().take(count).enumerate() {
*sample *= i as f32 / count as f32;
}
self.update_metadata();
Ok(())
}
/// Apply a linear fade-out over the last `duration_seconds` of audio.
/// Durations longer than the buffer are clipped to its length.
pub fn fade_out(&mut self, duration_seconds: f32) -> Result<()> {
let requested =
(duration_seconds * self.sample_rate as f32 * self.channels as f32) as usize;
let count = requested.min(self.samples.len());
let start = self.samples.len().saturating_sub(count);
// Ramp from 1.0 down toward 1 / count across the tail of the buffer.
for (i, sample) in self.samples[start..].iter_mut().enumerate() {
*sample *= 1.0 - (i as f32 / count as f32);
}
self.update_metadata();
Ok(())
}
/// Crossfade from this buffer into `other` over `crossfade_duration` seconds.
///
/// The tail of `self` is faded out while the head of `other` is faded in and
/// summed on top (clamped to [-1, 1]); the remainder of `other` is appended
/// unmodified afterwards.
///
/// # Errors
/// Returns an error when sample rates or channel counts differ.
pub fn crossfade(&mut self, other: &AudioBuffer, crossfade_duration: f32) -> Result<()> {
if self.sample_rate != other.sample_rate || self.channels != other.channels {
return Err(VoirsError::audio_error(
"Sample rate and channels must match for crossfading",
));
}
let requested =
(crossfade_duration * self.sample_rate as f32 * self.channels as f32) as usize;
let overlap = requested.min(self.samples.len()).min(other.samples.len());
let tail_start = self.samples.len().saturating_sub(overlap);
// Each overlap position is independent, so the fade-out and fade-in
// passes are combined into a single blend per sample.
for i in 0..overlap {
let t = i as f32 / overlap as f32;
let blended = self.samples[tail_start + i] * (1.0 - t) + other.samples[i] * t;
self.samples[tail_start + i] = blended.clamp(-1.0, 1.0);
}
if overlap < other.samples.len() {
self.samples.extend_from_slice(&other.samples[overlap..]);
}
self.update_metadata();
Ok(())
}
/// Apply a first-order (single-pole) RC low-pass filter in place.
pub fn lowpass_filter(&mut self, cutoff_frequency: f32) -> Result<()> {
// Discretized RC filter: y[n] = a * x[n] + (1 - a) * y[n-1].
let dt = 1.0 / self.sample_rate as f32;
let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_frequency);
let alpha = dt / (rc + dt);
let mut state = 0.0;
for sample in self.samples.iter_mut() {
state = alpha * *sample + (1.0 - alpha) * state;
*sample = state;
}
self.update_metadata();
Ok(())
}
/// Apply a first-order RC high-pass filter in place.
pub fn highpass_filter(&mut self, cutoff_frequency: f32) -> Result<()> {
// Discretized RC filter: y[n] = a * (y[n-1] + x[n] - x[n-1]).
let dt = 1.0 / self.sample_rate as f32;
let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_frequency);
let alpha = rc / (rc + dt);
let mut last_input = 0.0;
let mut last_output = 0.0;
for sample in self.samples.iter_mut() {
let current = *sample;
last_output = alpha * (last_output + current - last_input);
last_input = current;
*sample = last_output;
}
self.update_metadata();
Ok(())
}
/// Change playback speed by naive sample repetition/decimation (pitch is NOT
/// preserved). A `stretch_factor` of 2.0 halves the duration; 0.5 doubles it.
///
/// # Errors
/// Returns an error when `stretch_factor` is not strictly positive.
pub fn time_stretch(&self, stretch_factor: f32) -> Result<AudioBuffer> {
if stretch_factor <= 0.0 {
return Err(VoirsError::audio_error("Stretch factor must be positive"));
}
let new_length = (self.samples.len() as f32 / stretch_factor) as usize;
let mut stretched = Vec::with_capacity(new_length);
if !self.samples.is_empty() {
let last = self.samples.len() - 1;
for i in 0..new_length {
// Clamp rather than skip: float rounding could land one past the
// end, which previously dropped the sample and shortened the output.
let src_index = ((i as f32 * stretch_factor) as usize).min(last);
stretched.push(self.samples[src_index]);
}
}
Ok(AudioBuffer::new(stretched, self.sample_rate, self.channels))
}
/// Pitch-shift by `semitones` with a phase vocoder, preserving duration.
///
/// The signal is processed in 1024-sample Hann-windowed frames with a
/// 256-sample hop (75% overlap): each frame is FFT'd, its bins remapped to
/// pitch-scaled frequencies with phase correction, inverse-FFT'd, and
/// overlap-added into an output of the original length.
///
/// # Errors
/// Returns `VoirsError::AudioError` if the forward or inverse FFT fails.
pub fn pitch_shift(&self, semitones: f32) -> Result<AudioBuffer> {
use scirs2_core::Complex;
use std::f32::consts::PI;
// Zero shift is the identity.
if semitones == 0.0 {
return Ok(self.clone());
}
// 2^(semitones/12): +12 semitones doubles frequency.
let pitch_factor = 2.0_f32.powf(semitones / 12.0);
let frame_size = 1024; let hop_size = frame_size / 4; let _overlap_factor = frame_size / hop_size;
// Zero-pad the input so trailing frames are complete; the output carries
// matching slack that is trimmed off at the end.
let mut input_samples = self.samples.clone();
let padding = frame_size * 2;
input_samples.resize(input_samples.len() + padding, 0.0);
let output_length = self.samples.len();
let mut output_samples = vec![0.0; output_length + padding];
// Per-bin phase memory: the analysis phase of the previous frame and the
// accumulated synthesis phase for each output bin.
let mut previous_phase = vec![0.0; frame_size / 2 + 1];
let mut synthesis_phase = vec![0.0; frame_size / 2 + 1];
// Hann window, applied at both analysis and synthesis.
let window: Vec<f32> = (0..frame_size)
.map(|i| 0.5 * (1.0 - (2.0 * PI * i as f32 / (frame_size - 1) as f32).cos()))
.collect();
let mut input_pos = 0;
let mut output_pos = 0;
while input_pos + frame_size <= input_samples.len() {
// Windowed analysis frame.
let frame_real: Vec<f32> = (0..frame_size)
.map(|i| input_samples[input_pos + i] * window[i])
.collect();
// scirs2_fft::rfft operates on f64: convert up, transform, convert back.
let frame_real_f64: Vec<f64> = frame_real.iter().map(|&x| x as f64).collect();
let frame_complex_f64 =
scirs2_fft::rfft(&frame_real_f64, None).map_err(|e| VoirsError::AudioError {
message: format!("FFT failed: {}", e),
buffer_info: None,
})?;
let frame_complex: Vec<Complex<f32>> = frame_complex_f64
.iter()
.map(|c| Complex::new(c.re as f32, c.im as f32))
.collect();
let mut modified_frame = vec![Complex::new(0.0f64, 0.0f64); frame_size / 2 + 1];
for k in 0..frame_complex.len() {
let magnitude = frame_complex[k].norm();
let phase = frame_complex[k].arg();
// Phase advance of this bin since the previous frame.
let phase_diff = phase - previous_phase[k];
previous_phase[k] = phase;
let unwrapped_phase_diff = phase_diff;
// Phase advance a pure bin-k sinusoid would accumulate over one hop.
let expected_phase_diff = 2.0 * PI * k as f32 * hop_size as f32 / frame_size as f32;
// Deviation from the bin center, wrapped into [-PI, PI), gives the
// instantaneous-frequency offset.
let phase_deviation = unwrapped_phase_diff - expected_phase_diff;
let wrapped_deviation = ((phase_deviation + PI) % (2.0 * PI)) - PI;
let true_freq =
2.0 * PI * k as f32 / frame_size as f32 + wrapped_deviation / hop_size as f32;
// Scale the estimated frequency and map it to the nearest output bin.
let shifted_freq = true_freq * pitch_factor;
let shifted_bin = shifted_freq * frame_size as f32 / (2.0 * PI);
if shifted_bin >= 0.0 && shifted_bin < (frame_size / 2) as f32 {
let target_bin = shifted_bin.round() as usize;
if target_bin < frame_size / 2 + 1 {
// Accumulate the synthesis phase for the target bin and emit the
// source magnitude at the shifted frequency.
synthesis_phase[target_bin] += shifted_freq * hop_size as f32;
let new_complex = Complex::new(
(magnitude * synthesis_phase[target_bin].cos()) as f64,
(magnitude * synthesis_phase[target_bin].sin()) as f64,
);
// NOTE(review): plain assignment means that when several source
// bins map to the same target bin, only the last one survives —
// earlier contributions are discarded. Confirm this is intended.
modified_frame[target_bin] = new_complex;
}
}
}
let frame_output_f64 =
scirs2_fft::irfft(&modified_frame, Some(frame_size)).map_err(|e| {
VoirsError::AudioError {
message: format!("IFFT failed: {}", e),
buffer_info: None,
}
})?;
let frame_output: Vec<f32> = frame_output_f64.iter().map(|&x| x as f32).collect();
// Overlap-add gain compensation; 2/3 presumably offsets the squared-Hann
// window sum at 75% overlap (1/1.5) — TODO confirm.
let norm_factor = 2.0 / 3.0;
for i in 0..frame_size {
if output_pos + i < output_samples.len() {
let windowed_sample = frame_output[i] * window[i] * norm_factor;
output_samples[output_pos + i] += windowed_sample;
}
}
input_pos += hop_size;
output_pos += hop_size;
}
// Trim the padding so the output length matches the input exactly.
output_samples.truncate(output_length);
// Guard against clipping introduced by overlap-add: rescale to a 0.95 peak.
let max_amplitude = output_samples
.iter()
.map(|&s: &f32| s.abs())
.fold(0.0f32, f32::max);
if max_amplitude > 1.0 {
let normalization_factor = 0.95 / max_amplitude;
for sample in &mut output_samples {
*sample *= normalization_factor;
}
}
Ok(AudioBuffer::new(
output_samples,
self.sample_rate,
self.channels,
))
}
/// Pitch-shift using PSOLA (Pitch-Synchronous Overlap-Add).
///
/// Detects pitch-period boundaries, extracts Hann-windowed grains spanning
/// two periods around each boundary, and overlap-adds them at output
/// intervals scaled by 1/pitch_factor. Falls back to the phase-vocoder
/// implementation when no periods are detected (e.g. unvoiced input).
///
/// # Errors
/// Propagates errors from period detection or the phase-vocoder fallback.
pub fn pitch_shift_psola(&self, semitones: f32) -> Result<AudioBuffer> {
// Zero shift is the identity.
if semitones == 0.0 {
return Ok(self.clone());
}
let pitch_factor = 2.0_f32.powf(semitones / 12.0);
// Search pitch periods corresponding to 800 Hz down to 50 Hz.
let min_period = (self.sample_rate as f32 / 800.0) as usize; let max_period = (self.sample_rate as f32 / 50.0) as usize;
let pitch_periods = self.detect_pitch_periods(min_period, max_period)?;
if pitch_periods.is_empty() {
// No periodicity found: defer to the phase vocoder.
return self.pitch_shift(semitones);
}
let output_length = self.samples.len();
let mut output_samples = vec![0.0; output_length];
// Fractional output write position; advanced by period/pitch_factor so
// grains land closer together (higher pitch) or further apart (lower).
let mut output_pos = 0.0;
let mut input_idx = 0;
while input_idx < pitch_periods.len() - 1 && (output_pos as usize) < output_length {
let current_period = pitch_periods[input_idx];
let next_period = pitch_periods[input_idx + 1];
let period_length = next_period - current_period;
// Grain spans two periods, centered half a period before the boundary.
let grain_size = period_length * 2; let grain_start = current_period.saturating_sub(period_length / 2);
let grain_end = (grain_start + grain_size).min(self.samples.len());
if grain_end > grain_start {
let grain_length = grain_end - grain_start;
// Hann window over the grain.
// NOTE(review): a grain_length of 1 makes the divisor zero and the
// window value NaN — confirm period detection cannot produce that.
let window: Vec<f32> = (0..grain_length)
.map(|i| {
0.5 * (1.0
- (2.0 * std::f32::consts::PI * i as f32 / (grain_length - 1) as f32)
.cos())
})
.collect();
// Overlap-add the windowed grain at the current output position.
for (i, &sample) in self.samples[grain_start..grain_end].iter().enumerate() {
let windowed_sample = sample * window[i];
let output_index = (output_pos as usize) + i;
if output_index < output_samples.len() {
output_samples[output_index] += windowed_sample;
}
}
}
output_pos += period_length as f32 / pitch_factor;
input_idx += 1;
}
// Rescale if the overlap-add pushed the signal out of [-1, 1].
let max_amplitude = output_samples.iter().map(|&s| s.abs()).fold(0.0, f32::max);
if max_amplitude > 1.0 {
let normalization_factor = 0.95 / max_amplitude;
for sample in &mut output_samples {
*sample *= normalization_factor;
}
}
Ok(AudioBuffer::new(
output_samples,
self.sample_rate,
self.channels,
))
}
/// Locate approximate pitch-period boundaries via normalized autocorrelation.
///
/// The signal is scanned in windows of `4 * max_period`. For each window the
/// lag in [min_period, max_period] with the highest normalized
/// autocorrelation is chosen; when the score exceeds 0.3 a boundary is
/// recorded and the cursor advances by that lag, otherwise it advances by
/// `min_period` and tries again.
fn detect_pitch_periods(&self, min_period: usize, max_period: usize) -> Result<Vec<usize>> {
let mut boundaries = Vec::new();
let window_len = max_period * 4;
let mut cursor = 0;
while cursor + window_len < self.samples.len() {
let frame = &self.samples[cursor..cursor + window_len];
let mut best_score = 0.0;
let mut best_lag = min_period;
for lag in min_period..=max_period.min(window_len / 2) {
let mut correlation = 0.0;
let mut energy = 0.0;
// Pairs (frame[i], frame[i + lag]) for i in 0..window_len - lag.
for (a, b) in frame.iter().zip(frame[lag..].iter()) {
correlation += a * b;
energy += a * a;
}
if energy > 0.0 {
let score = correlation / energy;
if score > best_score {
best_score = score;
best_lag = lag;
}
}
}
if best_score > 0.3 {
// Confident detection: record the boundary and jump one period.
boundaries.push(cursor + best_lag);
cursor += best_lag;
} else {
// Low confidence: advance by the minimum period and retry.
cursor += min_period;
}
}
Ok(boundaries)
}
/// Dynamic-range compressor with an exponential attack/release envelope
/// follower.
///
/// `threshold` is a linear amplitude; envelope levels above it are reduced
/// by `ratio`. Attack and release times are given in milliseconds.
pub fn compress(
&mut self,
threshold: f32,
ratio: f32,
attack_ms: f32,
release_ms: f32,
) -> Result<()> {
// One-pole smoothing coefficients derived from the time constants.
let attack_coeff = (-1.0 / (attack_ms * 0.001 * self.sample_rate as f32)).exp();
let release_coeff = (-1.0 / (release_ms * 0.001 * self.sample_rate as f32)).exp();
let mut envelope = 0.0;
for sample in self.samples.iter_mut() {
let level = sample.abs();
// Rising levels follow the attack curve, falling levels the release.
let coeff = if level > envelope { attack_coeff } else { release_coeff };
envelope = level + (envelope - level) * coeff;
if envelope > threshold {
// Gain that maps the envelope to threshold + excess / ratio.
let reduction = (threshold + (envelope - threshold) / ratio) / envelope;
*sample *= reduction;
}
}
self.update_metadata();
Ok(())
}
/// Simple single-delay-line reverb.
///
/// `room_size` scales the delay length (0.1 s per unit of room size),
/// `damping` sets how much fresh input enters the feedback loop, and
/// `wet_level` blends the delayed signal into the output.
pub fn reverb(&mut self, room_size: f32, damping: f32, wet_level: f32) -> Result<()> {
// Delay length proportional to room size, at least one sample long.
let delay_len = ((room_size * self.sample_rate as f32 * 0.1) as usize).max(1);
let mut delay_line = vec![0.0; delay_len];
let mut write_pos = 0;
for sample in self.samples.iter_mut() {
let delayed = delay_line[write_pos];
// Feed a damped mix of the delayed signal and the input back in.
delay_line[write_pos] = delayed * (1.0 - damping) + *sample * damping;
write_pos = (write_pos + 1) % delay_len;
// Dry/wet blend of the input with the delayed signal.
*sample = *sample * (1.0 - wet_level) + delayed * wet_level;
}
self.update_metadata();
Ok(())
}
/// Copy out a section starting at `start_seconds` lasting `duration_seconds`.
/// The section is truncated if it runs past the end of the buffer.
///
/// # Errors
/// Returns an error when `start_seconds` is beyond the end of the audio.
pub fn extract(&self, start_seconds: f32, duration_seconds: f32) -> Result<AudioBuffer> {
// Work in whole frames first so multi-channel extraction stays
// frame-aligned; truncating a float that already includes the channel
// factor could begin or end mid-frame.
let channels = self.channels as usize;
let start_sample = (start_seconds * self.sample_rate as f32) as usize * channels;
let duration_samples = (duration_seconds * self.sample_rate as f32) as usize * channels;
let end_sample = (start_sample + duration_samples).min(self.samples.len());
if start_sample >= self.samples.len() {
return Err(VoirsError::audio_error("Start time exceeds audio duration"));
}
let extracted_samples = self.samples[start_sample..end_sample].to_vec();
Ok(AudioBuffer::new(
extracted_samples,
self.sample_rate,
self.channels,
))
}
/// Root-mean-square amplitude of the buffer (0.0 for an empty buffer).
pub fn rms(&self) -> f32 {
if self.samples.is_empty() {
return 0.0;
}
let use_simd = self.samples.len() > 64 && f32::simd_available();
let sum_squares = if use_simd {
let values = Array1::from_vec(self.samples.clone());
f32::simd_sum_squares(&values.view())
} else {
self.samples.iter().fold(0.0, |acc, &s| acc + s * s)
};
(sum_squares / self.samples.len() as f32).sqrt()
}
/// Maximum absolute sample value in the buffer (0.0 when empty).
pub fn peak(&self) -> f32 {
let use_simd = self.samples.len() > 64 && f32::simd_available();
if use_simd {
let values = Array1::from_vec(self.samples.clone());
let magnitudes = f32::simd_abs(&values.view());
f32::simd_max_element(&magnitudes.view())
} else {
self.samples.iter().fold(0.0_f32, |acc, &s| acc.max(s.abs()))
}
}
/// True when any sample's magnitude reaches `threshold`.
pub fn is_clipped(&self, threshold: f32) -> bool {
self.samples.iter().map(|s| s.abs()).any(|level| level >= threshold)
}
/// Soft-limit samples whose magnitude exceeds `threshold` using an
/// exponential saturation curve; samples at or below the threshold pass
/// through untouched.
///
/// The limited magnitude is threshold * (1 - e^(-|x| / threshold)), which is
/// strictly below `threshold`, so `is_clipped(threshold)` returns false
/// afterwards.
///
/// NOTE(review): the transfer curve is discontinuous at the threshold — a
/// sample just above it maps to roughly 0.63 * threshold while one just
/// below passes unchanged. The existing tests rely on the output staying
/// strictly below the threshold; confirm the discontinuity is acceptable
/// for audible material before changing it.
pub fn soft_clip(&mut self, threshold: f32) -> Result<()> {
for sample in &mut self.samples {
if sample.abs() > threshold {
// Preserve polarity; the exponential term saturates toward 1.
let sign = if *sample >= 0.0 { 1.0 } else { -1.0 };
*sample = sign * threshold * (1.0 - (-(*sample).abs() / threshold).exp());
}
}
self.update_metadata();
Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::audio::buffer::AudioBuffer;
// +6 dB roughly doubles amplitude, so the recorded peak must grow.
#[test]
fn test_gain_application() {
let mut buffer = AudioBuffer::sine_wave(440.0, 0.1, 44100, 0.5);
let original_peak = buffer.metadata().peak_amplitude;
buffer.apply_gain(6.0).unwrap();
let new_peak = buffer.metadata().peak_amplitude;
assert!(new_peak > original_peak);
}
// Normalizing a 0.3-amplitude sine to 0.8 should land the peak at 0.8.
#[test]
fn test_normalization() {
let mut buffer = AudioBuffer::sine_wave(440.0, 0.1, 44100, 0.3);
buffer.normalize(0.8).unwrap();
let peak = buffer.metadata().peak_amplitude;
assert!((peak - 0.8).abs() < 0.01);
}
// Mixing a second tone in must change the peak amplitude.
#[test]
fn test_mixing() {
let mut buffer1 = AudioBuffer::sine_wave(440.0, 0.1, 44100, 0.5);
let buffer2 = AudioBuffer::sine_wave(880.0, 0.1, 44100, 0.3);
let original_peak = buffer1.metadata().peak_amplitude;
buffer1.mix(&buffer2, 0.5).unwrap();
assert!(buffer1.metadata().peak_amplitude != original_peak);
}
// Splitting a 2 s buffer at 1 s yields two ~1 s halves.
#[test]
fn test_split() {
let buffer = AudioBuffer::sine_wave(440.0, 2.0, 44100, 0.5);
let (first, second) = buffer.split(1.0).unwrap();
assert!((first.duration() - 1.0).abs() < 0.01);
assert!((second.duration() - 1.0).abs() < 0.01);
}
// Appending an equal-length buffer doubles the duration.
#[test]
fn test_append() {
let mut buffer1 = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let buffer2 = AudioBuffer::sine_wave(880.0, 1.0, 44100, 0.3);
let original_duration = buffer1.duration();
buffer1.append(&buffer2).unwrap();
assert!((buffer1.duration() - 2.0 * original_duration).abs() < 0.01);
}
// After fades, the first and last samples should be near-silent.
#[test]
fn test_fade_in_out() {
let mut buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
buffer.fade_in(0.1).unwrap();
buffer.fade_out(0.1).unwrap();
assert!(buffer.samples()[0].abs() < 0.1);
assert!(buffer.samples()[buffer.len() - 1].abs() < 0.1);
}
// A stretch factor of 2.0 halves the duration.
#[test]
fn test_time_stretch() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let stretched = buffer.time_stretch(2.0).unwrap();
assert!((stretched.duration() - 0.5).abs() < 0.01);
}
// Extracting 1 s from a 2 s buffer yields a 1 s buffer.
#[test]
fn test_extract() {
let buffer = AudioBuffer::sine_wave(440.0, 2.0, 44100, 0.5);
let extracted = buffer.extract(0.5, 1.0).unwrap();
assert!((extracted.duration() - 1.0).abs() < 0.01);
}
// RMS of a sine with amplitude A is A / sqrt(2).
#[test]
fn test_rms_calculation() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let rms = buffer.rms();
assert!((rms - 0.5 / 2.0_f32.sqrt()).abs() < 0.01);
}
// An amplitude-1.5 sine clips at 1.0; soft_clip(0.95) must bring every
// sample strictly below 0.95.
#[test]
fn test_clipping_detection() {
let mut buffer = AudioBuffer::sine_wave(440.0, 0.1, 44100, 1.5);
assert!(buffer.is_clipped(1.0));
buffer.soft_clip(0.95).unwrap();
assert!(!buffer.is_clipped(0.95));
}
// Downsampling 1 s of 44.1 kHz audio to 22.05 kHz yields exactly 22050 samples.
#[test]
fn test_resampling() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let resampled = buffer.resample(22050).unwrap();
assert_eq!(resampled.sample_rate(), 22050);
assert_eq!(resampled.len(), 22050); }
// The phase vocoder must preserve duration/length for up, down, zero,
// and small shifts.
#[test]
fn test_pitch_shift_phase_vocoder() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let original_duration = buffer.duration();
let shifted_up = buffer.pitch_shift(12.0).unwrap(); let shifted_duration = shifted_up.duration();
let duration_diff = (shifted_duration - original_duration).abs();
assert!(
duration_diff < 0.01,
"Original duration: {original_duration}, Shifted duration: {shifted_duration}, Difference: {duration_diff}"
);
assert_eq!(shifted_up.len(), buffer.len());
let shifted_down = buffer.pitch_shift(-12.0).unwrap(); assert_eq!(shifted_down.len(), buffer.len());
let no_shift = buffer.pitch_shift(0.0).unwrap();
assert_eq!(no_shift.len(), buffer.len());
let small_shift = buffer.pitch_shift(2.0).unwrap(); assert_eq!(small_shift.len(), buffer.len());
}
// PSOLA must also preserve duration/length for up, down, and zero shifts.
#[test]
fn test_pitch_shift_psola() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let original_duration = buffer.duration();
let shifted_up = buffer.pitch_shift_psola(7.0).unwrap(); let shifted_duration = shifted_up.duration();
let duration_diff = (shifted_duration - original_duration).abs();
assert!(
duration_diff < 0.01,
"Original duration: {original_duration}, Shifted duration: {shifted_duration}, Difference: {duration_diff}"
);
assert_eq!(shifted_up.len(), buffer.len());
let shifted_down = buffer.pitch_shift_psola(-7.0).unwrap(); assert_eq!(shifted_down.len(), buffer.len());
let no_shift = buffer.pitch_shift_psola(0.0).unwrap();
assert_eq!(no_shift.len(), buffer.len());
}
// Period detection on a pure sine must find in-bounds period boundaries.
#[test]
fn test_pitch_detection() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let min_period = (buffer.sample_rate as f32 / 800.0) as usize;
let max_period = (buffer.sample_rate as f32 / 50.0) as usize;
let periods = buffer.detect_pitch_periods(min_period, max_period).unwrap();
assert!(!periods.is_empty(), "Should detect periods in a sine wave");
for &period in &periods {
assert!(
period < buffer.samples.len(),
"Period {} exceeds buffer length {}",
period,
buffer.samples.len()
);
}
}
// Both algorithms should agree on output length, rate, channels, and duration.
#[test]
fn test_pitch_shift_algorithms_comparison() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let semitones = 5.0; let phase_vocoder_result = buffer.pitch_shift(semitones).unwrap();
let psola_result = buffer.pitch_shift_psola(semitones).unwrap();
assert_eq!(phase_vocoder_result.len(), buffer.len());
assert_eq!(psola_result.len(), buffer.len());
assert_eq!(phase_vocoder_result.sample_rate, buffer.sample_rate);
assert_eq!(psola_result.sample_rate, buffer.sample_rate);
assert_eq!(phase_vocoder_result.channels, buffer.channels);
assert_eq!(psola_result.channels, buffer.channels);
let pv_duration = phase_vocoder_result.duration();
let psola_duration = psola_result.duration();
let duration_diff = (pv_duration - psola_duration).abs();
assert!(
duration_diff < 0.01,
"Phase vocoder duration: {pv_duration}, PSOLA duration: {psola_duration}"
);
}
// Extreme and fractional shifts must still preserve length.
#[test]
fn test_pitch_shift_edge_cases() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let extreme_up = buffer.pitch_shift(24.0).unwrap(); assert_eq!(extreme_up.len(), buffer.len());
let extreme_down = buffer.pitch_shift(-24.0).unwrap(); assert_eq!(extreme_down.len(), buffer.len());
let fractional = buffer.pitch_shift(1.5).unwrap(); assert_eq!(fractional.len(), buffer.len());
let neg_fractional = buffer.pitch_shift(-2.5).unwrap(); assert_eq!(neg_fractional.len(), buffer.len());
}
// Shifted output must be non-silent, within [-1, 1], and non-constant.
#[test]
fn test_pitch_shift_quality() {
let buffer = AudioBuffer::sine_wave(440.0, 1.0, 44100, 0.5);
let shifted = buffer.pitch_shift(12.0).unwrap();
let max_amplitude = shifted.samples.iter().map(|&s| s.abs()).fold(0.0, f32::max);
assert!(max_amplitude > 0.0, "Output should not be silent");
assert!(
max_amplitude <= 1.0,
"Output should not exceed [-1, 1] range"
);
let mut has_variation = false;
let first_sample = shifted.samples[0];
for &sample in &shifted.samples {
if (sample - first_sample).abs() > 0.001 {
has_variation = true;
break;
}
}
assert!(has_variation, "Output should have some variation");
}
}