use crate::buffer::AudioBuffer;
pub use svara::voice::VoiceProfile;
pub use svara::glottal::{GlottalModel, GlottalSource};
pub use svara::formant::Formant;
pub use svara::formant::Vowel;
pub use svara::formant::VowelTarget;
pub use svara::formant::FormantFilter;
pub use svara::tract::{NasalPlace, VocalTract};
pub use svara::phoneme::{Phoneme, PhonemeClass};
pub use svara::phoneme::phoneme_formants;
pub use svara::phoneme::phoneme_duration;
pub use svara::phoneme::f2_locus_equation;
pub use svara::phoneme::synthesize_phoneme;
pub use svara::prosody::Stress;
pub use svara::prosody::IntonationPattern;
pub use svara::prosody::ProsodyContour;
pub use svara::sequence::PhonemeEvent;
pub use svara::sequence::PhonemeSequence;
pub use svara::spectral;
pub use svara::prelude::SvaraError;
/// Synthesize a full [`PhonemeSequence`] with the given voice profile into a
/// mono [`AudioBuffer`] at `sample_rate` Hz.
///
/// # Errors
/// Returns [`crate::NadaError::Dsp`] if the underlying svara renderer fails,
/// or if the rendered samples cannot be wrapped in an `AudioBuffer`.
pub fn render_sequence(
    sequence: &PhonemeSequence,
    voice: &VoiceProfile,
    sample_rate: u32,
) -> crate::Result<AudioBuffer> {
    // svara works in floating-point sample rates; the buffer keeps the integer rate.
    let rate_hz = sample_rate as f32;
    let samples = match sequence.render(voice, rate_hz) {
        Ok(rendered) => rendered,
        Err(e) => return Err(crate::NadaError::Dsp(format!("voice synthesis failed: {e}"))),
    };
    // Output is mono (1 channel), so the sample vector is already "interleaved".
    AudioBuffer::from_interleaved(samples, 1, sample_rate)
        .map_err(|e| crate::NadaError::Dsp(format!("failed to create buffer from voice output: {e}")))
}
/// Synthesize a single [`Phoneme`] for `duration` seconds with the given voice
/// profile into a mono [`AudioBuffer`] at `sample_rate` Hz.
///
/// # Errors
/// Returns [`crate::NadaError::Dsp`] if phoneme synthesis fails or if the
/// resulting samples cannot be wrapped in an `AudioBuffer`.
pub fn render_phoneme(
    phoneme: &Phoneme,
    voice: &VoiceProfile,
    sample_rate: u32,
    duration: f32,
) -> crate::Result<AudioBuffer> {
    // Both failure paths surface as DSP errors with distinct messages.
    let to_dsp = crate::NadaError::Dsp;
    let samples = synthesize_phoneme(phoneme, voice, sample_rate as f32, duration)
        .map_err(|e| to_dsp(format!("phoneme synthesis failed: {e}")))?;
    // Mono output: a flat sample vector is valid interleaved data for 1 channel.
    AudioBuffer::from_interleaved(samples, 1, sample_rate)
        .map_err(|e| to_dsp(format!("failed to create buffer from phoneme output: {e}")))
}
/// Drive `source` through `tract` one sample at a time for `frames` samples
/// and collect the output into a mono [`AudioBuffer`].
///
/// Infallible by signature: if buffer construction fails, a silent buffer of
/// the requested length is returned instead.
// NOTE(review): the error from `from_interleaved` is swallowed in favor of
// silence — confirm this best-effort fallback is intentional.
pub fn render_vocal_tract(
    source: &mut GlottalSource,
    tract: &mut VocalTract,
    frames: usize,
    sample_rate: u32,
) -> AudioBuffer {
    // Feed each glottal sample through the tract filter, collecting the output.
    let samples: Vec<f32> = (0..frames)
        .map(|_| {
            let excitation = source.next_sample();
            tract.process_sample(excitation)
        })
        .collect();
    match AudioBuffer::from_interleaved(samples, 1, sample_rate) {
        Ok(buffer) => buffer,
        // `.max(1)` guards against a zero sample rate in the fallback path.
        Err(_) => AudioBuffer::silence(1, frames, sample_rate.max(1)),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A single vowel phoneme renders to finite, non-silent mono audio.
    #[test]
    fn render_vowel_a() {
        let profile = VoiceProfile::new_male();
        let rendered = render_phoneme(&Phoneme::VowelA, &profile, 44100, 0.2).unwrap();
        assert_eq!(rendered.channels(), 1);
        assert!(rendered.frames() > 0);
        assert!(rendered.rms() > 0.0);
        assert!(rendered.samples().iter().all(|s| s.is_finite()));
    }

    /// A short vowel-nasal-vowel sequence renders to non-silent audio.
    #[test]
    fn render_phoneme_sequence() {
        let profile = VoiceProfile::new_female();
        let mut sequence = PhonemeSequence::new();
        // "a" (stressed) -> "n" -> "i", 0.5 s total.
        sequence.push(PhonemeEvent::new(Phoneme::VowelA, 0.2, Stress::Primary));
        sequence.push(PhonemeEvent::new(Phoneme::NasalN, 0.1, Stress::Unstressed));
        sequence.push(PhonemeEvent::new(Phoneme::VowelI, 0.2, Stress::Unstressed));
        let rendered = render_sequence(&sequence, &profile, 44100).unwrap();
        assert!(rendered.frames() > 0);
        assert!(rendered.rms() > 0.0);
    }

    /// Built-in profiles order their base pitch: male < female < child.
    #[test]
    fn voice_profiles() {
        let male = VoiceProfile::new_male();
        let female = VoiceProfile::new_female();
        let child = VoiceProfile::new_child();
        assert!(male.base_f0 > 0.0);
        assert!(female.base_f0 > male.base_f0);
        assert!(child.base_f0 > female.base_f0);
    }

    /// A glottal source fed through a fresh vocal tract yields the requested
    /// number of finite frames.
    #[test]
    fn glottal_source_renders() {
        let profile = VoiceProfile::new_male();
        let mut glottis = profile.create_glottal_source(44100.0).unwrap();
        let mut tract = VocalTract::new(44100.0);
        // 0.1 s at 44.1 kHz.
        let rendered = render_vocal_tract(&mut glottis, &mut tract, 4410, 44100);
        assert_eq!(rendered.frames(), 4410);
        assert!(rendered.samples().iter().all(|s| s.is_finite()));
    }
}