use std::env;
use hound::WavReader;
use vosk::{Model, Recognizer, SpeakerModel};
/// Example: transcribe a WAV file with Vosk, printing partial hypotheses as
/// audio is fed in and a final result that includes the speaker vector.
///
/// Expected positional arguments: `<model-path> <speaker-model-path> <wav-path>`.
/// Any missing argument or failed load aborts with an explanatory panic, which
/// is acceptable for an example binary.
fn main() {
    // Skip the executable name; the three positional arguments follow.
    let mut cli_args = env::args().skip(1);

    let model_path = cli_args.next().expect("A model path was not provided");
    let speaker_model_path = cli_args
        .next()
        .expect("A speaker model path was not provided");
    let wav_path = cli_args
        .next()
        .expect("A path for the WAV file to be read was not provided");

    // Open the WAV up front and grab its sample rate before decoding, since
    // the recognizer needs the rate at construction time.
    let mut wav_reader = WavReader::open(wav_path).expect("Could not create the WAV reader");
    let sample_rate = wav_reader.spec().sample_rate as f32;

    // Decode every sample eagerly; collecting into hound::Result short-circuits
    // on the first decode error.
    let audio = wav_reader
        .samples()
        .collect::<hound::Result<Vec<i16>>>()
        .expect("Could not read WAV file");

    let model = Model::new(model_path).expect("Could not create the model");
    let spk_model =
        SpeakerModel::new(speaker_model_path).expect("Could not create the speaker model");
    let mut recognizer = Recognizer::new_with_speaker(&model, sample_rate, &spk_model)
        .expect("Could not create the recognizer");

    // Feed the audio in small chunks so an intermediate hypothesis can be
    // printed after each one, demonstrating streaming-style recognition.
    for chunk in audio.chunks(100) {
        recognizer.accept_waveform(chunk).unwrap();
        println!("{:#?}", recognizer.partial_result());
    }

    // The final result carries the full transcript; with a speaker model
    // attached it also includes the speaker identification vector.
    println!("{:#?}", recognizer.final_result().single().unwrap());
}