1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
use crate::stream::record_audio;
use deepspeech::Model;
use std::path::Path;
/// Configuration for a single record-and-transcribe session.
#[derive(Debug, Clone)]
pub struct StreamSettings {
    /// Amplitude threshold passed to `record_audio`; input below it is
    /// treated as silence. Exact scale is defined by `record_audio`.
    pub silence_level: i32,
    /// Path to the directory containing the DeepSpeech model graph
    /// (`.pb`/`.pbmm`/`.tflite`) and optional `.scorer` file.
    pub model_dir_str: String,
    /// When true, amplitudes are shown while recording.
    pub show_amplitudes: bool,
    /// Pause length that ends a recording — presumably seconds;
    /// confirm against `record_audio`'s contract.
    pub pause_length: f32,
}
/// Records audio from the microphone and transcribes it with the model
/// found in `config.model_dir_str`.
///
/// Returns `None` when `record_audio` produces no audio stream, otherwise
/// `Some` with the decoded text.
pub fn transcribe(config: StreamSettings) -> Option<String> {
    // `match Some(..) => Some(f(..)), None => None` is exactly Option::map.
    record_audio(
        config.silence_level,
        config.show_amplitudes,
        config.pause_length,
    )
    .map(|audio_stream| convert(&audio_stream, &config.model_dir_str))
}
/// Decodes raw 16-bit audio samples into text with a DeepSpeech model.
///
/// `model_dir_str` names a directory that is scanned for a model graph
/// (`.pb`, `.pbmm`, or `.tflite`) and an optional external `.scorer`.
/// When no graph file is found — including when the directory cannot be
/// read at all — the default `<dir>/output_graph.pb` path is used.
///
/// # Panics
/// Panics if the model cannot be loaded from the chosen graph, the scorer
/// cannot be enabled, or decoding fails.
fn convert(audio_stream: &[i16], model_dir_str: &str) -> String {
    let dir_path = Path::new(model_dir_str);
    // Default graph location; overwritten if the scan finds a graph file.
    let mut graph_name: Box<Path> = dir_path.join("output_graph.pb").into_boxed_path();
    let mut scorer_name: Option<Box<Path>> = None;

    // First `flatten` skips a failed `read_dir` (keeping the default paths,
    // matching the original best-effort behavior); the second skips any
    // individual entry that errored instead of panicking on it.
    for entry in dir_path.read_dir().into_iter().flatten().flatten() {
        let file_path = entry.path();
        if !file_path.is_file() {
            continue;
        }
        if let Some(ext) = file_path.extension() {
            if ext == "pb" || ext == "pbmm" || ext == "tflite" {
                graph_name = file_path.into_boxed_path();
            } else if ext == "scorer" {
                scorer_name = Some(file_path.into_boxed_path());
            }
        }
    }

    let mut m = Model::load_from_files(&graph_name)
        .expect("failed to load DeepSpeech model graph");
    if let Some(scorer) = scorer_name {
        m.enable_external_scorer(&scorer)
            .expect("failed to enable external DeepSpeech scorer");
    }
    m.speech_to_text(audio_stream)
        .expect("DeepSpeech failed to decode the audio stream")
}