/// Options controlling normalization of recognized text (prost-generated message).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextNormalizationOptions {
    /// Wire value of `text_normalization_options::TextNormalization`.
    #[prost(enumeration = "text_normalization_options::TextNormalization", tag = "1")]
    pub text_normalization: i32,
    /// Presumably enables profanity filtering — named from the .proto; confirm against service docs.
    #[prost(bool, tag = "2")]
    pub profanity_filter: bool,
    /// Presumably toggles literature-style text output — named from the .proto; confirm against service docs.
    #[prost(bool, tag = "3")]
    pub literature_text: bool,
    /// Wire value of `text_normalization_options::PhoneFormattingMode`.
    #[prost(enumeration = "text_normalization_options::PhoneFormattingMode", tag = "4")]
    pub phone_formatting_mode: i32,
}
/// Nested message and enum types in `TextNormalizationOptions`.
pub mod text_normalization_options {
    /// Whether text normalization is applied to recognition results.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum TextNormalization {
        Unspecified = 0,
        Enabled = 1,
        Disabled = 2,
    }
    impl TextNormalization {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                TextNormalization::Unspecified => "TEXT_NORMALIZATION_UNSPECIFIED",
                TextNormalization::Enabled => "TEXT_NORMALIZATION_ENABLED",
                TextNormalization::Disabled => "TEXT_NORMALIZATION_DISABLED",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "TEXT_NORMALIZATION_UNSPECIFIED" => Some(Self::Unspecified),
                "TEXT_NORMALIZATION_ENABLED" => Some(Self::Enabled),
                "TEXT_NORMALIZATION_DISABLED" => Some(Self::Disabled),
                _ => None,
            }
        }
    }
    /// Phone-number formatting mode for normalized text.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum PhoneFormattingMode {
        Unspecified = 0,
        Disabled = 1,
    }
    impl PhoneFormattingMode {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                PhoneFormattingMode::Unspecified => "PHONE_FORMATTING_MODE_UNSPECIFIED",
                PhoneFormattingMode::Disabled => "PHONE_FORMATTING_MODE_DISABLED",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "PHONE_FORMATTING_MODE_UNSPECIFIED" => Some(Self::Unspecified),
                "PHONE_FORMATTING_MODE_DISABLED" => Some(Self::Disabled),
                _ => None,
            }
        }
    }
}
/// Configuration for the built-in end-of-utterance (EOU) classifier.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DefaultEouClassifier {
    /// Sensitivity preset; wire value of `default_eou_classifier::EouSensitivity`.
    #[prost(enumeration = "default_eou_classifier::EouSensitivity", tag = "1")]
    pub r#type: i32,
    /// Hint for the maximum pause between words, in milliseconds (by field name).
    #[prost(int64, tag = "2")]
    pub max_pause_between_words_hint_ms: i64,
}
/// Nested message and enum types in `DefaultEouClassifier`.
pub mod default_eou_classifier {
    /// Sensitivity levels for end-of-utterance detection.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum EouSensitivity {
        Unspecified = 0,
        Default = 1,
        High = 2,
    }
    impl EouSensitivity {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                EouSensitivity::Unspecified => "EOU_SENSITIVITY_UNSPECIFIED",
                EouSensitivity::Default => "DEFAULT",
                EouSensitivity::High => "HIGH",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "EOU_SENSITIVITY_UNSPECIFIED" => Some(Self::Unspecified),
                "DEFAULT" => Some(Self::Default),
                "HIGH" => Some(Self::High),
                _ => None,
            }
        }
    }
}
/// Marker message selecting an external (caller-driven) EOU classifier; carries no fields —
/// EOU events are presumably signaled by the client via `StreamingRequest::Eou` (confirm against service docs).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExternalEouClassifier {}
/// Selects which end-of-utterance classifier to use (exactly one of the oneof variants).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EouClassifierOptions {
    /// The chosen classifier; `None` when the field was absent on the wire.
    #[prost(oneof = "eou_classifier_options::Classifier", tags = "1, 2")]
    pub classifier: ::core::option::Option<eou_classifier_options::Classifier>,
}
/// Nested message and enum types in `EouClassifierOptions`.
pub mod eou_classifier_options {
    /// Oneof over the supported classifier configurations.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Classifier {
        /// Use the server's default EOU classifier.
        #[prost(message, tag = "1")]
        DefaultClassifier(super::DefaultEouClassifier),
        /// Use an external EOU classifier.
        #[prost(message, tag = "2")]
        ExternalClassifier(super::ExternalEouClassifier),
    }
}
/// A single recognition classifier to apply, plus the events that trigger it.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifier {
    /// Classifier name/identifier.
    #[prost(string, tag = "1")]
    pub classifier: ::prost::alloc::string::String,
    /// Wire values of `recognition_classifier::TriggerType` selecting when the classifier runs.
    #[prost(enumeration = "recognition_classifier::TriggerType", repeated, tag = "2")]
    pub triggers: ::prost::alloc::vec::Vec<i32>,
}
/// Nested message and enum types in `RecognitionClassifier`.
pub mod recognition_classifier {
    /// Recognition events that can trigger classifier evaluation.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum TriggerType {
        Unspecified = 0,
        OnUtterance = 1,
        OnFinal = 2,
        OnPartial = 3,
    }
    impl TriggerType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                TriggerType::Unspecified => "TRIGGER_TYPE_UNSPECIFIED",
                TriggerType::OnUtterance => "ON_UTTERANCE",
                TriggerType::OnFinal => "ON_FINAL",
                TriggerType::OnPartial => "ON_PARTIAL",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "TRIGGER_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "ON_UTTERANCE" => Some(Self::OnUtterance),
                "ON_FINAL" => Some(Self::OnFinal),
                "ON_PARTIAL" => Some(Self::OnPartial),
                _ => None,
            }
        }
    }
}
/// Set of recognition classifiers to run during recognition.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierOptions {
    /// Classifiers with their trigger configuration.
    #[prost(message, repeated, tag = "1")]
    pub classifiers: ::prost::alloc::vec::Vec<RecognitionClassifier>,
}
/// Options enabling speaker- and conversation-level analysis of the audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechAnalysisOptions {
    /// Enable per-speaker analysis (see `SpeakerAnalysis` results).
    #[prost(bool, tag = "1")]
    pub enable_speaker_analysis: bool,
    /// Enable whole-conversation analysis (see `ConversationAnalysis` results).
    #[prost(bool, tag = "2")]
    pub enable_conversation_analysis: bool,
    /// Quantile levels requested for descriptive statistics (presumably values in [0, 1] — confirm).
    #[prost(double, repeated, tag = "3")]
    pub descriptive_statistics_quantiles: ::prost::alloc::vec::Vec<f64>,
}
/// Description of headerless (raw) audio input.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RawAudio {
    /// Sample encoding; wire value of `raw_audio::AudioEncoding`.
    #[prost(enumeration = "raw_audio::AudioEncoding", tag = "1")]
    pub audio_encoding: i32,
    /// Sampling rate in hertz.
    #[prost(int64, tag = "2")]
    pub sample_rate_hertz: i64,
    /// Number of audio channels.
    #[prost(int64, tag = "3")]
    pub audio_channel_count: i64,
}
/// Nested message and enum types in `RawAudio`.
pub mod raw_audio {
    /// Supported raw sample encodings.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum AudioEncoding {
        Unspecified = 0,
        /// 16-bit linear PCM (by enum name).
        Linear16Pcm = 1,
    }
    impl AudioEncoding {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                AudioEncoding::Unspecified => "AUDIO_ENCODING_UNSPECIFIED",
                AudioEncoding::Linear16Pcm => "LINEAR16_PCM",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "AUDIO_ENCODING_UNSPECIFIED" => Some(Self::Unspecified),
                "LINEAR16_PCM" => Some(Self::Linear16Pcm),
                _ => None,
            }
        }
    }
}
/// Description of audio delivered inside a container format (WAV/OGG-Opus/MP3).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ContainerAudio {
    /// Container type; wire value of `container_audio::ContainerAudioType`.
    #[prost(enumeration = "container_audio::ContainerAudioType", tag = "1")]
    pub container_audio_type: i32,
}
/// Nested message and enum types in `ContainerAudio`.
pub mod container_audio {
    /// Supported audio container formats.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum ContainerAudioType {
        Unspecified = 0,
        Wav = 1,
        OggOpus = 2,
        Mp3 = 3,
    }
    impl ContainerAudioType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                ContainerAudioType::Unspecified => "CONTAINER_AUDIO_TYPE_UNSPECIFIED",
                ContainerAudioType::Wav => "WAV",
                ContainerAudioType::OggOpus => "OGG_OPUS",
                ContainerAudioType::Mp3 => "MP3",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "CONTAINER_AUDIO_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "WAV" => Some(Self::Wav),
                "OGG_OPUS" => Some(Self::OggOpus),
                "MP3" => Some(Self::Mp3),
                _ => None,
            }
        }
    }
}
/// Audio format description: either raw samples or a container format.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioFormatOptions {
    /// The chosen format; `None` when absent on the wire.
    #[prost(oneof = "audio_format_options::AudioFormat", tags = "1, 2")]
    pub audio_format: ::core::option::Option<audio_format_options::AudioFormat>,
}
/// Nested message and enum types in `AudioFormatOptions`.
pub mod audio_format_options {
    /// Oneof over the supported audio format descriptions.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum AudioFormat {
        /// Raw (headerless) audio.
        #[prost(message, tag = "1")]
        RawAudio(super::RawAudio),
        /// Audio in a container format.
        #[prost(message, tag = "2")]
        ContainerAudio(super::ContainerAudio),
    }
}
/// Restricts the languages considered during recognition via a white- or blacklist.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguageRestrictionOptions {
    /// Whether `language_code` acts as a whitelist or blacklist.
    #[prost(
        enumeration = "language_restriction_options::LanguageRestrictionType",
        tag = "1"
    )]
    pub restriction_type: i32,
    /// Language codes the restriction applies to.
    #[prost(string, repeated, tag = "2")]
    pub language_code: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Nested message and enum types in `LanguageRestrictionOptions`.
pub mod language_restriction_options {
    /// Interpretation of the language-code list.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum LanguageRestrictionType {
        Unspecified = 0,
        Whitelist = 1,
        Blacklist = 2,
    }
    impl LanguageRestrictionType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                LanguageRestrictionType::Unspecified => {
                    "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED"
                }
                LanguageRestrictionType::Whitelist => "WHITELIST",
                LanguageRestrictionType::Blacklist => "BLACKLIST",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "WHITELIST" => Some(Self::Whitelist),
                "BLACKLIST" => Some(Self::Blacklist),
                _ => None,
            }
        }
    }
}
/// Options selecting and configuring the recognition model.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionModelOptions {
    /// Model identifier (service-defined).
    #[prost(string, tag = "1")]
    pub model: ::prost::alloc::string::String,
    /// Format of the incoming audio.
    #[prost(message, optional, tag = "2")]
    pub audio_format: ::core::option::Option<AudioFormatOptions>,
    /// Text normalization settings applied to results.
    #[prost(message, optional, tag = "3")]
    pub text_normalization: ::core::option::Option<TextNormalizationOptions>,
    /// Language white-/blacklist restrictions.
    #[prost(message, optional, tag = "4")]
    pub language_restriction: ::core::option::Option<LanguageRestrictionOptions>,
    /// Processing mode; wire value of `recognition_model_options::AudioProcessingType`.
    #[prost(enumeration = "recognition_model_options::AudioProcessingType", tag = "5")]
    pub audio_processing_type: i32,
}
/// Nested message and enum types in `RecognitionModelOptions`.
pub mod recognition_model_options {
    /// How the service processes the supplied audio.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum AudioProcessingType {
        Unspecified = 0,
        RealTime = 1,
        FullData = 2,
    }
    impl AudioProcessingType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                AudioProcessingType::Unspecified => "AUDIO_PROCESSING_TYPE_UNSPECIFIED",
                AudioProcessingType::RealTime => "REAL_TIME",
                AudioProcessingType::FullData => "FULL_DATA",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "AUDIO_PROCESSING_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "REAL_TIME" => Some(Self::RealTime),
                "FULL_DATA" => Some(Self::FullData),
                _ => None,
            }
        }
    }
}
/// Options enabling/disabling speaker labeling (diarization-style tagging — confirm against service docs).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerLabelingOptions {
    /// Wire value of `speaker_labeling_options::SpeakerLabeling`.
    #[prost(enumeration = "speaker_labeling_options::SpeakerLabeling", tag = "1")]
    pub speaker_labeling: i32,
}
/// Nested message and enum types in `SpeakerLabelingOptions`.
pub mod speaker_labeling_options {
    /// Whether speaker labeling is applied.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum SpeakerLabeling {
        Unspecified = 0,
        Enabled = 1,
        Disabled = 2,
    }
    impl SpeakerLabeling {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                SpeakerLabeling::Unspecified => "SPEAKER_LABELING_UNSPECIFIED",
                SpeakerLabeling::Enabled => "SPEAKER_LABELING_ENABLED",
                SpeakerLabeling::Disabled => "SPEAKER_LABELING_DISABLED",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "SPEAKER_LABELING_UNSPECIFIED" => Some(Self::Unspecified),
                "SPEAKER_LABELING_ENABLED" => Some(Self::Enabled),
                "SPEAKER_LABELING_DISABLED" => Some(Self::Disabled),
                _ => None,
            }
        }
    }
}
/// Session-level options for streaming recognition; sent as the first `StreamingRequest` event.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingOptions {
    /// Model configuration (audio format, normalization, language restriction).
    #[prost(message, optional, tag = "1")]
    pub recognition_model: ::core::option::Option<RecognitionModelOptions>,
    /// End-of-utterance classifier configuration.
    #[prost(message, optional, tag = "2")]
    pub eou_classifier: ::core::option::Option<EouClassifierOptions>,
    /// Recognition classifier configuration.
    #[prost(message, optional, tag = "3")]
    pub recognition_classifier: ::core::option::Option<RecognitionClassifierOptions>,
    /// Speech analysis configuration.
    #[prost(message, optional, tag = "4")]
    pub speech_analysis: ::core::option::Option<SpeechAnalysisOptions>,
    /// Speaker labeling configuration.
    #[prost(message, optional, tag = "5")]
    pub speaker_labeling: ::core::option::Option<SpeakerLabelingOptions>,
}
/// A chunk of audio bytes sent to the recognizer.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioChunk {
    /// Raw audio bytes in the format declared via `AudioFormatOptions`.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
}
/// A period of silence, described by duration instead of audio bytes.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SilenceChunk {
    /// Silence duration in milliseconds.
    #[prost(int64, tag = "1")]
    pub duration_ms: i64,
}
/// Empty marker event signaling an end of utterance from the client side.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Eou {}
/// Envelope for client events in the bidirectional streaming recognition RPC.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingRequest {
    /// One event per message; `None` when absent on the wire.
    #[prost(oneof = "streaming_request::Event", tags = "1, 2, 3, 4")]
    pub event: ::core::option::Option<streaming_request::Event>,
}
/// Nested message and enum types in `StreamingRequest`.
pub mod streaming_request {
    /// Oneof over client-sent streaming events.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Event {
        /// Session configuration (typically the first event).
        #[prost(message, tag = "1")]
        SessionOptions(super::StreamingOptions),
        /// Audio data chunk.
        #[prost(message, tag = "2")]
        Chunk(super::AudioChunk),
        /// Silence described by duration.
        #[prost(message, tag = "3")]
        SilenceChunk(super::SilenceChunk),
        /// Client-signaled end of utterance.
        #[prost(message, tag = "4")]
        Eou(super::Eou),
    }
}
/// Request for (async) file recognition: options plus the audio source.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognizeFileRequest {
    /// Model configuration.
    #[prost(message, optional, tag = "3")]
    pub recognition_model: ::core::option::Option<RecognitionModelOptions>,
    /// Recognition classifier configuration.
    #[prost(message, optional, tag = "4")]
    pub recognition_classifier: ::core::option::Option<RecognitionClassifierOptions>,
    /// Speech analysis configuration.
    #[prost(message, optional, tag = "5")]
    pub speech_analysis: ::core::option::Option<SpeechAnalysisOptions>,
    /// Speaker labeling configuration.
    #[prost(message, optional, tag = "6")]
    pub speaker_labeling: ::core::option::Option<SpeakerLabelingOptions>,
    /// Audio payload: inline bytes or a URI.
    #[prost(oneof = "recognize_file_request::AudioSource", tags = "1, 2")]
    pub audio_source: ::core::option::Option<recognize_file_request::AudioSource>,
}
/// Nested message and enum types in `RecognizeFileRequest`.
pub mod recognize_file_request {
    /// Oneof over the supported audio sources.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum AudioSource {
        /// Audio content delivered inline as bytes.
        #[prost(bytes, tag = "1")]
        Content(::prost::alloc::vec::Vec<u8>),
        /// URI pointing at the audio to recognize.
        #[prost(string, tag = "2")]
        Uri(::prost::alloc::string::String),
    }
}
/// A recognized word with its time span in the audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Word {
    /// The word text.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// Word start time, milliseconds.
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// Word end time, milliseconds.
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
}
/// Estimated probability that a hypothesis is in a given language.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguageEstimation {
    /// Language code of the estimate.
    #[prost(string, tag = "1")]
    pub language_code: ::prost::alloc::string::String,
    /// Probability assigned to this language.
    #[prost(double, tag = "2")]
    pub probability: f64,
}
/// One recognition hypothesis: word timings, full text, confidence, and language estimates.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Alternative {
    /// Per-word breakdown with timings.
    #[prost(message, repeated, tag = "1")]
    pub words: ::prost::alloc::vec::Vec<Word>,
    /// Full hypothesis text.
    #[prost(string, tag = "2")]
    pub text: ::prost::alloc::string::String,
    /// Hypothesis start time, milliseconds.
    #[prost(int64, tag = "3")]
    pub start_time_ms: i64,
    /// Hypothesis end time, milliseconds.
    #[prost(int64, tag = "4")]
    pub end_time_ms: i64,
    /// Hypothesis confidence score.
    #[prost(double, tag = "5")]
    pub confidence: f64,
    /// Language probability estimates for this hypothesis.
    #[prost(message, repeated, tag = "6")]
    pub languages: ::prost::alloc::vec::Vec<LanguageEstimation>,
}
/// Server event reporting a detected end of utterance.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EouUpdate {
    /// EOU position in the audio, milliseconds.
    /// NOTE: field numbering starts at 2 in the .proto (tag 1 is unused/reserved here).
    #[prost(int64, tag = "2")]
    pub time_ms: i64,
}
/// Server event carrying a set of recognition alternatives (partial or final).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AlternativeUpdate {
    /// Hypotheses for the current window.
    #[prost(message, repeated, tag = "1")]
    pub alternatives: ::prost::alloc::vec::Vec<Alternative>,
    /// Deprecated in the .proto; retained only for wire compatibility
    /// (`StreamingResponse.channel_tag` is the current location).
    #[deprecated]
    #[prost(string, tag = "2")]
    pub channel_tag: ::prost::alloc::string::String,
}
/// Cursor positions describing how far the service has progressed through the audio.
/// All values are milliseconds unless the field name says otherwise; exact semantics
/// of each cursor come from the service docs — field names below are the only source here.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioCursors {
    /// Amount of audio received so far (by field name).
    #[prost(int64, tag = "1")]
    pub received_data_ms: i64,
    /// Reset position (by field name; confirm semantics against service docs).
    #[prost(int64, tag = "2")]
    pub reset_time_ms: i64,
    /// Position covered by partial results (by field name).
    #[prost(int64, tag = "3")]
    pub partial_time_ms: i64,
    /// Position covered by final results (by field name).
    #[prost(int64, tag = "4")]
    pub final_time_ms: i64,
    /// Index of the latest final result (a counter, not milliseconds).
    #[prost(int64, tag = "5")]
    pub final_index: i64,
    /// Position of the latest end of utterance (by field name).
    #[prost(int64, tag = "6")]
    pub eou_time_ms: i64,
}
/// Server event refining a previously emitted final result (e.g. normalized text).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FinalRefinement {
    /// Index of the final result this refinement applies to (see `AudioCursors::final_index`).
    #[prost(int64, tag = "1")]
    pub final_index: i64,
    /// Refinement payload; currently only normalized text.
    #[prost(oneof = "final_refinement::Type", tags = "2")]
    pub r#type: ::core::option::Option<final_refinement::Type>,
}
/// Nested message and enum types in `FinalRefinement`.
pub mod final_refinement {
    /// Oneof over refinement kinds.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Type {
        /// Alternatives with normalized text for the referenced final.
        #[prost(message, tag = "2")]
        NormalizedText(super::AlternativeUpdate),
    }
}
/// Session status reported by the server.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatusCode {
    /// Status kind; wire value of the top-level `CodeType` enum.
    #[prost(enumeration = "CodeType", tag = "1")]
    pub code_type: i32,
    /// Human-readable status message.
    #[prost(string, tag = "2")]
    pub message: ::prost::alloc::string::String,
}
/// Identifiers for a recognition session.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SessionUuid {
    /// Server-assigned session UUID.
    #[prost(string, tag = "1")]
    pub uuid: ::prost::alloc::string::String,
    /// Caller-supplied request identifier.
    #[prost(string, tag = "2")]
    pub user_request_id: ::prost::alloc::string::String,
}
/// A highlighted phrase with its time span, produced by a classifier.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhraseHighlight {
    /// Highlighted text.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// Highlight start time, milliseconds.
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// Highlight end time, milliseconds.
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
}
/// A label emitted by a classifier with its confidence.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierLabel {
    /// Label name.
    #[prost(string, tag = "1")]
    pub label: ::prost::alloc::string::String,
    /// Confidence for this label.
    #[prost(double, tag = "2")]
    pub confidence: f64,
}
/// Full result of one classifier: its name, phrase highlights, and labels.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierResult {
    /// Name of the classifier that produced this result.
    #[prost(string, tag = "1")]
    pub classifier: ::prost::alloc::string::String,
    /// Phrases the classifier highlighted.
    #[prost(message, repeated, tag = "2")]
    pub highlights: ::prost::alloc::vec::Vec<PhraseHighlight>,
    /// Labels with confidences.
    #[prost(message, repeated, tag = "3")]
    pub labels: ::prost::alloc::vec::Vec<RecognitionClassifierLabel>,
}
/// Server event carrying classifier results for a window of the audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierUpdate {
    /// Window this result covers; wire value of `recognition_classifier_update::WindowType`.
    #[prost(enumeration = "recognition_classifier_update::WindowType", tag = "1")]
    pub window_type: i32,
    /// Window start time, milliseconds.
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// Window end time, milliseconds.
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
    /// The classifier output for this window.
    #[prost(message, optional, tag = "4")]
    pub classifier_result: ::core::option::Option<RecognitionClassifierResult>,
}
/// Nested message and enum types in `RecognitionClassifierUpdate`.
pub mod recognition_classifier_update {
    /// Which recognition window the classifier result refers to.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum WindowType {
        Unspecified = 0,
        LastUtterance = 1,
        LastFinal = 2,
        LastPartial = 3,
    }
    impl WindowType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                WindowType::Unspecified => "WINDOW_TYPE_UNSPECIFIED",
                WindowType::LastUtterance => "LAST_UTTERANCE",
                WindowType::LastFinal => "LAST_FINAL",
                WindowType::LastPartial => "LAST_PARTIAL",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "WINDOW_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "LAST_UTTERANCE" => Some(Self::LastUtterance),
                "LAST_FINAL" => Some(Self::LastFinal),
                "LAST_PARTIAL" => Some(Self::LastPartial),
                _ => None,
            }
        }
    }
}
/// Summary statistics (min/max/mean/std plus requested quantiles) over some measured quantity.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DescriptiveStatistics {
    /// Minimum observed value.
    #[prost(double, tag = "1")]
    pub min: f64,
    /// Maximum observed value.
    #[prost(double, tag = "2")]
    pub max: f64,
    /// Mean of observed values.
    #[prost(double, tag = "3")]
    pub mean: f64,
    /// Standard deviation of observed values.
    #[prost(double, tag = "4")]
    pub std: f64,
    /// Quantiles at the levels requested via `SpeechAnalysisOptions::descriptive_statistics_quantiles`.
    #[prost(message, repeated, tag = "5")]
    pub quantiles: ::prost::alloc::vec::Vec<descriptive_statistics::Quantile>,
}
/// Nested message and enum types in `DescriptiveStatistics`.
pub mod descriptive_statistics {
    /// A single (level, value) quantile pair.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Quantile {
        /// Quantile level.
        #[prost(double, tag = "1")]
        pub level: f64,
        /// Value at that level.
        #[prost(double, tag = "2")]
        pub value: f64,
    }
}
/// A time interval within the audio, in milliseconds.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioSegmentBoundaries {
    /// Segment start, milliseconds.
    #[prost(int64, tag = "1")]
    pub start_time_ms: i64,
    /// Segment end, milliseconds.
    #[prost(int64, tag = "2")]
    pub end_time_ms: i64,
}
/// Per-speaker speech statistics computed over a window of the conversation.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerAnalysis {
    /// Tag identifying the speaker these statistics describe.
    #[prost(string, tag = "1")]
    pub speaker_tag: ::prost::alloc::string::String,
    /// Window the statistics cover; wire value of `speaker_analysis::WindowType`.
    #[prost(enumeration = "speaker_analysis::WindowType", tag = "2")]
    pub window_type: i32,
    /// Boundaries of the analyzed speech.
    #[prost(message, optional, tag = "3")]
    pub speech_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
    /// Total speech duration, milliseconds.
    #[prost(int64, tag = "4")]
    pub total_speech_ms: i64,
    /// Fraction of the window that is speech.
    #[prost(double, tag = "5")]
    pub speech_ratio: f64,
    /// Total silence duration, milliseconds.
    #[prost(int64, tag = "6")]
    pub total_silence_ms: i64,
    /// Fraction of the window that is silence.
    #[prost(double, tag = "7")]
    pub silence_ratio: f64,
    /// Number of recognized words.
    #[prost(int64, tag = "8")]
    pub words_count: i64,
    /// Number of recognized letters.
    #[prost(int64, tag = "9")]
    pub letters_count: i64,
    /// Distribution of words per second.
    #[prost(message, optional, tag = "10")]
    pub words_per_second: ::core::option::Option<DescriptiveStatistics>,
    /// Distribution of letters per second.
    #[prost(message, optional, tag = "11")]
    pub letters_per_second: ::core::option::Option<DescriptiveStatistics>,
    /// Distribution of words per utterance.
    #[prost(message, optional, tag = "12")]
    pub words_per_utterance: ::core::option::Option<DescriptiveStatistics>,
    /// Distribution of letters per utterance.
    #[prost(message, optional, tag = "13")]
    pub letters_per_utterance: ::core::option::Option<DescriptiveStatistics>,
    /// Number of utterances.
    #[prost(int64, tag = "14")]
    pub utterance_count: i64,
    /// Distribution of utterance durations.
    #[prost(message, optional, tag = "15")]
    pub utterance_duration_estimation: ::core::option::Option<DescriptiveStatistics>,
}
/// Nested message and enum types in `SpeakerAnalysis`.
pub mod speaker_analysis {
    /// Which window the speaker statistics were computed over.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum WindowType {
        Unspecified = 0,
        Total = 1,
        LastUtterance = 2,
    }
    impl WindowType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                WindowType::Unspecified => "WINDOW_TYPE_UNSPECIFIED",
                WindowType::Total => "TOTAL",
                WindowType::LastUtterance => "LAST_UTTERANCE",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "WINDOW_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "TOTAL" => Some(Self::Total),
                "LAST_UTTERANCE" => Some(Self::LastUtterance),
                _ => None,
            }
        }
    }
}
/// Conversation-level statistics: simultaneous speech/silence and speaker interrupts.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationAnalysis {
    /// Boundaries of the analyzed conversation.
    #[prost(message, optional, tag = "1")]
    pub conversation_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
    /// Total duration where all speakers are silent, milliseconds.
    #[prost(int64, tag = "2")]
    pub total_simultaneous_silence_duration_ms: i64,
    /// Fraction of the conversation that is simultaneous silence.
    #[prost(double, tag = "3")]
    pub total_simultaneous_silence_ratio: f64,
    /// Distribution of simultaneous-silence segment durations.
    #[prost(message, optional, tag = "4")]
    pub simultaneous_silence_duration_estimation: ::core::option::Option<
        DescriptiveStatistics,
    >,
    /// Total duration where multiple speakers talk at once, milliseconds.
    #[prost(int64, tag = "5")]
    pub total_simultaneous_speech_duration_ms: i64,
    /// Fraction of the conversation that is simultaneous speech.
    #[prost(double, tag = "6")]
    pub total_simultaneous_speech_ratio: f64,
    /// Distribution of simultaneous-speech segment durations.
    #[prost(message, optional, tag = "7")]
    pub simultaneous_speech_duration_estimation: ::core::option::Option<
        DescriptiveStatistics,
    >,
    /// Per-speaker interrupt statistics.
    #[prost(message, repeated, tag = "8")]
    pub speaker_interrupts: ::prost::alloc::vec::Vec<
        conversation_analysis::InterruptsEvaluation,
    >,
    /// Total speech duration across all speakers, milliseconds.
    #[prost(int64, tag = "9")]
    pub total_speech_duration_ms: i64,
    /// Fraction of the conversation that is speech.
    #[prost(double, tag = "10")]
    pub total_speech_ratio: f64,
}
/// Nested message and enum types in `ConversationAnalysis`.
pub mod conversation_analysis {
    /// Interrupt statistics for one speaker.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct InterruptsEvaluation {
        /// Speaker who performed the interrupts.
        #[prost(string, tag = "1")]
        pub speaker_tag: ::prost::alloc::string::String,
        /// Number of interrupts by this speaker.
        #[prost(int64, tag = "2")]
        pub interrupts_count: i64,
        /// Total interrupt duration, milliseconds.
        #[prost(int64, tag = "3")]
        pub interrupts_duration_ms: i64,
        /// Time intervals of the individual interrupts.
        #[prost(message, repeated, tag = "4")]
        pub interrupts: ::prost::alloc::vec::Vec<super::AudioSegmentBoundaries>,
    }
}
/// Envelope for server events in the bidirectional streaming recognition RPC.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingResponse {
    /// Identifiers of the session this response belongs to.
    #[prost(message, optional, tag = "1")]
    pub session_uuid: ::core::option::Option<SessionUuid>,
    /// Progress cursors into the audio.
    #[prost(message, optional, tag = "2")]
    pub audio_cursors: ::core::option::Option<AudioCursors>,
    /// Server wall-clock time of the response, milliseconds.
    #[prost(int64, tag = "3")]
    pub response_wall_time_ms: i64,
    /// Audio channel this response refers to.
    #[prost(string, tag = "9")]
    pub channel_tag: ::prost::alloc::string::String,
    /// The event payload; exactly one of the variants below.
    #[prost(oneof = "streaming_response::Event", tags = "4, 5, 6, 7, 8, 10, 11, 12")]
    pub event: ::core::option::Option<streaming_response::Event>,
}
/// Nested message and enum types in `StreamingResponse`.
pub mod streaming_response {
    /// Oneof over server-sent streaming events.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Event {
        /// Partial (in-progress) recognition hypotheses.
        #[prost(message, tag = "4")]
        Partial(super::AlternativeUpdate),
        /// Final recognition hypotheses.
        #[prost(message, tag = "5")]
        Final(super::AlternativeUpdate),
        /// End-of-utterance notification.
        #[prost(message, tag = "6")]
        EouUpdate(super::EouUpdate),
        /// Refinement of a previously sent final result.
        #[prost(message, tag = "7")]
        FinalRefinement(super::FinalRefinement),
        /// Session status update.
        #[prost(message, tag = "8")]
        StatusCode(super::StatusCode),
        /// Classifier result for a recognition window.
        #[prost(message, tag = "10")]
        ClassifierUpdate(super::RecognitionClassifierUpdate),
        /// Per-speaker analysis results.
        #[prost(message, tag = "11")]
        SpeakerAnalysis(super::SpeakerAnalysis),
        /// Conversation-level analysis results.
        #[prost(message, tag = "12")]
        ConversationAnalysis(super::ConversationAnalysis),
    }
}
/// Request to delete a stored recognition operation by its identifier.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRecognitionRequest {
    /// Identifier of the operation to delete.
    #[prost(string, tag = "1")]
    pub operation_id: ::prost::alloc::string::String,
}
/// Kind of session status carried by `StatusCode::code_type`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum CodeType {
    Unspecified = 0,
    /// Session is operating normally (by variant name).
    Working = 1,
    /// Non-fatal problem (by variant name).
    Warning = 2,
    /// Session has been closed (by variant name).
    Closed = 3,
}
1164impl CodeType {
1165 pub fn as_str_name(&self) -> &'static str {
1170 match self {
1171 CodeType::Unspecified => "CODE_TYPE_UNSPECIFIED",
1172 CodeType::Working => "WORKING",
1173 CodeType::Warning => "WARNING",
1174 CodeType::Closed => "CLOSED",
1175 }
1176 }
1177 pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1179 match value {
1180 "CODE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1181 "WORKING" => Some(Self::Working),
1182 "WARNING" => Some(Self::Warning),
1183 "CLOSED" => Some(Self::Closed),
1184 _ => None,
1185 }
1186 }
1187}
/// Request to fetch the results/status of a recognition operation by its identifier.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetRecognitionRequest {
    /// Identifier of the operation to fetch.
    #[prost(string, tag = "1")]
    pub operation_id: ::prost::alloc::string::String,
}
/// Generated tonic client for the `speechkit.stt.v3.Recognizer` gRPC service.
pub mod recognizer_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// Thin wrapper around a `tonic::client::Grpc` transport for the Recognizer service.
    #[derive(Debug, Clone)]
    pub struct RecognizerClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl RecognizerClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> RecognizerClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        /// Wrap an existing gRPC service in a Recognizer client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but with an explicit origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with an interceptor applied to every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> RecognizerClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            RecognizerClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses with the given encoding.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// Bidirectional streaming recognition over
        /// `/speechkit.stt.v3.Recognizer/RecognizeStreaming`: sends a stream of
        /// `StreamingRequest`s and yields a stream of `StreamingResponse`s.
        pub async fn recognize_streaming(
            &mut self,
            request: impl tonic::IntoStreamingRequest<Message = super::StreamingRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamingResponse>>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready before issuing the call.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.Recognizer/RecognizeStreaming",
            );
            let mut req = request.into_streaming_request();
            // Attach the GrpcMethod extension so interceptors/middleware can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("speechkit.stt.v3.Recognizer", "RecognizeStreaming"),
                );
            self.inner.streaming(req, path, codec).await
        }
    }
}
/// Generated client for the `speechkit.stt.v3.AsyncRecognizer` gRPC service
/// (deferred/file-based speech recognition).
pub mod async_recognizer_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// Thin wrapper around a `tonic::client::Grpc` channel exposing the
    /// `AsyncRecognizer` RPCs.
    #[derive(Debug, Clone)]
    pub struct AsyncRecognizerClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AsyncRecognizerClient<tonic::transport::Channel> {
        /// Attempts to create a new client by connecting to the endpoint `dst`.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AsyncRecognizerClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        /// Wraps a raw gRPC service in an `AsyncRecognizerClient`.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Builds a client whose requests target the given `origin` URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the service with `interceptor`, which runs on every outgoing request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AsyncRecognizerClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            AsyncRecognizerClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress outgoing requests with `encoding`.
        ///
        /// The server must support this encoding or it will reject the calls.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Advertise `encoding` as acceptable for compressed responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limit the size (in bytes) of decoded (incoming) messages.
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limit the size (in bytes) of encoded (outgoing) messages.
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// Unary call to `speechkit.stt.v3.AsyncRecognizer/RecognizeFile`.
        ///
        /// Returns a `yandex.cloud.operation.Operation` — presumably a handle for a
        /// long-running recognition job to be polled later (TODO: confirm against the
        /// service docs).
        pub async fn recognize_file(
            &mut self,
            request: impl tonic::IntoRequest<super::RecognizeFileRequest>,
        ) -> std::result::Result<
            tonic::Response<
                super::super::super::super::yandex::cloud::operation::Operation,
            >,
            tonic::Status,
        > {
            // The underlying transport must report readiness before a call is issued.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.AsyncRecognizer/RecognizeFile",
            );
            let mut req = request.into_request();
            // Tag the request so interceptors and middleware can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("speechkit.stt.v3.AsyncRecognizer", "RecognizeFile"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// Server-streaming call to `speechkit.stt.v3.AsyncRecognizer/GetRecognition`.
        ///
        /// Yields a stream of `StreamingResponse` messages for the requested
        /// recognition.
        pub async fn get_recognition(
            &mut self,
            request: impl tonic::IntoRequest<super::GetRecognitionRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamingResponse>>,
            tonic::Status,
        > {
            // The underlying transport must report readiness before a call is issued.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.AsyncRecognizer/GetRecognition",
            );
            let mut req = request.into_request();
            // Tag the request so interceptors and middleware can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("speechkit.stt.v3.AsyncRecognizer", "GetRecognition"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        /// Unary call to `speechkit.stt.v3.AsyncRecognizer/DeleteRecognition`.
        ///
        /// Succeeds with an empty response body.
        pub async fn delete_recognition(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRecognitionRequest>,
        ) -> std::result::Result<tonic::Response<()>, tonic::Status> {
            // The underlying transport must report readiness before a call is issued.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.AsyncRecognizer/DeleteRecognition",
            );
            let mut req = request.into_request();
            // Tag the request so interceptors and middleware can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "speechkit.stt.v3.AsyncRecognizer",
                        "DeleteRecognition",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}