// yandex_cloud/speechkit.stt.v3.rs

/// Options
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextNormalizationOptions {
    #[prost(enumeration = "text_normalization_options::TextNormalization", tag = "1")]
    pub text_normalization: i32,
    /// Profanity filter (default: false).
    #[prost(bool, tag = "2")]
    pub profanity_filter: bool,
    /// Rewrite text in literature style (default: false).
    #[prost(bool, tag = "3")]
    pub literature_text: bool,
    /// Define phone formatting mode
    #[prost(enumeration = "text_normalization_options::PhoneFormattingMode", tag = "4")]
    pub phone_formatting_mode: i32,
}
/// Nested message and enum types in `TextNormalizationOptions`.
pub mod text_normalization_options {
    /// Normalization
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum TextNormalization {
        Unspecified = 0,
        /// Enable normalization
        Enabled = 1,
        /// Disable normalization
        Disabled = 2,
    }
    impl TextNormalization {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                TextNormalization::Unspecified => "TEXT_NORMALIZATION_UNSPECIFIED",
                TextNormalization::Enabled => "TEXT_NORMALIZATION_ENABLED",
                TextNormalization::Disabled => "TEXT_NORMALIZATION_DISABLED",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "TEXT_NORMALIZATION_UNSPECIFIED" => Some(Self::Unspecified),
                "TEXT_NORMALIZATION_ENABLED" => Some(Self::Enabled),
                "TEXT_NORMALIZATION_DISABLED" => Some(Self::Disabled),
                _ => None,
            }
        }
    }
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum PhoneFormattingMode {
        Unspecified = 0,
        /// Disable phone formatting
        Disabled = 1,
    }
    impl PhoneFormattingMode {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                PhoneFormattingMode::Unspecified => "PHONE_FORMATTING_MODE_UNSPECIFIED",
                PhoneFormattingMode::Disabled => "PHONE_FORMATTING_MODE_DISABLED",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "PHONE_FORMATTING_MODE_UNSPECIFIED" => Some(Self::Unspecified),
                "PHONE_FORMATTING_MODE_DISABLED" => Some(Self::Disabled),
                _ => None,
            }
        }
    }
}
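// A minimal sketch (not part of the generated bindings) of assembling
// `TextNormalizationOptions`; the chosen values are illustrative, not
// service defaults. Note that prost stores enum fields as `i32`.
#[allow(dead_code)]
fn example_text_normalization_options() -> TextNormalizationOptions {
    TextNormalizationOptions {
        text_normalization: text_normalization_options::TextNormalization::Enabled as i32,
        profanity_filter: false,
        literature_text: true,
        phone_formatting_mode: text_normalization_options::PhoneFormattingMode::Unspecified
            as i32,
    }
}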
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DefaultEouClassifier {
    /// EOU sensitivity. Currently there are two levels: a faster one with more errors, and a more conservative one (our default).
    #[prost(enumeration = "default_eou_classifier::EouSensitivity", tag = "1")]
    pub r#type: i32,
    /// Hint for the max pause between words. Our EOU detector can use this information to distinguish between the end of an utterance and slow speech (like one <long pause> two <long pause> three, etc).
    #[prost(int64, tag = "2")]
    pub max_pause_between_words_hint_ms: i64,
}
/// Nested message and enum types in `DefaultEouClassifier`.
pub mod default_eou_classifier {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum EouSensitivity {
        Unspecified = 0,
        Default = 1,
        High = 2,
    }
    impl EouSensitivity {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                EouSensitivity::Unspecified => "EOU_SENSITIVITY_UNSPECIFIED",
                EouSensitivity::Default => "DEFAULT",
                EouSensitivity::High => "HIGH",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "EOU_SENSITIVITY_UNSPECIFIED" => Some(Self::Unspecified),
                "DEFAULT" => Some(Self::Default),
                "HIGH" => Some(Self::High),
                _ => None,
            }
        }
    }
}
/// Use an EOU provided by the user.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExternalEouClassifier {}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EouClassifierOptions {
    /// Type of EOU classifier.
    #[prost(oneof = "eou_classifier_options::Classifier", tags = "1, 2")]
    pub classifier: ::core::option::Option<eou_classifier_options::Classifier>,
}
/// Nested message and enum types in `EouClassifierOptions`.
pub mod eou_classifier_options {
    /// Type of EOU classifier.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Classifier {
        /// EOU classifier provided by SpeechKit. Default.
        #[prost(message, tag = "1")]
        DefaultClassifier(super::DefaultEouClassifier),
        /// EOU is enforced by external messages from the user.
        #[prost(message, tag = "2")]
        ExternalClassifier(super::ExternalEouClassifier),
    }
}
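// A sketch (values are assumptions) selecting the server-side EOU classifier
// with high sensitivity via the `Classifier` oneof.
#[allow(dead_code)]
fn example_eou_classifier() -> EouClassifierOptions {
    EouClassifierOptions {
        classifier: Some(eou_classifier_options::Classifier::DefaultClassifier(
            DefaultEouClassifier {
                r#type: default_eou_classifier::EouSensitivity::High as i32,
                // Hypothetical hint: treat pauses of up to one second as slow speech.
                max_pause_between_words_hint_ms: 1000,
            },
        )),
    }
}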
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifier {
    /// Classifier name
    #[prost(string, tag = "1")]
    pub classifier: ::prost::alloc::string::String,
    /// Describes the types of responses for which the classification results will be returned
    #[prost(enumeration = "recognition_classifier::TriggerType", repeated, tag = "2")]
    pub triggers: ::prost::alloc::vec::Vec<i32>,
}
/// Nested message and enum types in `RecognitionClassifier`.
pub mod recognition_classifier {
    /// Type of recognition classifier trigger.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum TriggerType {
        Unspecified = 0,
        /// Apply classifier to utterance responses
        OnUtterance = 1,
        /// Apply classifier to final responses
        OnFinal = 2,
        /// Apply classifier to partial responses
        OnPartial = 3,
    }
    impl TriggerType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                TriggerType::Unspecified => "TRIGGER_TYPE_UNSPECIFIED",
                TriggerType::OnUtterance => "ON_UTTERANCE",
                TriggerType::OnFinal => "ON_FINAL",
                TriggerType::OnPartial => "ON_PARTIAL",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "TRIGGER_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "ON_UTTERANCE" => Some(Self::OnUtterance),
                "ON_FINAL" => Some(Self::OnFinal),
                "ON_PARTIAL" => Some(Self::OnPartial),
                _ => None,
            }
        }
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierOptions {
    /// List of classifiers to use
    #[prost(message, repeated, tag = "1")]
    pub classifiers: ::prost::alloc::vec::Vec<RecognitionClassifier>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechAnalysisOptions {
    /// Analyse speech for every speaker
    #[prost(bool, tag = "1")]
    pub enable_speaker_analysis: bool,
    /// Analyse the conversation of two speakers
    #[prost(bool, tag = "2")]
    pub enable_conversation_analysis: bool,
    /// Quantile levels in range (0, 1) for descriptive statistics
    #[prost(double, repeated, tag = "3")]
    pub descriptive_statistics_quantiles: ::prost::alloc::vec::Vec<f64>,
}
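// A sketch (quantile levels are illustrative) enabling both analyses and
// requesting median and 90th-percentile descriptive statistics.
#[allow(dead_code)]
fn example_speech_analysis_options() -> SpeechAnalysisOptions {
    SpeechAnalysisOptions {
        enable_speaker_analysis: true,
        enable_conversation_analysis: true,
        // Levels must lie in the open interval (0, 1).
        descriptive_statistics_quantiles: vec![0.5, 0.9],
    }
}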
/// RAW Audio format spec (no container to infer type). Used in AudioFormat options.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RawAudio {
    /// Type of audio encoding
    #[prost(enumeration = "raw_audio::AudioEncoding", tag = "1")]
    pub audio_encoding: i32,
    /// PCM sample rate
    #[prost(int64, tag = "2")]
    pub sample_rate_hertz: i64,
    /// PCM channel count. Currently only single channel audio is supported in real-time recognition.
    #[prost(int64, tag = "3")]
    pub audio_channel_count: i64,
}
/// Nested message and enum types in `RawAudio`.
pub mod raw_audio {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum AudioEncoding {
        Unspecified = 0,
        /// Audio bit depth 16-bit signed little-endian (Linear PCM).
        Linear16Pcm = 1,
    }
    impl AudioEncoding {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                AudioEncoding::Unspecified => "AUDIO_ENCODING_UNSPECIFIED",
                AudioEncoding::Linear16Pcm => "LINEAR16_PCM",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "AUDIO_ENCODING_UNSPECIFIED" => Some(Self::Unspecified),
                "LINEAR16_PCM" => Some(Self::Linear16Pcm),
                _ => None,
            }
        }
    }
}
/// Audio with fixed type in container. Used in AudioFormat options.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ContainerAudio {
    /// Type of audio container.
    #[prost(enumeration = "container_audio::ContainerAudioType", tag = "1")]
    pub container_audio_type: i32,
}
/// Nested message and enum types in `ContainerAudio`.
pub mod container_audio {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum ContainerAudioType {
        Unspecified = 0,
        /// Audio bit depth 16-bit signed little-endian (Linear PCM).
        Wav = 1,
        /// Data is encoded using the OPUS audio codec and compressed using the OGG container format.
        OggOpus = 2,
        /// Data is encoded using MPEG-1/2 Layer III and compressed using the MP3 container format.
        Mp3 = 3,
    }
    impl ContainerAudioType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                ContainerAudioType::Unspecified => "CONTAINER_AUDIO_TYPE_UNSPECIFIED",
                ContainerAudioType::Wav => "WAV",
                ContainerAudioType::OggOpus => "OGG_OPUS",
                ContainerAudioType::Mp3 => "MP3",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "CONTAINER_AUDIO_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "WAV" => Some(Self::Wav),
                "OGG_OPUS" => Some(Self::OggOpus),
                "MP3" => Some(Self::Mp3),
                _ => None,
            }
        }
    }
}
/// Audio format options.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioFormatOptions {
    #[prost(oneof = "audio_format_options::AudioFormat", tags = "1, 2")]
    pub audio_format: ::core::option::Option<audio_format_options::AudioFormat>,
}
/// Nested message and enum types in `AudioFormatOptions`.
pub mod audio_format_options {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum AudioFormat {
        /// Audio without container.
        #[prost(message, tag = "1")]
        RawAudio(super::RawAudio),
        /// Audio is wrapped in container.
        #[prost(message, tag = "2")]
        ContainerAudio(super::ContainerAudio),
    }
}
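// A sketch (sample rate is an assumption) describing raw 16-bit little-endian
// PCM, mono, at 8 kHz via the `AudioFormat` oneof.
#[allow(dead_code)]
fn example_audio_format() -> AudioFormatOptions {
    AudioFormatOptions {
        audio_format: Some(audio_format_options::AudioFormat::RawAudio(RawAudio {
            audio_encoding: raw_audio::AudioEncoding::Linear16Pcm as i32,
            sample_rate_hertz: 8000,
            audio_channel_count: 1,
        })),
    }
}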
/// Type of restriction for the list of languages expected in the incoming speech stream.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguageRestrictionOptions {
    /// Language restriction type
    #[prost(
        enumeration = "language_restriction_options::LanguageRestrictionType",
        tag = "1"
    )]
    pub restriction_type: i32,
    /// The list of language codes to restrict recognition in the case of an auto model
    #[prost(string, repeated, tag = "2")]
    pub language_code: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Nested message and enum types in `LanguageRestrictionOptions`.
pub mod language_restriction_options {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum LanguageRestrictionType {
        Unspecified = 0,
        /// The allowing list. The incoming audio can contain only the listed languages.
        Whitelist = 1,
        /// The forbidding list. The incoming audio cannot contain the listed languages.
        Blacklist = 2,
    }
    impl LanguageRestrictionType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                LanguageRestrictionType::Unspecified => {
                    "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED"
                }
                LanguageRestrictionType::Whitelist => "WHITELIST",
                LanguageRestrictionType::Blacklist => "BLACKLIST",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "WHITELIST" => Some(Self::Whitelist),
                "BLACKLIST" => Some(Self::Blacklist),
                _ => None,
            }
        }
    }
}
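// A sketch (language codes are illustrative) restricting the auto model to
// Russian and English with a whitelist.
#[allow(dead_code)]
fn example_language_restriction() -> LanguageRestrictionOptions {
    LanguageRestrictionOptions {
        restriction_type: language_restriction_options::LanguageRestrictionType::Whitelist
            as i32,
        language_code: vec!["ru-RU".to_string(), "en-US".to_string()],
    }
}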
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionModelOptions {
    /// Sets the recognition model for the cloud version of SpeechKit. Possible values: 'general', 'general:rc', 'general:deprecated'.
    /// The model is ignored for SpeechKit Hybrid.
    #[prost(string, tag = "1")]
    pub model: ::prost::alloc::string::String,
    /// Specified input audio.
    #[prost(message, optional, tag = "2")]
    pub audio_format: ::core::option::Option<AudioFormatOptions>,
    /// Text normalization options.
    #[prost(message, optional, tag = "3")]
    pub text_normalization: ::core::option::Option<TextNormalizationOptions>,
    /// Possible languages in audio.
    #[prost(message, optional, tag = "4")]
    pub language_restriction: ::core::option::Option<LanguageRestrictionOptions>,
    /// How to deal with audio data (in real time, after all data is received, etc). Default is REAL_TIME.
    #[prost(enumeration = "recognition_model_options::AudioProcessingType", tag = "5")]
    pub audio_processing_type: i32,
}
/// Nested message and enum types in `RecognitionModelOptions`.
pub mod recognition_model_options {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum AudioProcessingType {
        Unspecified = 0,
        /// Process audio in mode optimized for real-time recognition, i.e. send partials and final responses as soon as possible
        RealTime = 1,
        /// Process audio after all data was received
        FullData = 2,
    }
    impl AudioProcessingType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                AudioProcessingType::Unspecified => "AUDIO_PROCESSING_TYPE_UNSPECIFIED",
                AudioProcessingType::RealTime => "REAL_TIME",
                AudioProcessingType::FullData => "FULL_DATA",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "AUDIO_PROCESSING_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "REAL_TIME" => Some(Self::RealTime),
                "FULL_DATA" => Some(Self::FullData),
                _ => None,
            }
        }
    }
}
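// A sketch combining the option sketches above into `RecognitionModelOptions`
// for real-time recognition with the 'general' model; all values are
// illustrative.
#[allow(dead_code)]
fn example_recognition_model() -> RecognitionModelOptions {
    RecognitionModelOptions {
        model: "general".to_string(),
        audio_format: Some(example_audio_format()),
        text_normalization: Some(example_text_normalization_options()),
        language_restriction: Some(example_language_restriction()),
        audio_processing_type: recognition_model_options::AudioProcessingType::RealTime
            as i32,
    }
}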
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerLabelingOptions {
    /// Specifies the execution of speaker labeling. Default is SPEAKER_LABELING_DISABLED.
    #[prost(enumeration = "speaker_labeling_options::SpeakerLabeling", tag = "1")]
    pub speaker_labeling: i32,
}
/// Nested message and enum types in `SpeakerLabelingOptions`.
pub mod speaker_labeling_options {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum SpeakerLabeling {
        Unspecified = 0,
        /// Enable speaker labeling
        Enabled = 1,
        /// Disable speaker labeling
        Disabled = 2,
    }
    impl SpeakerLabeling {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                SpeakerLabeling::Unspecified => "SPEAKER_LABELING_UNSPECIFIED",
                SpeakerLabeling::Enabled => "SPEAKER_LABELING_ENABLED",
                SpeakerLabeling::Disabled => "SPEAKER_LABELING_DISABLED",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "SPEAKER_LABELING_UNSPECIFIED" => Some(Self::Unspecified),
                "SPEAKER_LABELING_ENABLED" => Some(Self::Enabled),
                "SPEAKER_LABELING_DISABLED" => Some(Self::Disabled),
                _ => None,
            }
        }
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingOptions {
    /// Configuration for speech recognition model.
    #[prost(message, optional, tag = "1")]
    pub recognition_model: ::core::option::Option<RecognitionModelOptions>,
    /// Configuration for end of utterance detection model.
    #[prost(message, optional, tag = "2")]
    pub eou_classifier: ::core::option::Option<EouClassifierOptions>,
    /// Configuration for classifiers over speech recognition.
    #[prost(message, optional, tag = "3")]
    pub recognition_classifier: ::core::option::Option<RecognitionClassifierOptions>,
    /// Configuration for speech analysis over speech recognition.
    #[prost(message, optional, tag = "4")]
    pub speech_analysis: ::core::option::Option<SpeechAnalysisOptions>,
    /// Configuration for speaker labeling.
    #[prost(message, optional, tag = "5")]
    pub speaker_labeling: ::core::option::Option<SpeakerLabelingOptions>,
}
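// A sketch of full session options for a streaming request, built from the
// sketches above; fields left unset keep their prost defaults.
#[allow(dead_code)]
fn example_streaming_options() -> StreamingOptions {
    StreamingOptions {
        recognition_model: Some(example_recognition_model()),
        eou_classifier: Some(example_eou_classifier()),
        speech_analysis: Some(example_speech_analysis_options()),
        ..Default::default()
    }
}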
/// Data chunk with audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioChunk {
    /// Bytes with audio data.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
}
/// Data chunk with silence.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SilenceChunk {
    /// Duration of silence chunk in ms.
    #[prost(int64, tag = "1")]
    pub duration_ms: i64,
}
/// Force EOU
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Eou {}
/// Streaming audio request.
/// Events are control messages from the user.
/// The first message should be the session options.
/// The following messages are audio data chunks or control messages.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingRequest {
    #[prost(oneof = "streaming_request::Event", tags = "1, 2, 3, 4")]
    pub event: ::core::option::Option<streaming_request::Event>,
}
/// Nested message and enum types in `StreamingRequest`.
pub mod streaming_request {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Event {
        /// Session options. Should be the first message from the user.
        #[prost(message, tag = "1")]
        SessionOptions(super::StreamingOptions),
        /// Chunk with audio data.
        #[prost(message, tag = "2")]
        Chunk(super::AudioChunk),
        /// Chunk with silence.
        #[prost(message, tag = "3")]
        SilenceChunk(super::SilenceChunk),
        /// Request to end the current utterance. Works only with the external EOU detector.
        #[prost(message, tag = "4")]
        Eou(super::Eou),
    }
}
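// A sketch of the required message order on the request stream: session
// options first, then audio chunks (the byte payload is whatever the caller
// captured).
#[allow(dead_code)]
fn example_request_sequence(audio: Vec<u8>) -> Vec<StreamingRequest> {
    vec![
        StreamingRequest {
            event: Some(streaming_request::Event::SessionOptions(
                example_streaming_options(),
            )),
        },
        StreamingRequest {
            event: Some(streaming_request::Event::Chunk(AudioChunk { data: audio })),
        },
    ]
}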
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognizeFileRequest {
    /// Configuration for speech recognition model.
    #[prost(message, optional, tag = "3")]
    pub recognition_model: ::core::option::Option<RecognitionModelOptions>,
    /// Configuration for classifiers over speech recognition.
    #[prost(message, optional, tag = "4")]
    pub recognition_classifier: ::core::option::Option<RecognitionClassifierOptions>,
    /// Configuration for speech analysis over speech recognition.
    #[prost(message, optional, tag = "5")]
    pub speech_analysis: ::core::option::Option<SpeechAnalysisOptions>,
    /// Configuration for speaker labeling.
    #[prost(message, optional, tag = "6")]
    pub speaker_labeling: ::core::option::Option<SpeakerLabelingOptions>,
    #[prost(oneof = "recognize_file_request::AudioSource", tags = "1, 2")]
    pub audio_source: ::core::option::Option<recognize_file_request::AudioSource>,
}
/// Nested message and enum types in `RecognizeFileRequest`.
pub mod recognize_file_request {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum AudioSource {
        /// Bytes with data
        #[prost(bytes, tag = "1")]
        Content(::prost::alloc::vec::Vec<u8>),
        /// S3 data URL
        #[prost(string, tag = "2")]
        Uri(::prost::alloc::string::String),
    }
}
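// A sketch of an async-recognition request reading audio from Object Storage;
// the bucket URL is hypothetical.
#[allow(dead_code)]
fn example_recognize_file_request() -> RecognizeFileRequest {
    RecognizeFileRequest {
        recognition_model: Some(example_recognition_model()),
        audio_source: Some(recognize_file_request::AudioSource::Uri(
            "https://storage.yandexcloud.net/my-bucket/audio.ogg".to_string(),
        )),
        ..Default::default()
    }
}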
/// Recognized word.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Word {
    /// Word text.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// Estimation of word start time in ms.
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// Estimation of word end time in ms.
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
}
/// Estimation of language and its probability.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguageEstimation {
    /// Language code in ISO 639-1 format.
    #[prost(string, tag = "1")]
    pub language_code: ::prost::alloc::string::String,
    /// Estimation of language probability.
    #[prost(double, tag = "2")]
    pub probability: f64,
}
/// Recognition of specific time frame.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Alternative {
    /// Words in time frame.
    #[prost(message, repeated, tag = "1")]
    pub words: ::prost::alloc::vec::Vec<Word>,
    /// Text in time frame.
    #[prost(string, tag = "2")]
    pub text: ::prost::alloc::string::String,
    /// Start of time frame.
    #[prost(int64, tag = "3")]
    pub start_time_ms: i64,
    /// End of time frame.
    #[prost(int64, tag = "4")]
    pub end_time_ms: i64,
    /// The hypothesis confidence. Currently not used.
    #[prost(double, tag = "5")]
    pub confidence: f64,
    /// Distribution over possible languages.
    #[prost(message, repeated, tag = "6")]
    pub languages: ::prost::alloc::vec::Vec<LanguageEstimation>,
}
/// Update information for external End of Utterance.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EouUpdate {
    /// EOU estimated time.
    #[prost(int64, tag = "2")]
    pub time_ms: i64,
}
/// Update of hypothesis.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AlternativeUpdate {
    /// List of hypotheses for time frames.
    #[prost(message, repeated, tag = "1")]
    pub alternatives: ::prost::alloc::vec::Vec<Alternative>,
    #[deprecated]
    #[prost(string, tag = "2")]
    pub channel_tag: ::prost::alloc::string::String,
}
/// AudioCursors are the state of the ASR recognition stream.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioCursors {
    /// Amount of audio the server has received. This cursor is moved after each audio chunk is received by the server.
    #[prost(int64, tag = "1")]
    pub received_data_ms: i64,
    /// Input stream reset data.
    #[prost(int64, tag = "2")]
    pub reset_time_ms: i64,
    /// How much audio has been processed. This time includes trimmed silences as well. This cursor is moved after the server has received enough data
    /// to update the recognition results (includes silence as well).
    #[prost(int64, tag = "3")]
    pub partial_time_ms: i64,
    /// Time of the last final. This cursor is moved when the server decides that the recognition from the start of the audio until final_time_ms will not change anymore;
    /// usually this event is followed by EOU detection (but this could change in the future).
    #[prost(int64, tag = "4")]
    pub final_time_ms: i64,
    /// Index of the last final the server sent. Incremented after each new final.
    #[prost(int64, tag = "5")]
    pub final_index: i64,
    /// Estimated time of EOU. The cursor is updated after each new EOU is sent.
    /// For the external classifier this equals received_data_ms at the moment the EOU event arrives.
    /// For the internal classifier this is an estimate; the time is not exact and has the same guarantees as word timings.
    #[prost(int64, tag = "6")]
    pub eou_time_ms: i64,
}
/// Refinement of a final hypothesis. For example, text normalization is a refinement.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FinalRefinement {
    /// Index of the final for which the server sends additional information.
    #[prost(int64, tag = "1")]
    pub final_index: i64,
    /// Type of refinement.
    #[prost(oneof = "final_refinement::Type", tags = "2")]
    pub r#type: ::core::option::Option<final_refinement::Type>,
}
/// Nested message and enum types in `FinalRefinement`.
pub mod final_refinement {
    /// Type of refinement.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Type {
        /// Normalized text instead of the raw one.
        #[prost(message, tag = "2")]
        NormalizedText(super::AlternativeUpdate),
    }
}
/// Status message
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatusCode {
    /// Code type.
    #[prost(enumeration = "CodeType", tag = "1")]
    pub code_type: i32,
    /// Human-readable message.
    #[prost(string, tag = "2")]
    pub message: ::prost::alloc::string::String,
}
/// Session identifier.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SessionUuid {
    /// Internal session identifier.
    #[prost(string, tag = "1")]
    pub uuid: ::prost::alloc::string::String,
    /// User session identifier.
    #[prost(string, tag = "2")]
    pub user_request_id: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhraseHighlight {
    /// Text transcription of the highlighted audio segment
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// Start time of the highlighted audio segment
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// End time of the highlighted audio segment
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierLabel {
    /// The label of the class predicted by the classifier
    #[prost(string, tag = "1")]
    pub label: ::prost::alloc::string::String,
    /// The prediction confidence
    #[prost(double, tag = "2")]
    pub confidence: f64,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierResult {
    /// Name of the triggered classifier
    #[prost(string, tag = "1")]
    pub classifier: ::prost::alloc::string::String,
    /// List of highlights, i.e. the parts of the phrase that determine the result of the classification
    #[prost(message, repeated, tag = "2")]
    pub highlights: ::prost::alloc::vec::Vec<PhraseHighlight>,
    /// Classifier predictions
    #[prost(message, repeated, tag = "3")]
    pub labels: ::prost::alloc::vec::Vec<RecognitionClassifierLabel>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierUpdate {
    /// Response window type
    #[prost(enumeration = "recognition_classifier_update::WindowType", tag = "1")]
    pub window_type: i32,
    /// Start time of the audio segment used for classification
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// End time of the audio segment used for classification
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
    /// Result for dictionary-based classifier
    #[prost(message, optional, tag = "4")]
    pub classifier_result: ::core::option::Option<RecognitionClassifierResult>,
}
/// Nested message and enum types in `RecognitionClassifierUpdate`.
pub mod recognition_classifier_update {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum WindowType {
        Unspecified = 0,
        /// The result of applying the classifier to the last utterance response
        LastUtterance = 1,
        /// The result of applying the classifier to the last final response
        LastFinal = 2,
        /// The result of applying the classifier to the last partial response
        LastPartial = 3,
    }
    impl WindowType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                WindowType::Unspecified => "WINDOW_TYPE_UNSPECIFIED",
                WindowType::LastUtterance => "LAST_UTTERANCE",
                WindowType::LastFinal => "LAST_FINAL",
                WindowType::LastPartial => "LAST_PARTIAL",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "WINDOW_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "LAST_UTTERANCE" => Some(Self::LastUtterance),
                "LAST_FINAL" => Some(Self::LastFinal),
                "LAST_PARTIAL" => Some(Self::LastPartial),
                _ => None,
            }
        }
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DescriptiveStatistics {
    /// Minimum observed value
    #[prost(double, tag = "1")]
    pub min: f64,
    /// Maximum observed value
    #[prost(double, tag = "2")]
    pub max: f64,
    /// Estimated mean of distribution
    #[prost(double, tag = "3")]
    pub mean: f64,
    /// Estimated standard deviation of distribution
    #[prost(double, tag = "4")]
    pub std: f64,
    /// List of evaluated quantiles
    #[prost(message, repeated, tag = "5")]
    pub quantiles: ::prost::alloc::vec::Vec<descriptive_statistics::Quantile>,
}
/// Nested message and enum types in `DescriptiveStatistics`.
pub mod descriptive_statistics {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Quantile {
        /// Quantile level in range (0, 1)
        #[prost(double, tag = "1")]
        pub level: f64,
        /// Quantile value
        #[prost(double, tag = "2")]
        pub value: f64,
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioSegmentBoundaries {
    /// Audio segment start time
    #[prost(int64, tag = "1")]
    pub start_time_ms: i64,
    /// Audio segment end time
    #[prost(int64, tag = "2")]
    pub end_time_ms: i64,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerAnalysis {
    /// Speaker tag
    #[prost(string, tag = "1")]
    pub speaker_tag: ::prost::alloc::string::String,
    /// Response window type
    #[prost(enumeration = "speaker_analysis::WindowType", tag = "2")]
    pub window_type: i32,
    /// Audio segment boundaries
    #[prost(message, optional, tag = "3")]
    pub speech_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
    /// Total speech duration
    #[prost(int64, tag = "4")]
    pub total_speech_ms: i64,
    /// Speech ratio within audio segment
    #[prost(double, tag = "5")]
    pub speech_ratio: f64,
    /// Total silence duration
    #[prost(int64, tag = "6")]
    pub total_silence_ms: i64,
    /// Silence ratio within audio segment
    #[prost(double, tag = "7")]
    pub silence_ratio: f64,
    /// Number of words in recognized speech
    #[prost(int64, tag = "8")]
    pub words_count: i64,
    /// Number of letters in recognized speech
    #[prost(int64, tag = "9")]
    pub letters_count: i64,
    /// Descriptive statistics for words per second distribution
    #[prost(message, optional, tag = "10")]
    pub words_per_second: ::core::option::Option<DescriptiveStatistics>,
    /// Descriptive statistics for letters per second distribution
    #[prost(message, optional, tag = "11")]
    pub letters_per_second: ::core::option::Option<DescriptiveStatistics>,
    /// Descriptive statistics for words per utterance distribution
    #[prost(message, optional, tag = "12")]
    pub words_per_utterance: ::core::option::Option<DescriptiveStatistics>,
    /// Descriptive statistics for letters per utterance distribution
    #[prost(message, optional, tag = "13")]
    pub letters_per_utterance: ::core::option::Option<DescriptiveStatistics>,
    /// Number of utterances
    #[prost(int64, tag = "14")]
    pub utterance_count: i64,
    /// Descriptive statistics for utterance duration distribution
    #[prost(message, optional, tag = "15")]
    pub utterance_duration_estimation: ::core::option::Option<DescriptiveStatistics>,
}
/// Nested message and enum types in `SpeakerAnalysis`.
pub mod speaker_analysis {
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum WindowType {
        Unspecified = 0,
        /// Stats for all received audio.
        Total = 1,
        /// Stats for last utterance.
        LastUtterance = 2,
    }
    impl WindowType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                WindowType::Unspecified => "WINDOW_TYPE_UNSPECIFIED",
                WindowType::Total => "TOTAL",
                WindowType::LastUtterance => "LAST_UTTERANCE",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "WINDOW_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
                "TOTAL" => Some(Self::Total),
                "LAST_UTTERANCE" => Some(Self::LastUtterance),
                _ => None,
            }
        }
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationAnalysis {
    /// Audio segment boundaries
    #[prost(message, optional, tag = "1")]
    pub conversation_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
    /// Total simultaneous silence duration
    #[prost(int64, tag = "2")]
    pub total_simultaneous_silence_duration_ms: i64,
    /// Simultaneous silence ratio within audio segment
    #[prost(double, tag = "3")]
    pub total_simultaneous_silence_ratio: f64,
    /// Descriptive statistics for simultaneous silence duration distribution
    #[prost(message, optional, tag = "4")]
    pub simultaneous_silence_duration_estimation: ::core::option::Option<
        DescriptiveStatistics,
    >,
    /// Total simultaneous speech duration
    #[prost(int64, tag = "5")]
    pub total_simultaneous_speech_duration_ms: i64,
    /// Simultaneous speech ratio within audio segment
    #[prost(double, tag = "6")]
    pub total_simultaneous_speech_ratio: f64,
    /// Descriptive statistics for simultaneous speech duration distribution
    #[prost(message, optional, tag = "7")]
    pub simultaneous_speech_duration_estimation: ::core::option::Option<
        DescriptiveStatistics,
    >,
    /// Interrupts description for every speaker
    #[prost(message, repeated, tag = "8")]
    pub speaker_interrupts: ::prost::alloc::vec::Vec<
        conversation_analysis::InterruptsEvaluation,
    >,
    /// Total speech duration, including both simultaneous and separate speech
    #[prost(int64, tag = "9")]
    pub total_speech_duration_ms: i64,
    /// Total speech ratio within audio segment
    #[prost(double, tag = "10")]
    pub total_speech_ratio: f64,
}
/// Nested message and enum types in `ConversationAnalysis`.
pub mod conversation_analysis {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct InterruptsEvaluation {
        /// Speaker tag
        #[prost(string, tag = "1")]
        pub speaker_tag: ::prost::alloc::string::String,
        /// Number of interrupts made by the speaker
        #[prost(int64, tag = "2")]
        pub interrupts_count: i64,
        /// Total duration of all interrupts
        #[prost(int64, tag = "3")]
        pub interrupts_duration_ms: i64,
        /// Boundaries for every interrupt
        #[prost(message, repeated, tag = "4")]
        pub interrupts: ::prost::alloc::vec::Vec<super::AudioSegmentBoundaries>,
    }
}
/// Responses from the server.
/// Each response contains the session uuid,
/// the audio cursors,
/// plus a specific event.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingResponse {
    /// Session identifier
    #[prost(message, optional, tag = "1")]
    pub session_uuid: ::core::option::Option<SessionUuid>,
    /// Progress bar for stream session recognition: how much data we have obtained; final and partial times; etc.
    #[prost(message, optional, tag = "2")]
    pub audio_cursors: ::core::option::Option<AudioCursors>,
    /// Wall clock on the server side. This is the time when the server wrote the results to the stream.
    #[prost(int64, tag = "3")]
    pub response_wall_time_ms: i64,
    /// Tag to distinguish audio channels.
    #[prost(string, tag = "9")]
    pub channel_tag: ::prost::alloc::string::String,
    #[prost(oneof = "streaming_response::Event", tags = "4, 5, 6, 7, 8, 10, 11, 12")]
    pub event: ::core::option::Option<streaming_response::Event>,
}
/// Nested message and enum types in `StreamingResponse`.
pub mod streaming_response {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Event {
        /// Partial results; the server will send them regularly once enough audio data has been received from the user. This is the current text estimate
        /// from final_time_ms to partial_time_ms and could change as new data arrives.
        #[prost(message, tag = "4")]
        Partial(super::AlternativeUpdate),
        /// Final results; the recognition is now fixed until final_time_ms. For now, a final is sent only if the EOU event was triggered. This could change in future releases.
        #[prost(message, tag = "5")]
        Final(super::AlternativeUpdate),
        /// After the EOU classifier fires, the server sends the message with the final and then the EouUpdate with the time of the EOU.
        /// Before the eou_update we send a final with the same time; there can be several finals before the eou update.
        #[prost(message, tag = "6")]
        EouUpdate(super::EouUpdate),
        /// For each final, if normalization is enabled, the server sends the normalized text (or some other advanced post-processing).
        /// Final normalization will introduce additional latency.
        #[prost(message, tag = "7")]
        FinalRefinement(super::FinalRefinement),
        /// Status messages, sent by the server at a fixed interval (keep-alive).
        #[prost(message, tag = "8")]
        StatusCode(super::StatusCode),
        /// Result of the triggered classifier
        #[prost(message, tag = "10")]
        ClassifierUpdate(super::RecognitionClassifierUpdate),
        /// Speech statistics for every speaker
        #[prost(message, tag = "11")]
        SpeakerAnalysis(super::SpeakerAnalysis),
        /// Conversation statistics
        #[prost(message, tag = "12")]
        ConversationAnalysis(super::ConversationAnalysis),
    }
}
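// A sketch of consuming server events: it prints partial and final texts and
// reports EOU times; the remaining event kinds are ignored here for brevity.
#[allow(dead_code)]
fn example_handle_response(resp: StreamingResponse) {
    match resp.event {
        Some(streaming_response::Event::Partial(update)) => {
            for alt in update.alternatives {
                println!("partial: {}", alt.text);
            }
        }
        Some(streaming_response::Event::Final(update)) => {
            for alt in update.alternatives {
                println!("final: {}", alt.text);
            }
        }
        Some(streaming_response::Event::EouUpdate(eou)) => {
            println!("end of utterance at {} ms", eou.time_ms);
        }
        _ => {}
    }
}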
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRecognitionRequest {
    #[prost(string, tag = "1")]
    pub operation_id: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum CodeType {
    Unspecified = 0,
    /// All good.
    Working = 1,
    /// For example, if speech is sent not in real time, or the context is unknown and we have made a fallback.
    Warning = 2,
    /// Sent after the session was closed.
    Closed = 3,
}
impl CodeType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            CodeType::Unspecified => "CODE_TYPE_UNSPECIFIED",
            CodeType::Working => "WORKING",
            CodeType::Warning => "WARNING",
            CodeType::Closed => "CLOSED",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "CODE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "WORKING" => Some(Self::Working),
            "WARNING" => Some(Self::Warning),
            "CLOSED" => Some(Self::Closed),
            _ => None,
        }
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetRecognitionRequest {
    #[prost(string, tag = "1")]
    pub operation_id: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod recognizer_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// A set of methods for voice recognition.
    #[derive(Debug, Clone)]
    pub struct RecognizerClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl RecognizerClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> RecognizerClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> RecognizerClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            RecognizerClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// Expects audio in real-time
        pub async fn recognize_streaming(
            &mut self,
            request: impl tonic::IntoStreamingRequest<Message = super::StreamingRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamingResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.Recognizer/RecognizeStreaming",
            );
            let mut req = request.into_streaming_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("speechkit.stt.v3.Recognizer", "RecognizeStreaming"),
                );
            self.inner.streaming(req, path, codec).await
        }
    }
}
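// A usage sketch (assumptions: a `tokio` runtime, the `tokio-stream` crate,
// and the endpoint URL below; authentication metadata is omitted) that opens
// a streaming session and handles responses with the helper above.
#[allow(dead_code)]
async fn example_recognize_streaming(
    audio: Vec<u8>,
) -> Result<(), Box<dyn std::error::Error>> {
    use recognizer_client::RecognizerClient;

    let mut client = RecognizerClient::connect("https://stt.api.cloud.yandex.net:443")
        .await?;
    // Session options must be the first message; audio chunks follow.
    let requests = tokio_stream::iter(example_request_sequence(audio));
    let mut responses = client.recognize_streaming(requests).await?.into_inner();
    while let Some(resp) = responses.message().await? {
        example_handle_response(resp);
    }
    Ok(())
}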
/// Generated client implementations.
pub mod async_recognizer_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// A set of methods for async voice recognition.
    #[derive(Debug, Clone)]
    pub struct AsyncRecognizerClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AsyncRecognizerClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AsyncRecognizerClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AsyncRecognizerClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            AsyncRecognizerClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        pub async fn recognize_file(
            &mut self,
            request: impl tonic::IntoRequest<super::RecognizeFileRequest>,
        ) -> std::result::Result<
            tonic::Response<
                super::super::super::super::yandex::cloud::operation::Operation,
            >,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.AsyncRecognizer/RecognizeFile",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("speechkit.stt.v3.AsyncRecognizer", "RecognizeFile"),
                );
            self.inner.unary(req, path, codec).await
        }
        pub async fn get_recognition(
            &mut self,
            request: impl tonic::IntoRequest<super::GetRecognitionRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamingResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.AsyncRecognizer/GetRecognition",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("speechkit.stt.v3.AsyncRecognizer", "GetRecognition"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        pub async fn delete_recognition(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRecognitionRequest>,
        ) -> std::result::Result<tonic::Response<()>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/speechkit.stt.v3.AsyncRecognizer/DeleteRecognition",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "speechkit.stt.v3.AsyncRecognizer",
                        "DeleteRecognition",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
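// A usage sketch for async recognition (same assumptions as the streaming
// sketch above; the `id` field of the returned operation is assumed from the
// yandex.cloud.operation.Operation message): submits a file and prints the
// operation id for later polling via get_recognition.
#[allow(dead_code)]
async fn example_recognize_file() -> Result<(), Box<dyn std::error::Error>> {
    use async_recognizer_client::AsyncRecognizerClient;

    let mut client = AsyncRecognizerClient::connect(
            "https://stt.api.cloud.yandex.net:443",
        )
        .await?;
    let operation = client
        .recognize_file(example_recognize_file_request())
        .await?
        .into_inner();
    println!("operation id: {}", operation.id);
    Ok(())
}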