/// Options controlling normalization of recognized text.
///
/// NOTE(review): prost-generated message — do not hand-edit; regenerate from the
/// `.proto` instead. Enum-typed fields are stored as raw `i32` per prost convention.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextNormalizationOptions {
/// Raw enum value; interpret via `text_normalization_options::TextNormalization`.
#[prost(enumeration = "text_normalization_options::TextNormalization", tag = "1")]
pub text_normalization: i32,
/// Presumably enables filtering of profanity in output — confirm against proto docs.
#[prost(bool, tag = "2")]
pub profanity_filter: bool,
/// Presumably formats output as literature-style text — confirm against proto docs.
#[prost(bool, tag = "3")]
pub literature_text: bool,
/// Raw enum value; interpret via `text_normalization_options::PhoneFormattingMode`.
#[prost(enumeration = "text_normalization_options::PhoneFormattingMode", tag = "4")]
pub phone_formatting_mode: i32,
}
/// Nested types for [`TextNormalizationOptions`].
pub mod text_normalization_options {
/// Whether text normalization is applied (tri-state: unspecified/enabled/disabled).
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum TextNormalization {
Unspecified = 0,
Enabled = 1,
Disabled = 2,
}
impl TextNormalization {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
TextNormalization::Unspecified => "TEXT_NORMALIZATION_UNSPECIFIED",
TextNormalization::Enabled => "TEXT_NORMALIZATION_ENABLED",
TextNormalization::Disabled => "TEXT_NORMALIZATION_DISABLED",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"TEXT_NORMALIZATION_UNSPECIFIED" => Some(Self::Unspecified),
"TEXT_NORMALIZATION_ENABLED" => Some(Self::Enabled),
"TEXT_NORMALIZATION_DISABLED" => Some(Self::Disabled),
_ => None,
}
}
}
/// Phone-number formatting mode; note there is no `Enabled` variant, only
/// unspecified (0) and disabled (1).
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum PhoneFormattingMode {
Unspecified = 0,
Disabled = 1,
}
impl PhoneFormattingMode {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
PhoneFormattingMode::Unspecified => "PHONE_FORMATTING_MODE_UNSPECIFIED",
PhoneFormattingMode::Disabled => "PHONE_FORMATTING_MODE_DISABLED",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"PHONE_FORMATTING_MODE_UNSPECIFIED" => Some(Self::Unspecified),
"PHONE_FORMATTING_MODE_DISABLED" => Some(Self::Disabled),
_ => None,
}
}
}
}
/// Configuration for the built-in end-of-utterance (EOU) classifier.
///
/// NOTE(review): prost-generated message — regenerate from the `.proto` rather
/// than editing by hand.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DefaultEouClassifier {
/// Sensitivity setting; raw enum value — interpret via
/// `default_eou_classifier::EouSensitivity`. (Field is named `type` in the
/// proto, hence the `r#type` raw identifier.)
#[prost(enumeration = "default_eou_classifier::EouSensitivity", tag = "1")]
pub r#type: i32,
/// Hint for the maximum pause between words, in milliseconds (name-based
/// reading — confirm exact semantics against the proto docs).
#[prost(int64, tag = "2")]
pub max_pause_between_words_hint_ms: i64,
}
/// Nested types for [`DefaultEouClassifier`].
pub mod default_eou_classifier {
/// EOU detection sensitivity level.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum EouSensitivity {
Unspecified = 0,
Default = 1,
High = 2,
}
impl EouSensitivity {
/// Returns the proto value name for `self`. Note the proto uses the bare
/// names `DEFAULT`/`HIGH` (no `EOU_SENSITIVITY_` prefix) for variants 1 and 2.
pub fn as_str_name(&self) -> &'static str {
match self {
EouSensitivity::Unspecified => "EOU_SENSITIVITY_UNSPECIFIED",
EouSensitivity::Default => "DEFAULT",
EouSensitivity::High => "HIGH",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"EOU_SENSITIVITY_UNSPECIFIED" => Some(Self::Unspecified),
"DEFAULT" => Some(Self::Default),
"HIGH" => Some(Self::High),
_ => None,
}
}
}
}
/// Marker message (no fields) selecting an external EOU classifier; the client
/// is then expected to signal end of utterance itself (see [`Eou`] in
/// `streaming_request::Event`).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExternalEouClassifier {}
/// Selects which end-of-utterance classifier to use, via a proto `oneof`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EouClassifierOptions {
/// Exactly one of the `Classifier` variants; `None` if unset on the wire.
#[prost(oneof = "eou_classifier_options::Classifier", tags = "1, 2")]
pub classifier: ::core::option::Option<eou_classifier_options::Classifier>,
}
/// Nested types for [`EouClassifierOptions`].
pub mod eou_classifier_options {
/// The `classifier` oneof: built-in (tag 1) or externally driven (tag 2).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Classifier {
#[prost(message, tag = "1")]
DefaultClassifier(super::DefaultEouClassifier),
#[prost(message, tag = "2")]
ExternalClassifier(super::ExternalEouClassifier),
}
}
/// A single classifier to run on recognition output, plus the events that
/// trigger it.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifier {
/// Classifier identifier (free-form string; valid values are service-defined).
#[prost(string, tag = "1")]
pub classifier: ::prost::alloc::string::String,
/// Raw enum values; interpret each via `recognition_classifier::TriggerType`.
#[prost(enumeration = "recognition_classifier::TriggerType", repeated, tag = "2")]
pub triggers: ::prost::alloc::vec::Vec<i32>,
}
/// Nested types for [`RecognitionClassifier`].
pub mod recognition_classifier {
/// Recognition event on which the classifier fires.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum TriggerType {
Unspecified = 0,
OnUtterance = 1,
OnFinal = 2,
OnPartial = 3,
}
impl TriggerType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
TriggerType::Unspecified => "TRIGGER_TYPE_UNSPECIFIED",
TriggerType::OnUtterance => "ON_UTTERANCE",
TriggerType::OnFinal => "ON_FINAL",
TriggerType::OnPartial => "ON_PARTIAL",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"TRIGGER_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"ON_UTTERANCE" => Some(Self::OnUtterance),
"ON_FINAL" => Some(Self::OnFinal),
"ON_PARTIAL" => Some(Self::OnPartial),
_ => None,
}
}
}
}
/// Set of classifiers to apply during recognition.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierOptions {
#[prost(message, repeated, tag = "1")]
pub classifiers: ::prost::alloc::vec::Vec<RecognitionClassifier>,
}
/// Options enabling speaker- and conversation-level analysis of the audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechAnalysisOptions {
/// Enables per-speaker analysis ([`SpeakerAnalysis`] responses).
#[prost(bool, tag = "1")]
pub enable_speaker_analysis: bool,
/// Enables whole-conversation analysis ([`ConversationAnalysis`] responses).
#[prost(bool, tag = "2")]
pub enable_conversation_analysis: bool,
/// Quantile levels to report in [`DescriptiveStatistics`] (presumably values
/// in [0, 1] — confirm against the proto docs).
#[prost(double, repeated, tag = "3")]
pub descriptive_statistics_quantiles: ::prost::alloc::vec::Vec<f64>,
}
/// Format description for raw (headerless) audio input.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RawAudio {
/// Raw enum value; interpret via `raw_audio::AudioEncoding`.
#[prost(enumeration = "raw_audio::AudioEncoding", tag = "1")]
pub audio_encoding: i32,
/// Sample rate in hertz.
#[prost(int64, tag = "2")]
pub sample_rate_hertz: i64,
/// Number of audio channels.
#[prost(int64, tag = "3")]
pub audio_channel_count: i64,
}
/// Nested types for [`RawAudio`].
pub mod raw_audio {
/// Supported raw encodings; currently only 16-bit linear PCM.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum AudioEncoding {
Unspecified = 0,
Linear16Pcm = 1,
}
impl AudioEncoding {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
AudioEncoding::Unspecified => "AUDIO_ENCODING_UNSPECIFIED",
AudioEncoding::Linear16Pcm => "LINEAR16_PCM",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"AUDIO_ENCODING_UNSPECIFIED" => Some(Self::Unspecified),
"LINEAR16_PCM" => Some(Self::Linear16Pcm),
_ => None,
}
}
}
}
/// Format description for audio wrapped in a container (WAV/OGG-Opus/MP3).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ContainerAudio {
/// Raw enum value; interpret via `container_audio::ContainerAudioType`.
#[prost(enumeration = "container_audio::ContainerAudioType", tag = "1")]
pub container_audio_type: i32,
}
/// Nested types for [`ContainerAudio`].
pub mod container_audio {
/// Supported audio container types.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum ContainerAudioType {
Unspecified = 0,
Wav = 1,
OggOpus = 2,
Mp3 = 3,
}
impl ContainerAudioType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
ContainerAudioType::Unspecified => "CONTAINER_AUDIO_TYPE_UNSPECIFIED",
ContainerAudioType::Wav => "WAV",
ContainerAudioType::OggOpus => "OGG_OPUS",
ContainerAudioType::Mp3 => "MP3",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"CONTAINER_AUDIO_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"WAV" => Some(Self::Wav),
"OGG_OPUS" => Some(Self::OggOpus),
"MP3" => Some(Self::Mp3),
_ => None,
}
}
}
}
/// Describes the input audio format: either raw samples or a container file.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioFormatOptions {
/// Exactly one of the `AudioFormat` variants; `None` if unset on the wire.
#[prost(oneof = "audio_format_options::AudioFormat", tags = "1, 2")]
pub audio_format: ::core::option::Option<audio_format_options::AudioFormat>,
}
/// Nested types for [`AudioFormatOptions`].
pub mod audio_format_options {
/// The `audio_format` oneof: raw PCM (tag 1) or container audio (tag 2).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum AudioFormat {
#[prost(message, tag = "1")]
RawAudio(super::RawAudio),
#[prost(message, tag = "2")]
ContainerAudio(super::ContainerAudio),
}
}
/// Restricts recognition to (or away from) a list of languages.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguageRestrictionOptions {
/// Raw enum value; interpret via
/// `language_restriction_options::LanguageRestrictionType`.
#[prost(
enumeration = "language_restriction_options::LanguageRestrictionType",
tag = "1"
)]
pub restriction_type: i32,
/// Language codes the restriction applies to (singular field name is from the
/// proto; it is a repeated field).
#[prost(string, repeated, tag = "2")]
pub language_code: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Nested types for [`LanguageRestrictionOptions`].
pub mod language_restriction_options {
/// Whether the language list is an allow-list or a deny-list.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum LanguageRestrictionType {
Unspecified = 0,
Whitelist = 1,
Blacklist = 2,
}
impl LanguageRestrictionType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
LanguageRestrictionType::Unspecified => {
"LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED"
}
LanguageRestrictionType::Whitelist => "WHITELIST",
LanguageRestrictionType::Blacklist => "BLACKLIST",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"WHITELIST" => Some(Self::Whitelist),
"BLACKLIST" => Some(Self::Blacklist),
_ => None,
}
}
}
}
/// Top-level recognition model configuration: model id, audio format, text
/// normalization, language restriction, and processing mode.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionModelOptions {
/// Model identifier (service-defined string).
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub audio_format: ::core::option::Option<AudioFormatOptions>,
#[prost(message, optional, tag = "3")]
pub text_normalization: ::core::option::Option<TextNormalizationOptions>,
#[prost(message, optional, tag = "4")]
pub language_restriction: ::core::option::Option<LanguageRestrictionOptions>,
/// Raw enum value; interpret via `recognition_model_options::AudioProcessingType`.
#[prost(enumeration = "recognition_model_options::AudioProcessingType", tag = "5")]
pub audio_processing_type: i32,
}
/// Nested types for [`RecognitionModelOptions`].
pub mod recognition_model_options {
/// Processing mode: streaming real-time vs. full-data (whole recording).
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum AudioProcessingType {
Unspecified = 0,
RealTime = 1,
FullData = 2,
}
impl AudioProcessingType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
AudioProcessingType::Unspecified => "AUDIO_PROCESSING_TYPE_UNSPECIFIED",
AudioProcessingType::RealTime => "REAL_TIME",
AudioProcessingType::FullData => "FULL_DATA",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"AUDIO_PROCESSING_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"REAL_TIME" => Some(Self::RealTime),
"FULL_DATA" => Some(Self::FullData),
_ => None,
}
}
}
}
/// Options controlling speaker labeling (diarization-style tagging of output).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerLabelingOptions {
/// Raw enum value; interpret via `speaker_labeling_options::SpeakerLabeling`.
#[prost(enumeration = "speaker_labeling_options::SpeakerLabeling", tag = "1")]
pub speaker_labeling: i32,
}
/// Nested types for [`SpeakerLabelingOptions`].
pub mod speaker_labeling_options {
/// Whether speaker labeling is applied (tri-state: unspecified/enabled/disabled).
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum SpeakerLabeling {
Unspecified = 0,
Enabled = 1,
Disabled = 2,
}
impl SpeakerLabeling {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
SpeakerLabeling::Unspecified => "SPEAKER_LABELING_UNSPECIFIED",
SpeakerLabeling::Enabled => "SPEAKER_LABELING_ENABLED",
SpeakerLabeling::Disabled => "SPEAKER_LABELING_DISABLED",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"SPEAKER_LABELING_UNSPECIFIED" => Some(Self::Unspecified),
"SPEAKER_LABELING_ENABLED" => Some(Self::Enabled),
"SPEAKER_LABELING_DISABLED" => Some(Self::Disabled),
_ => None,
}
}
}
}
/// Session options sent as the first event of a streaming recognition request
/// (see `streaming_request::Event::SessionOptions`).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingOptions {
#[prost(message, optional, tag = "1")]
pub recognition_model: ::core::option::Option<RecognitionModelOptions>,
#[prost(message, optional, tag = "2")]
pub eou_classifier: ::core::option::Option<EouClassifierOptions>,
#[prost(message, optional, tag = "3")]
pub recognition_classifier: ::core::option::Option<RecognitionClassifierOptions>,
#[prost(message, optional, tag = "4")]
pub speech_analysis: ::core::option::Option<SpeechAnalysisOptions>,
#[prost(message, optional, tag = "5")]
pub speaker_labeling: ::core::option::Option<SpeakerLabelingOptions>,
}
/// A chunk of audio bytes streamed to the recognizer.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioChunk {
/// Raw audio data in the format declared by [`AudioFormatOptions`].
#[prost(bytes = "vec", tag = "1")]
pub data: ::prost::alloc::vec::Vec<u8>,
}
/// Declares a span of silence in the stream without sending audio bytes.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SilenceChunk {
/// Silence duration in milliseconds.
#[prost(int64, tag = "1")]
pub duration_ms: i64,
}
/// Marker event (no fields): the client signals end of utterance explicitly
/// (used with [`ExternalEouClassifier`]).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Eou {}
/// One message in the client-to-server stream of `RecognizeStreaming`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingRequest {
/// Exactly one of the `Event` variants; `None` if unset on the wire.
#[prost(oneof = "streaming_request::Event", tags = "1, 2, 3, 4")]
pub event: ::core::option::Option<streaming_request::Event>,
}
/// Nested types for [`StreamingRequest`].
pub mod streaming_request {
/// The `event` oneof: session setup, audio, declared silence, or explicit EOU.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Event {
#[prost(message, tag = "1")]
SessionOptions(super::StreamingOptions),
#[prost(message, tag = "2")]
Chunk(super::AudioChunk),
#[prost(message, tag = "3")]
SilenceChunk(super::SilenceChunk),
#[prost(message, tag = "4")]
Eou(super::Eou),
}
}
/// Request for asynchronous (long-running) file recognition
/// (`AsyncRecognizer/RecognizeFile`).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognizeFileRequest {
#[prost(message, optional, tag = "3")]
pub recognition_model: ::core::option::Option<RecognitionModelOptions>,
#[prost(message, optional, tag = "4")]
pub recognition_classifier: ::core::option::Option<RecognitionClassifierOptions>,
#[prost(message, optional, tag = "5")]
pub speech_analysis: ::core::option::Option<SpeechAnalysisOptions>,
#[prost(message, optional, tag = "6")]
pub speaker_labeling: ::core::option::Option<SpeakerLabelingOptions>,
/// Audio source (tags 1-2): inline bytes or a URI.
#[prost(oneof = "recognize_file_request::AudioSource", tags = "1, 2")]
pub audio_source: ::core::option::Option<recognize_file_request::AudioSource>,
}
/// Nested types for [`RecognizeFileRequest`].
pub mod recognize_file_request {
/// The `audio_source` oneof: raw file contents (tag 1) or a URI (tag 2).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum AudioSource {
#[prost(bytes, tag = "1")]
Content(::prost::alloc::vec::Vec<u8>),
#[prost(string, tag = "2")]
Uri(::prost::alloc::string::String),
}
}
/// A recognized word with its time span in the audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Word {
#[prost(string, tag = "1")]
pub text: ::prost::alloc::string::String,
/// Word start time, milliseconds from stream start.
#[prost(int64, tag = "2")]
pub start_time_ms: i64,
/// Word end time, milliseconds from stream start.
#[prost(int64, tag = "3")]
pub end_time_ms: i64,
}
/// Estimated language of a hypothesis with its probability.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguageEstimation {
#[prost(string, tag = "1")]
pub language_code: ::prost::alloc::string::String,
#[prost(double, tag = "2")]
pub probability: f64,
}
/// One recognition hypothesis: word timings, full text, time span, confidence,
/// and per-language estimates.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Alternative {
#[prost(message, repeated, tag = "1")]
pub words: ::prost::alloc::vec::Vec<Word>,
/// Full hypothesis text.
#[prost(string, tag = "2")]
pub text: ::prost::alloc::string::String,
#[prost(int64, tag = "3")]
pub start_time_ms: i64,
#[prost(int64, tag = "4")]
pub end_time_ms: i64,
#[prost(double, tag = "5")]
pub confidence: f64,
#[prost(message, repeated, tag = "6")]
pub languages: ::prost::alloc::vec::Vec<LanguageEstimation>,
}
/// Server-side end-of-utterance notification. Note the single field uses
/// tag 2 (tag 1 is unused/reserved in the proto).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EouUpdate {
/// EOU position, milliseconds from stream start.
#[prost(int64, tag = "2")]
pub time_ms: i64,
}
/// A set of recognition hypotheses delivered as a partial/final update.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AlternativeUpdate {
#[prost(message, repeated, tag = "1")]
pub alternatives: ::prost::alloc::vec::Vec<Alternative>,
/// Deprecated in the proto; use `StreamingResponse::channel_tag` instead
/// (NOTE(review): replacement inferred from the sibling field — confirm).
#[deprecated]
#[prost(string, tag = "2")]
pub channel_tag: ::prost::alloc::string::String,
}
/// Progress cursors into the audio stream, all in milliseconds unless noted.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioCursors {
/// Amount of audio received so far.
#[prost(int64, tag = "1")]
pub received_data_ms: i64,
#[prost(int64, tag = "2")]
pub reset_time_ms: i64,
/// Position covered by partial results.
#[prost(int64, tag = "3")]
pub partial_time_ms: i64,
/// Position covered by final results.
#[prost(int64, tag = "4")]
pub final_time_ms: i64,
/// Ordinal index of the latest final (a counter, not milliseconds).
#[prost(int64, tag = "5")]
pub final_index: i64,
/// Position of the last detected end of utterance.
#[prost(int64, tag = "6")]
pub eou_time_ms: i64,
}
/// A refinement (e.g. normalized text) of a previously delivered final result,
/// addressed by its `final_index`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FinalRefinement {
/// Index of the final result being refined (see `AudioCursors::final_index`).
#[prost(int64, tag = "1")]
pub final_index: i64,
/// Refinement payload; currently only normalized text (tag 2).
#[prost(oneof = "final_refinement::Type", tags = "2")]
pub r#type: ::core::option::Option<final_refinement::Type>,
}
/// Nested types for [`FinalRefinement`].
pub mod final_refinement {
/// The `type` oneof; a single variant today, kept as a oneof for extensibility.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Type {
#[prost(message, tag = "2")]
NormalizedText(super::AlternativeUpdate),
}
}
/// Session status report carried in the response stream.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatusCode {
/// Raw enum value; interpret via the top-level [`CodeType`] enum.
#[prost(enumeration = "CodeType", tag = "1")]
pub code_type: i32,
/// Human-readable status message.
#[prost(string, tag = "2")]
pub message: ::prost::alloc::string::String,
}
/// Identifiers of a recognition session.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SessionUuid {
/// Server-assigned session UUID.
#[prost(string, tag = "1")]
pub uuid: ::prost::alloc::string::String,
/// Request id supplied by the user/client (name-based reading — confirm).
#[prost(string, tag = "2")]
pub user_request_id: ::prost::alloc::string::String,
}
/// A highlighted phrase found by a classifier, with its time span.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhraseHighlight {
#[prost(string, tag = "1")]
pub text: ::prost::alloc::string::String,
#[prost(int64, tag = "2")]
pub start_time_ms: i64,
#[prost(int64, tag = "3")]
pub end_time_ms: i64,
}
/// A classifier label with its confidence score.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierLabel {
#[prost(string, tag = "1")]
pub label: ::prost::alloc::string::String,
#[prost(double, tag = "2")]
pub confidence: f64,
}
/// Output of one classifier: matched phrase highlights and label scores.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierResult {
/// Name of the classifier that produced this result.
#[prost(string, tag = "1")]
pub classifier: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "2")]
pub highlights: ::prost::alloc::vec::Vec<PhraseHighlight>,
#[prost(message, repeated, tag = "3")]
pub labels: ::prost::alloc::vec::Vec<RecognitionClassifierLabel>,
}
/// Streamed classifier result over a window of the audio.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierUpdate {
/// Raw enum value; interpret via `recognition_classifier_update::WindowType`.
#[prost(enumeration = "recognition_classifier_update::WindowType", tag = "1")]
pub window_type: i32,
/// Window start, milliseconds from stream start.
#[prost(int64, tag = "2")]
pub start_time_ms: i64,
/// Window end, milliseconds from stream start.
#[prost(int64, tag = "3")]
pub end_time_ms: i64,
#[prost(message, optional, tag = "4")]
pub classifier_result: ::core::option::Option<RecognitionClassifierResult>,
}
/// Nested types for [`RecognitionClassifierUpdate`].
pub mod recognition_classifier_update {
/// The portion of the transcript the classifier window covers.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum WindowType {
Unspecified = 0,
LastUtterance = 1,
LastFinal = 2,
LastPartial = 3,
}
impl WindowType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
WindowType::Unspecified => "WINDOW_TYPE_UNSPECIFIED",
WindowType::LastUtterance => "LAST_UTTERANCE",
WindowType::LastFinal => "LAST_FINAL",
WindowType::LastPartial => "LAST_PARTIAL",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"WINDOW_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"LAST_UTTERANCE" => Some(Self::LastUtterance),
"LAST_FINAL" => Some(Self::LastFinal),
"LAST_PARTIAL" => Some(Self::LastPartial),
_ => None,
}
}
}
}
/// Summary statistics (min/max/mean/std plus requested quantiles) for an
/// analysis metric; quantile levels come from
/// `SpeechAnalysisOptions::descriptive_statistics_quantiles`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DescriptiveStatistics {
#[prost(double, tag = "1")]
pub min: f64,
#[prost(double, tag = "2")]
pub max: f64,
#[prost(double, tag = "3")]
pub mean: f64,
/// Standard deviation.
#[prost(double, tag = "4")]
pub std: f64,
#[prost(message, repeated, tag = "5")]
pub quantiles: ::prost::alloc::vec::Vec<descriptive_statistics::Quantile>,
}
/// Nested types for [`DescriptiveStatistics`].
pub mod descriptive_statistics {
/// A single (level, value) quantile pair.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Quantile {
/// Quantile level (e.g. 0.5 for the median — confirm range against proto docs).
#[prost(double, tag = "1")]
pub level: f64,
/// Metric value at that quantile level.
#[prost(double, tag = "2")]
pub value: f64,
}
}
/// Start/end boundaries of an audio segment, in milliseconds.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioSegmentBoundaries {
#[prost(int64, tag = "1")]
pub start_time_ms: i64,
#[prost(int64, tag = "2")]
pub end_time_ms: i64,
}
/// Per-speaker analysis metrics over a window (whole recording or the last
/// utterance; see `speaker_analysis::WindowType`).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerAnalysis {
/// Tag identifying the speaker these metrics belong to.
#[prost(string, tag = "1")]
pub speaker_tag: ::prost::alloc::string::String,
/// Raw enum value; interpret via `speaker_analysis::WindowType`.
#[prost(enumeration = "speaker_analysis::WindowType", tag = "2")]
pub window_type: i32,
#[prost(message, optional, tag = "3")]
pub speech_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
#[prost(int64, tag = "4")]
pub total_speech_ms: i64,
#[prost(double, tag = "5")]
pub speech_ratio: f64,
#[prost(int64, tag = "6")]
pub total_silence_ms: i64,
#[prost(double, tag = "7")]
pub silence_ratio: f64,
#[prost(int64, tag = "8")]
pub words_count: i64,
#[prost(int64, tag = "9")]
pub letters_count: i64,
/// Speaking-rate distributions, as [`DescriptiveStatistics`].
#[prost(message, optional, tag = "10")]
pub words_per_second: ::core::option::Option<DescriptiveStatistics>,
#[prost(message, optional, tag = "11")]
pub letters_per_second: ::core::option::Option<DescriptiveStatistics>,
#[prost(message, optional, tag = "12")]
pub words_per_utterance: ::core::option::Option<DescriptiveStatistics>,
#[prost(message, optional, tag = "13")]
pub letters_per_utterance: ::core::option::Option<DescriptiveStatistics>,
#[prost(int64, tag = "14")]
pub utterance_count: i64,
#[prost(message, optional, tag = "15")]
pub utterance_duration_estimation: ::core::option::Option<DescriptiveStatistics>,
}
/// Nested types for [`SpeakerAnalysis`].
pub mod speaker_analysis {
/// Window the metrics are computed over: full session or last utterance.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum WindowType {
Unspecified = 0,
Total = 1,
LastUtterance = 2,
}
impl WindowType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
WindowType::Unspecified => "WINDOW_TYPE_UNSPECIFIED",
WindowType::Total => "TOTAL",
WindowType::LastUtterance => "LAST_UTTERANCE",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"WINDOW_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"TOTAL" => Some(Self::Total),
"LAST_UTTERANCE" => Some(Self::LastUtterance),
_ => None,
}
}
}
}
/// Conversation-level analysis: simultaneous speech/silence totals and ratios,
/// plus per-speaker interruption statistics.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationAnalysis {
#[prost(message, optional, tag = "1")]
pub conversation_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
#[prost(int64, tag = "2")]
pub total_simultaneous_silence_duration_ms: i64,
#[prost(double, tag = "3")]
pub total_simultaneous_silence_ratio: f64,
#[prost(message, optional, tag = "4")]
pub simultaneous_silence_duration_estimation: ::core::option::Option<
DescriptiveStatistics,
>,
#[prost(int64, tag = "5")]
pub total_simultaneous_speech_duration_ms: i64,
#[prost(double, tag = "6")]
pub total_simultaneous_speech_ratio: f64,
#[prost(message, optional, tag = "7")]
pub simultaneous_speech_duration_estimation: ::core::option::Option<
DescriptiveStatistics,
>,
/// One entry per speaker; see `conversation_analysis::InterruptsEvaluation`.
#[prost(message, repeated, tag = "8")]
pub speaker_interrupts: ::prost::alloc::vec::Vec<
conversation_analysis::InterruptsEvaluation,
>,
#[prost(int64, tag = "9")]
pub total_speech_duration_ms: i64,
#[prost(double, tag = "10")]
pub total_speech_ratio: f64,
}
/// Nested types for [`ConversationAnalysis`].
pub mod conversation_analysis {
/// Interruption statistics for one speaker, with the interrupting segments.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterruptsEvaluation {
#[prost(string, tag = "1")]
pub speaker_tag: ::prost::alloc::string::String,
#[prost(int64, tag = "2")]
pub interrupts_count: i64,
#[prost(int64, tag = "3")]
pub interrupts_duration_ms: i64,
/// Time spans of the individual interruptions.
#[prost(message, repeated, tag = "4")]
pub interrupts: ::prost::alloc::vec::Vec<super::AudioSegmentBoundaries>,
}
}
/// One message in the server-to-client stream: session/cursor metadata plus a
/// oneof event payload (note the non-contiguous oneof tags 4-8 and 10-12;
/// tags 3 and 9 are plain fields).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingResponse {
#[prost(message, optional, tag = "1")]
pub session_uuid: ::core::option::Option<SessionUuid>,
#[prost(message, optional, tag = "2")]
pub audio_cursors: ::core::option::Option<AudioCursors>,
/// Server wall-clock time of the response, in milliseconds.
#[prost(int64, tag = "3")]
pub response_wall_time_ms: i64,
/// Audio channel this response refers to (supersedes the deprecated
/// `AlternativeUpdate::channel_tag`).
#[prost(string, tag = "9")]
pub channel_tag: ::prost::alloc::string::String,
#[prost(oneof = "streaming_response::Event", tags = "4, 5, 6, 7, 8, 10, 11, 12")]
pub event: ::core::option::Option<streaming_response::Event>,
}
/// Nested types for [`StreamingResponse`].
pub mod streaming_response {
/// The `event` oneof: hypothesis updates, EOU, refinements, status, and analyses.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Event {
#[prost(message, tag = "4")]
Partial(super::AlternativeUpdate),
#[prost(message, tag = "5")]
Final(super::AlternativeUpdate),
#[prost(message, tag = "6")]
EouUpdate(super::EouUpdate),
#[prost(message, tag = "7")]
FinalRefinement(super::FinalRefinement),
#[prost(message, tag = "8")]
StatusCode(super::StatusCode),
#[prost(message, tag = "10")]
ClassifierUpdate(super::RecognitionClassifierUpdate),
#[prost(message, tag = "11")]
SpeakerAnalysis(super::SpeakerAnalysis),
#[prost(message, tag = "12")]
ConversationAnalysis(super::ConversationAnalysis),
}
}
/// Request to delete a stored async recognition, addressed by operation id.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRecognitionRequest {
#[prost(string, tag = "1")]
pub operation_id: ::prost::alloc::string::String,
}
/// Top-level status code category carried in [`StatusCode`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum CodeType {
Unspecified = 0,
Working = 1,
Warning = 2,
Closed = 3,
}
impl CodeType {
/// Returns the proto value name for `self`, as declared in the `.proto` file.
pub fn as_str_name(&self) -> &'static str {
match self {
CodeType::Unspecified => "CODE_TYPE_UNSPECIFIED",
CodeType::Working => "WORKING",
CodeType::Warning => "WARNING",
CodeType::Closed => "CLOSED",
}
}
/// Parses a proto value name back into the enum; `None` for unknown names.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"CODE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"WORKING" => Some(Self::Working),
"WARNING" => Some(Self::Warning),
"CLOSED" => Some(Self::Closed),
_ => None,
}
}
}
/// Request to fetch the results of an async recognition, by operation id.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetRecognitionRequest {
#[prost(string, tag = "1")]
pub operation_id: ::prost::alloc::string::String,
}
/// tonic-generated client for the `speechkit.stt.v3.Recognizer` gRPC service.
///
/// NOTE(review): generated code — regenerate via tonic-build rather than editing.
pub mod recognizer_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Thin wrapper over `tonic::client::Grpc` bound to the Recognizer service.
#[derive(Debug, Clone)]
pub struct RecognizerClient<T> {
inner: tonic::client::Grpc<T>,
}
impl RecognizerClient<tonic::transport::Channel> {
/// Connects to `dst` over a new tonic transport channel.
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> RecognizerClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::Error: Into<StdError>,
T::ResponseBody: Body<Data = Bytes> + Send + 'static,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
/// Wraps an existing gRPC service (e.g. an already-built channel).
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
/// Like [`Self::new`], but routes requests to `origin`.
pub fn with_origin(inner: T, origin: Uri) -> Self {
let inner = tonic::client::Grpc::with_origin(inner, origin);
Self { inner }
}
/// Wraps the service in a tonic interceptor (per-request middleware).
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> RecognizerClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T::ResponseBody: Default,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
>>::Error: Into<StdError> + Send + Sync,
{
RecognizerClient::new(InterceptedService::new(inner, interceptor))
}
/// Compresses outgoing requests with `encoding`.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.send_compressed(encoding);
self
}
/// Advertises acceptance of `encoding`-compressed responses.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.accept_compressed(encoding);
self
}
/// Caps the size of decoded (incoming) messages, in bytes.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_decoding_message_size(limit);
self
}
/// Caps the size of encoded (outgoing) messages, in bytes.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Bidirectional streaming RPC `RecognizeStreaming`: client streams
/// [`StreamingRequest`]s, server streams [`StreamingResponse`]s.
pub async fn recognize_streaming(
&mut self,
request: impl tonic::IntoStreamingRequest<Message = super::StreamingRequest>,
) -> std::result::Result<
tonic::Response<tonic::codec::Streaming<super::StreamingResponse>>,
tonic::Status,
> {
// Wait until the underlying service can accept a request; surface
// readiness failures as an `Unknown` status, as tonic codegen does.
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/speechkit.stt.v3.Recognizer/RecognizeStreaming",
);
let mut req = request.into_streaming_request();
req.extensions_mut()
.insert(
GrpcMethod::new("speechkit.stt.v3.Recognizer", "RecognizeStreaming"),
);
self.inner.streaming(req, path, codec).await
}
}
}
/// tonic-generated client for the `speechkit.stt.v3.AsyncRecognizer` gRPC
/// service (long-running file recognition).
///
/// NOTE(review): generated code — regenerate via tonic-build rather than editing.
pub mod async_recognizer_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Thin wrapper over `tonic::client::Grpc` bound to the AsyncRecognizer service.
#[derive(Debug, Clone)]
pub struct AsyncRecognizerClient<T> {
inner: tonic::client::Grpc<T>,
}
impl AsyncRecognizerClient<tonic::transport::Channel> {
/// Connects to `dst` over a new tonic transport channel.
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> AsyncRecognizerClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::Error: Into<StdError>,
T::ResponseBody: Body<Data = Bytes> + Send + 'static,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
/// Wraps an existing gRPC service (e.g. an already-built channel).
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
/// Like [`Self::new`], but routes requests to `origin`.
pub fn with_origin(inner: T, origin: Uri) -> Self {
let inner = tonic::client::Grpc::with_origin(inner, origin);
Self { inner }
}
/// Wraps the service in a tonic interceptor (per-request middleware).
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> AsyncRecognizerClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T::ResponseBody: Default,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
>>::Error: Into<StdError> + Send + Sync,
{
AsyncRecognizerClient::new(InterceptedService::new(inner, interceptor))
}
/// Compresses outgoing requests with `encoding`.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.send_compressed(encoding);
self
}
/// Advertises acceptance of `encoding`-compressed responses.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.accept_compressed(encoding);
self
}
/// Caps the size of decoded (incoming) messages, in bytes.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_decoding_message_size(limit);
self
}
/// Caps the size of encoded (outgoing) messages, in bytes.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Unary RPC `RecognizeFile`: starts a long-running recognition and returns
/// a `yandex.cloud.operation.Operation` handle.
pub async fn recognize_file(
&mut self,
request: impl tonic::IntoRequest<super::RecognizeFileRequest>,
) -> std::result::Result<
tonic::Response<
super::super::super::super::yandex::cloud::operation::Operation,
>,
tonic::Status,
> {
// Wait until the underlying service can accept a request; surface
// readiness failures as an `Unknown` status, as tonic codegen does.
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/speechkit.stt.v3.AsyncRecognizer/RecognizeFile",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new("speechkit.stt.v3.AsyncRecognizer", "RecognizeFile"),
);
self.inner.unary(req, path, codec).await
}
/// Server-streaming RPC `GetRecognition`: fetches results of a previously
/// started recognition as a stream of [`StreamingResponse`]s.
pub async fn get_recognition(
&mut self,
request: impl tonic::IntoRequest<super::GetRecognitionRequest>,
) -> std::result::Result<
tonic::Response<tonic::codec::Streaming<super::StreamingResponse>>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/speechkit.stt.v3.AsyncRecognizer/GetRecognition",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new("speechkit.stt.v3.AsyncRecognizer", "GetRecognition"),
);
self.inner.server_streaming(req, path, codec).await
}
/// Unary RPC `DeleteRecognition`: deletes stored recognition results;
/// returns an empty response on success.
pub async fn delete_recognition(
&mut self,
request: impl tonic::IntoRequest<super::DeleteRecognitionRequest>,
) -> std::result::Result<tonic::Response<()>, tonic::Status> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/speechkit.stt.v3.AsyncRecognizer/DeleteRecognition",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"speechkit.stt.v3.AsyncRecognizer",
"DeleteRecognition",
),
);
self.inner.unary(req, path, codec).await
}
}
}