//! yandex-cloud 2025.4.14
//!
//! Generated gRPC clients for the Yandex Cloud API.
//! Documentation
/// Descriptive statistics of a scalar distribution observed during speech
/// analysis (e.g. words-per-second): range, moments, and selected quantiles.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DescriptiveStatistics {
    /// Minimum observed value
    #[prost(double, tag = "1")]
    pub min: f64,
    /// Maximum observed value
    #[prost(double, tag = "2")]
    pub max: f64,
    /// Estimated mean of distribution
    #[prost(double, tag = "3")]
    pub mean: f64,
    /// Estimated standard deviation of distribution
    #[prost(double, tag = "4")]
    pub std: f64,
    /// List of evaluated quantiles
    #[prost(message, repeated, tag = "5")]
    pub quantiles: ::prost::alloc::vec::Vec<Quantile>,
}
/// A single point of an estimated quantile function: the level and the
/// distribution value at that level.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Quantile {
    /// Quantile level in range (0, 1)
    #[prost(double, tag = "1")]
    pub level: f64,
    /// Quantile value
    #[prost(double, tag = "2")]
    pub value: f64,
}
/// Time boundaries of an audio segment.
///
/// NOTE(review): start/end carry an `_ms` suffix while the duration field
/// carries `_seconds` — mixed units appear deliberate in the upstream proto,
/// but confirm before converting between the fields.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AudioSegmentBoundaries {
    /// Audio segment start time, in milliseconds
    #[prost(int64, tag = "1")]
    pub start_time_ms: i64,
    /// Audio segment end time, in milliseconds
    #[prost(int64, tag = "2")]
    pub end_time_ms: i64,
    /// Duration in seconds
    #[prost(int64, tag = "3")]
    pub duration_seconds: i64,
}
/// Speech/silence statistics computed over a single utterance (or over all
/// phrases of one speaker when used via `SpeakerStatistics::complete_statistics`).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UtteranceStatistics {
    /// Speaker tag
    #[prost(string, tag = "1")]
    pub speaker_tag: ::prost::alloc::string::String,
    /// Audio segment boundaries
    #[prost(message, optional, tag = "2")]
    pub speech_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
    /// Total speech duration, in milliseconds
    #[prost(int64, tag = "3")]
    pub total_speech_ms: i64,
    /// Speech ratio within audio segment
    #[prost(double, tag = "4")]
    pub speech_ratio: f64,
    /// Total silence duration, in milliseconds
    #[prost(int64, tag = "5")]
    pub total_silence_ms: i64,
    /// Silence ratio within audio segment
    #[prost(double, tag = "6")]
    pub silence_ratio: f64,
    /// Number of words in recognized speech
    #[prost(int64, tag = "7")]
    pub words_count: i64,
    /// Number of letters in recognized speech
    #[prost(int64, tag = "8")]
    pub letters_count: i64,
    /// Descriptive statistics for words per second distribution
    #[prost(message, optional, tag = "9")]
    pub words_per_second: ::core::option::Option<DescriptiveStatistics>,
    /// Descriptive statistics for letters per second distribution
    #[prost(message, optional, tag = "10")]
    pub letters_per_second: ::core::option::Option<DescriptiveStatistics>,
}
/// Per-speaker aggregate statistics over a whole conversation.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeakerStatistics {
    /// Speaker tag
    #[prost(string, tag = "1")]
    pub speaker_tag: ::prost::alloc::string::String,
    /// analysis of all phrases in format of single utterance
    #[prost(message, optional, tag = "2")]
    pub complete_statistics: ::core::option::Option<UtteranceStatistics>,
    /// Descriptive statistics for words per utterance distribution
    #[prost(message, optional, tag = "3")]
    pub words_per_utterance: ::core::option::Option<DescriptiveStatistics>,
    /// Descriptive statistics for letters per utterance distribution
    #[prost(message, optional, tag = "4")]
    pub letters_per_utterance: ::core::option::Option<DescriptiveStatistics>,
    /// Number of utterances
    #[prost(int64, tag = "5")]
    pub utterance_count: i64,
    /// Descriptive statistics for utterance duration distribution
    #[prost(message, optional, tag = "6")]
    pub utterance_duration_estimation: ::core::option::Option<DescriptiveStatistics>,
}
/// Statistics over the whole conversation: its time boundaries plus one
/// `SpeakerStatistics` entry per detected speaker.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationStatistics {
    /// Audio segment boundaries
    #[prost(message, optional, tag = "1")]
    pub conversation_boundaries: ::core::option::Option<AudioSegmentBoundaries>,
    /// Average statistics for each speaker
    #[prost(message, repeated, tag = "2")]
    pub speaker_statistics: ::prost::alloc::vec::Vec<SpeakerStatistics>,
}
/// Interruption statistics for the conversation, broken down by speaker.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterruptsStatistics {
    /// Interrupts description for every speaker
    #[prost(message, repeated, tag = "1")]
    pub speaker_interrupts: ::prost::alloc::vec::Vec<InterruptsEvaluation>,
}
/// Interruptions made by one speaker: count, total duration (reported in both
/// milliseconds and seconds), and the boundaries of each individual interrupt.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterruptsEvaluation {
    /// Speaker tag
    #[prost(string, tag = "1")]
    pub speaker_tag: ::prost::alloc::string::String,
    /// Number of interrupts made by the speaker
    #[prost(int64, tag = "2")]
    pub interrupts_count: i64,
    /// Total duration of all interrupts, in milliseconds
    #[prost(int64, tag = "3")]
    pub interrupts_duration_ms: i64,
    /// Boundaries for every interrupt
    #[prost(message, repeated, tag = "4")]
    pub interrupts: ::prost::alloc::vec::Vec<AudioSegmentBoundaries>,
    /// Total duration of all interrupts in seconds
    #[prost(int64, tag = "5")]
    pub interrupts_duration_seconds: i64,
}
/// Container for quiz-style analysis points.
/// NOTE(review): semantics inferred from field names — confirm against the
/// upstream proto / service documentation.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Points {
    /// Quiz items evaluated for this talk
    #[prost(message, repeated, tag = "1")]
    pub quiz: ::prost::alloc::vec::Vec<Quiz>,
}
/// A single quiz item: a request and its (possibly absent) response.
/// NOTE(review): field semantics inferred from names — verify against the
/// service documentation.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Quiz {
    /// Quiz request text
    #[prost(string, tag = "1")]
    pub request: ::prost::alloc::string::String,
    /// Quiz response; `message, optional` over `String` is prost's mapping of
    /// the `google.protobuf.StringValue` wrapper (distinguishes unset from "")
    #[prost(message, optional, tag = "2")]
    pub response: ::core::option::Option<::prost::alloc::string::String>,
    /// Quiz item identifier
    #[prost(string, tag = "3")]
    pub id: ::prost::alloc::string::String,
}
/// Result of running a classifier over one audio segment of recognized speech.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierResult {
    /// Start time of the audio segment used for classification, in milliseconds
    #[prost(int64, tag = "1")]
    pub start_time_ms: i64,
    /// End time of the audio segment used for classification, in milliseconds
    #[prost(int64, tag = "2")]
    pub end_time_ms: i64,
    /// Name of the triggered classifier
    #[prost(string, tag = "3")]
    pub classifier: ::prost::alloc::string::String,
    /// List of highlights, i.e. parts of phrase that determine the result of the classification
    #[prost(message, repeated, tag = "4")]
    pub highlights: ::prost::alloc::vec::Vec<PhraseHighlight>,
    /// Classifier predictions
    #[prost(message, repeated, tag = "5")]
    pub labels: ::prost::alloc::vec::Vec<RecognitionClassifierLabel>,
}
/// A highlighted span inside a phrase, addressed by symbol offset and length,
/// that contributed to a classifier's decision.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhraseHighlight {
    /// Text transcription of the highlighted audio segment
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// offset in symbols from the beginning of whole phrase where highlight begins
    #[prost(int64, tag = "2")]
    pub offset: i64,
    /// count of symbols in highlighted text
    #[prost(int64, tag = "3")]
    pub count: i64,
}
/// A single class prediction: the predicted label and its confidence.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionClassifierLabel {
    /// The label of the class predicted by the classifier
    #[prost(string, tag = "1")]
    pub label: ::prost::alloc::string::String,
    /// The prediction confidence
    #[prost(double, tag = "2")]
    pub confidence: f64,
}
/// Statistics about periods when all speakers are silent simultaneously.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SilenceStatistics {
    /// Total simultaneous silence duration, in milliseconds
    #[prost(int64, tag = "1")]
    pub total_simultaneous_silence_duration_ms: i64,
    /// Simultaneous silence ratio within audio segment
    #[prost(double, tag = "2")]
    pub total_simultaneous_silence_ratio: f64,
    /// Descriptive statistics for simultaneous silence duration distribution
    #[prost(message, optional, tag = "3")]
    pub simultaneous_silence_duration_estimation: ::core::option::Option<
        DescriptiveStatistics,
    >,
    /// Total simultaneous silence duration, in seconds
    #[prost(int64, tag = "4")]
    pub total_simultaneous_silence_duration_seconds: i64,
}
/// Statistics about periods when several speakers talk at the same time.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechStatistics {
    /// Total simultaneous speech duration in seconds
    #[prost(int64, tag = "1")]
    pub total_simultaneous_speech_duration_seconds: i64,
    /// Total simultaneous speech duration in ms
    #[prost(int64, tag = "2")]
    pub total_simultaneous_speech_duration_ms: i64,
    /// Simultaneous speech ratio within audio segment
    #[prost(double, tag = "3")]
    pub total_simultaneous_speech_ratio: f64,
    /// Descriptive statistics for simultaneous speech duration distribution
    #[prost(message, optional, tag = "4")]
    pub simultaneous_speech_duration_estimation: ::core::option::Option<
        DescriptiveStatistics,
    >,
}
/// Summarization output: a list of per-field statements.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Summarization {
    /// Statements produced by summarization, one per summarization field
    #[prost(message, repeated, tag = "1")]
    pub statements: ::prost::alloc::vec::Vec<SummarizationStatement>,
}
/// A summarization statement: the field it answers and the response text(s).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SummarizationStatement {
    /// The summarization field this statement belongs to
    #[prost(message, optional, tag = "1")]
    pub field: ::core::option::Option<SummarizationField>,
    /// Response values; presumably one element for `Text` fields and several
    /// for `TextArray` fields — TODO confirm against the service docs
    #[prost(string, repeated, tag = "2")]
    pub response: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Description of one summarization field (id, display name, value type).
/// Note: the `id` field is declared first but carries wire tag 3 — tag order
/// need not match declaration order in protobuf, so this is not an error.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SummarizationField {
    /// Field identifier
    #[prost(string, tag = "3")]
    pub id: ::prost::alloc::string::String,
    /// Field name
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Field value type, as a raw enum value; decode via `SummarizationFieldType`
    #[prost(enumeration = "SummarizationFieldType", tag = "2")]
    pub r#type: i32,
}
/// Value type of a summarization field: plain text or an array of texts.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SummarizationFieldType {
    Unspecified = 0,
    Text = 1,
    TextArray = 2,
}
impl SummarizationFieldType {
    /// All variants, in discriminant order; shared by the name conversions below.
    const ALL: [Self; 3] = [Self::Unspecified, Self::Text, Self::TextArray];
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "SUMMARIZATION_FIELD_TYPE_UNSPECIFIED",
            Self::Text => "TEXT",
            Self::TextArray => "TEXT_ARRAY",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    ///
    /// Implemented as the inverse of [`Self::as_str_name`]: scans the variant
    /// list for the one whose name matches, returning `None` for unknown input.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        Self::ALL.into_iter().find(|v| v.as_str_name() == value)
    }
}
/// Results of all text classifiers run over the talk.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextClassifiers {
    /// One result entry per classifier
    #[prost(message, repeated, tag = "1")]
    pub classification_result: ::prost::alloc::vec::Vec<ClassificationResult>,
}
/// Aggregated result of one named classifier, with per-channel statistics.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassificationResult {
    /// Classifier name
    #[prost(string, tag = "1")]
    pub classifier: ::prost::alloc::string::String,
    /// Classifier statistics
    #[prost(message, repeated, tag = "2")]
    pub classifier_statistics: ::prost::alloc::vec::Vec<ClassifierStatistics>,
}
/// Per-channel (or whole-talk) statistics for one classifier.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifierStatistics {
    /// Channel number, null for whole talk; `message, optional` over `i64` is
    /// prost's mapping of the `google.protobuf.Int64Value` wrapper
    #[prost(message, optional, tag = "1")]
    pub channel_number: ::core::option::Option<i64>,
    /// classifier total count
    #[prost(int64, tag = "2")]
    pub total_count: i64,
    /// Represents various histograms build on top of classifiers
    #[prost(message, repeated, tag = "3")]
    pub histograms: ::prost::alloc::vec::Vec<Histogram>,
}
/// A histogram of classifier counts; bucket width is implied by the number
/// of elements in `count_values`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Histogram {
    /// histogram count values. For example:
    /// if len(count_values) = 2, it means that histogram is 50/50,
    /// if len(count_values) = 3 - \[0\] value represents first third, \[1\] - second third, \[2\] - last third, etc.
    #[prost(int64, repeated, tag = "1")]
    pub count_values: ::prost::alloc::vec::Vec<i64>,
}
/// Full transcription of a talk: recognized phrases plus metadata about the
/// algorithms that produced them.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Transcription {
    /// Recognized phrases of the talk
    #[prost(message, repeated, tag = "1")]
    pub phrases: ::prost::alloc::vec::Vec<Phrase>,
    /// Their might be several algorithms that work on talk transcription. For example: speechkit and translator
    /// So there might be other fields here for tracing
    #[prost(message, repeated, tag = "2")]
    pub algorithms_metadata: ::prost::alloc::vec::Vec<AlgorithmMetadata>,
}
/// One recognized phrase: its channel, time span, text, per-phrase statistics,
/// and any classifier results attached to it.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Phrase {
    /// Audio channel the phrase was recognized on
    #[prost(int64, tag = "1")]
    pub channel_number: i64,
    /// Phrase start time, in milliseconds
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// Phrase end time, in milliseconds
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
    /// Recognized text of the phrase
    #[prost(message, optional, tag = "4")]
    pub phrase: ::core::option::Option<PhraseText>,
    /// Statistics computed for this phrase
    #[prost(message, optional, tag = "5")]
    pub statistics: ::core::option::Option<PhraseStatistics>,
    /// Classifier results triggered on this phrase
    #[prost(message, repeated, tag = "6")]
    pub classifiers: ::prost::alloc::vec::Vec<RecognitionClassifierResult>,
}
/// Text content of a recognized phrase, with its word-level breakdown.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhraseText {
    /// Raw recognized text
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// Language of the phrase; exact format (e.g. a language code) not visible
    /// here — TODO confirm against the service docs
    #[prost(string, tag = "2")]
    pub language: ::prost::alloc::string::String,
    /// Normalized form of the recognized text
    #[prost(string, tag = "3")]
    pub normalized_text: ::prost::alloc::string::String,
    /// Individual words with their timings
    #[prost(message, repeated, tag = "4")]
    pub words: ::prost::alloc::vec::Vec<Word>,
}
/// A single recognized word with its time boundaries.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Word {
    /// The recognized word
    #[prost(string, tag = "1")]
    pub word: ::prost::alloc::string::String,
    /// Word start time, in milliseconds
    #[prost(int64, tag = "2")]
    pub start_time_ms: i64,
    /// Word end time, in milliseconds
    #[prost(int64, tag = "3")]
    pub end_time_ms: i64,
}
/// Tracing metadata for one algorithm run (e.g. recognition or translation)
/// that contributed to the transcription.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AlgorithmMetadata {
    /// When the algorithm task was created
    #[prost(message, optional, tag = "1")]
    pub created_task_date: ::core::option::Option<::prost_types::Timestamp>,
    /// When the algorithm task completed
    #[prost(message, optional, tag = "2")]
    pub completed_task_date: ::core::option::Option<::prost_types::Timestamp>,
    /// Error reported by the algorithm, if any
    #[prost(message, optional, tag = "3")]
    pub error: ::core::option::Option<Error>,
    /// Trace identifier for debugging/correlation
    #[prost(string, tag = "4")]
    pub trace_id: ::prost::alloc::string::String,
    /// Algorithm name
    #[prost(string, tag = "5")]
    pub name: ::prost::alloc::string::String,
}
/// Error description attached to an algorithm run: a machine-readable code
/// and a human-readable message.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Error code
    #[prost(string, tag = "1")]
    pub code: ::prost::alloc::string::String,
    /// Error message
    #[prost(string, tag = "2")]
    pub message: ::prost::alloc::string::String,
}
/// Wrapper carrying per-phrase statistics in the `UtteranceStatistics` format.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PhraseStatistics {
    /// Statistics for the single phrase, in the same format as for utterances
    #[prost(message, optional, tag = "1")]
    pub statistics: ::core::option::Option<UtteranceStatistics>,
}