/// Request message for `SttService.LongRunningRecognize` (non-streaming,
/// operation-based recognition).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LongRunningRecognitionRequest {
/// Recognition settings (spec + folder id); proto field 1.
#[prost(message, optional, tag = "1")]
pub config: ::core::option::Option<RecognitionConfig>,
/// Audio to transcribe — inline bytes or a URI (see `RecognitionAudio`); proto field 2.
#[prost(message, optional, tag = "2")]
pub audio: ::core::option::Option<RecognitionAudio>,
}
/// Result of a long-running recognition — presumably the payload of the
/// completed `Operation` returned by `long_running_recognize`; confirm against
/// the service docs.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LongRunningRecognitionResponse {
/// Recognized results; proto field 1 (repeated).
#[prost(message, repeated, tag = "1")]
pub chunks: ::prost::alloc::vec::Vec<SpeechRecognitionResult>,
}
/// One message of the client-side stream in `SttService.StreamingRecognize`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingRecognitionRequest {
/// Exactly one of: recognition config or an audio chunk (proto oneof, fields 1-2).
#[prost(oneof = "streaming_recognition_request::StreamingRequest", tags = "1, 2")]
pub streaming_request: ::core::option::Option<
streaming_recognition_request::StreamingRequest,
>,
}
/// Nested types for the `StreamingRecognitionRequest.streaming_request` oneof.
pub mod streaming_recognition_request {
/// Payload of a single streaming request message.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum StreamingRequest {
/// Recognition settings; by streaming-gRPC convention usually sent only in
/// the first message — TODO confirm against the service docs.
#[prost(message, tag = "1")]
Config(super::RecognitionConfig),
/// A chunk of raw audio bytes; proto field 2.
#[prost(bytes, tag = "2")]
AudioContent(::prost::alloc::vec::Vec<u8>),
}
}
/// One message of the server-side stream in `SttService.StreamingRecognize`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingRecognitionResponse {
/// Recognition hypotheses (carry their own finality flags); proto field 1.
#[prost(message, repeated, tag = "1")]
pub chunks: ::prost::alloc::vec::Vec<SpeechRecognitionChunk>,
}
/// Audio payload for non-streaming recognition: inline bytes or a reference by URI.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionAudio {
/// Exactly one audio source (proto oneof, fields 1-2).
#[prost(oneof = "recognition_audio::AudioSource", tags = "1, 2")]
pub audio_source: ::core::option::Option<recognition_audio::AudioSource>,
}
/// Nested types for the `RecognitionAudio.audio_source` oneof.
pub mod recognition_audio {
/// Where the audio bytes come from.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum AudioSource {
/// Raw audio bytes embedded directly in the request; proto field 1.
#[prost(bytes, tag = "1")]
Content(::prost::alloc::vec::Vec<u8>),
/// URI of the audio — presumably an object-storage link; confirm with the
/// service docs. Proto field 2.
#[prost(string, tag = "2")]
Uri(::prost::alloc::string::String),
}
}
/// Top-level recognition configuration: audio/model parameters plus folder scope.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionConfig {
/// Detailed audio and model parameters; proto field 1.
#[prost(message, optional, tag = "1")]
pub specification: ::core::option::Option<RecognitionSpec>,
/// Yandex Cloud folder ID — presumably used for authorization/billing scope;
/// confirm with the service docs. Proto field 2.
#[prost(string, tag = "2")]
pub folder_id: ::prost::alloc::string::String,
}
/// Detailed recognition parameters: codec, sampling, language and model flags.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RecognitionSpec {
/// Audio codec; raw i32 holding a `recognition_spec::AudioEncoding` value.
#[prost(enumeration = "recognition_spec::AudioEncoding", tag = "1")]
pub audio_encoding: i32,
/// Sampling rate of the audio, in hertz.
#[prost(int64, tag = "2")]
pub sample_rate_hertz: i64,
/// Recognition language code — accepted values not visible here; see service docs.
#[prost(string, tag = "3")]
pub language_code: ::prost::alloc::string::String,
/// When true, profanity is filtered from results — presumably masked in the
/// transcript; confirm.
#[prost(bool, tag = "4")]
pub profanity_filter: bool,
/// Name of the recognition model to use.
#[prost(string, tag = "5")]
pub model: ::prost::alloc::string::String,
// NOTE(review): tag 6 is skipped — likely a reserved/removed proto field.
/// Whether to return intermediate (partial) results while streaming.
#[prost(bool, tag = "7")]
pub partial_results: bool,
/// Single-utterance mode — presumably stop after the first detected
/// utterance; confirm with the service docs.
#[prost(bool, tag = "8")]
pub single_utterance: bool,
/// Number of channels in the audio.
#[prost(int64, tag = "9")]
pub audio_channel_count: i64,
/// Raw-results flag — semantics not visible here (presumably disables text
/// normalization); see service docs.
#[prost(bool, tag = "10")]
pub raw_results: bool,
/// Literature-text flag — semantics not visible here; see service docs.
#[prost(bool, tag = "11")]
pub literature_text: bool,
}
/// Nested types for `RecognitionSpec`.
pub mod recognition_spec {
    /// Audio codec of the supplied audio data.
    ///
    /// Stored on the wire as a raw `i32`; `0` is the proto "unspecified" default.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum AudioEncoding {
        Unspecified = 0,
        Linear16Pcm = 1,
        OggOpus = 2,
        Mp3 = 3,
    }

    impl AudioEncoding {
        /// Returns the proto-enum string name of this variant
        /// (the inverse of [`Self::from_str_name`]).
        pub fn as_str_name(&self) -> &'static str {
            match *self {
                Self::Unspecified => "AUDIO_ENCODING_UNSPECIFIED",
                Self::Linear16Pcm => "LINEAR16_PCM",
                Self::OggOpus => "OGG_OPUS",
                Self::Mp3 => "MP3",
            }
        }

        /// Parses a proto-enum string name into a variant; `None` for any
        /// unrecognized name.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "MP3" => Some(Self::Mp3),
                "OGG_OPUS" => Some(Self::OggOpus),
                "LINEAR16_PCM" => Some(Self::Linear16Pcm),
                "AUDIO_ENCODING_UNSPECIFIED" => Some(Self::Unspecified),
                _ => None,
            }
        }
    }
}
/// One streaming result: candidate transcriptions plus finality flags.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechRecognitionChunk {
/// Candidate transcriptions for this chunk; proto field 1.
#[prost(message, repeated, tag = "1")]
pub alternatives: ::prost::alloc::vec::Vec<SpeechRecognitionAlternative>,
/// True when this chunk's text is final (raw identifier because `final`
/// is a reserved Rust keyword); proto field 2.
#[prost(bool, tag = "2")]
pub r#final: bool,
/// True when the server marked the end of an utterance; proto field 3.
#[prost(bool, tag = "3")]
pub end_of_utterance: bool,
}
/// One result of long-running recognition, scoped to a single audio channel.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechRecognitionResult {
/// Candidate transcriptions for this result; proto field 1.
#[prost(message, repeated, tag = "1")]
pub alternatives: ::prost::alloc::vec::Vec<SpeechRecognitionAlternative>,
/// Audio channel this result belongs to — TODO confirm whether the tag is
/// 0- or 1-based. Proto field 2.
#[prost(int64, tag = "2")]
pub channel_tag: i64,
}
/// A single candidate transcription with confidence and word-level detail.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpeechRecognitionAlternative {
/// The recognized text; proto field 1.
#[prost(string, tag = "1")]
pub text: ::prost::alloc::string::String,
/// Confidence score — range not specified here, presumably 0.0–1.0; confirm.
#[prost(float, tag = "2")]
pub confidence: f32,
/// Per-word timing and confidence info; proto field 3.
#[prost(message, repeated, tag = "3")]
pub words: ::prost::alloc::vec::Vec<WordInfo>,
}
/// Timing and confidence for a single recognized word.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WordInfo {
/// When the word starts — presumably an offset from the beginning of the
/// audio; confirm. Proto field 1.
#[prost(message, optional, tag = "1")]
pub start_time: ::core::option::Option<::prost_types::Duration>,
/// When the word ends — presumably an offset from the beginning of the
/// audio; confirm. Proto field 2.
#[prost(message, optional, tag = "2")]
pub end_time: ::core::option::Option<::prost_types::Duration>,
/// The recognized word itself; proto field 3.
#[prost(string, tag = "3")]
pub word: ::prost::alloc::string::String,
/// Per-word confidence — range not specified here; confirm. Proto field 4.
#[prost(float, tag = "4")]
pub confidence: f32,
}
/// Generated gRPC client for the `yandex.cloud.ai.stt.v2.SttService` service.
pub mod stt_service_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Thin wrapper over `tonic::client::Grpc` that knows the SttService method paths.
#[derive(Debug, Clone)]
pub struct SttServiceClient<T> {
inner: tonic::client::Grpc<T>,
}
impl SttServiceClient<tonic::transport::Channel> {
/// Connects to `dst` over a new tonic transport channel and wraps it in a client.
///
/// # Errors
/// Returns `tonic::transport::Error` if the endpoint is invalid or the
/// connection cannot be established.
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> SttServiceClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::Error: Into<StdError>,
T::ResponseBody: Body<Data = Bytes> + Send + 'static,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
/// Wraps an already-built gRPC service (channel, mock, …) in a client.
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
/// Like [`Self::new`], but requests are issued against the given `origin` URI.
pub fn with_origin(inner: T, origin: Uri) -> Self {
let inner = tonic::client::Grpc::with_origin(inner, origin);
Self { inner }
}
/// Wraps `inner` so that `interceptor` runs on every outgoing request
/// (e.g. to attach auth metadata).
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> SttServiceClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T::ResponseBody: Default,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
>>::Error: Into<StdError> + Send + Sync,
{
SttServiceClient::new(InterceptedService::new(inner, interceptor))
}
/// Compresses outgoing requests with `encoding`. The server must support the
/// encoding or it may reject the calls.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.send_compressed(encoding);
self
}
/// Advertises that responses compressed with `encoding` are accepted.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.accept_compressed(encoding);
self
}
/// Limits the maximum size of a decoded (received) message, in bytes.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_decoding_message_size(limit);
self
}
/// Limits the maximum size of an encoded (sent) message, in bytes.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Unary call to `LongRunningRecognize`; returns a long-running `Operation`
/// handle rather than the transcription itself.
///
/// # Errors
/// Returns `tonic::Status` if the channel is not ready or the server
/// rejects the call.
pub async fn long_running_recognize(
&mut self,
request: impl tonic::IntoRequest<super::LongRunningRecognitionRequest>,
) -> std::result::Result<
tonic::Response<super::super::super::super::operation::Operation>,
tonic::Status,
> {
// Wait for the underlying service to be ready before issuing the call.
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/yandex.cloud.ai.stt.v2.SttService/LongRunningRecognize",
);
let mut req = request.into_request();
// Record the gRPC method in request extensions (used by interceptors/tracing).
req.extensions_mut()
.insert(
GrpcMethod::new(
"yandex.cloud.ai.stt.v2.SttService",
"LongRunningRecognize",
),
);
self.inner.unary(req, path, codec).await
}
/// Bidirectional-streaming call to `StreamingRecognize`: sends a stream of
/// `StreamingRecognitionRequest`s and yields a stream of responses.
///
/// # Errors
/// Returns `tonic::Status` if the channel is not ready or the server
/// rejects the call.
pub async fn streaming_recognize(
&mut self,
request: impl tonic::IntoStreamingRequest<
Message = super::StreamingRecognitionRequest,
>,
) -> std::result::Result<
tonic::Response<
tonic::codec::Streaming<super::StreamingRecognitionResponse>,
>,
tonic::Status,
> {
// Wait for the underlying service to be ready before issuing the call.
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/yandex.cloud.ai.stt.v2.SttService/StreamingRecognize",
);
let mut req = request.into_streaming_request();
// Record the gRPC method in request extensions (used by interceptors/tracing).
req.extensions_mut()
.insert(
GrpcMethod::new(
"yandex.cloud.ai.stt.v2.SttService",
"StreamingRecognize",
),
);
self.inner.streaming(req, path, codec).await
}
}
}