/// Configuration for a streaming speech-to-speech translation session.
///
/// Used as the `Config` variant (first oneof case) of
/// `StreamingTranslateSpeechToSpeechRequest`, bundling ASR, TTS, and
/// translation settings for the stream.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingTranslateSpeechToSpeechConfig {
/// Speech-recognition settings for the incoming audio (proto field 1).
#[prost(message, optional, tag = "1")]
pub asr_config: ::core::option::Option<super::asr::StreamingRecognitionConfig>,
/// Speech-synthesis settings for the translated output audio (proto field 2).
#[prost(message, optional, tag = "2")]
pub tts_config: ::core::option::Option<SynthesizeSpeechConfig>,
/// Text-translation settings (source/target language, model) (proto field 3).
#[prost(message, optional, tag = "3")]
pub translation_config: ::core::option::Option<TranslationConfig>,
}
/// A single message on the client->server stream of
/// `StreamingTranslateSpeechToSpeech`.
///
/// The oneof carries either the session configuration (tag 1) or a chunk of
/// raw audio bytes (tag 2); see the nested `StreamingRequest` enum.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingTranslateSpeechToSpeechRequest {
#[prost(
oneof = "streaming_translate_speech_to_speech_request::StreamingRequest",
tags = "1, 2"
)]
pub streaming_request: ::core::option::Option<
streaming_translate_speech_to_speech_request::StreamingRequest,
>,
}
/// Nested types for `StreamingTranslateSpeechToSpeechRequest` (prost's
/// convention: the oneof enum lives in a module named after the message).
pub mod streaming_translate_speech_to_speech_request {
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum StreamingRequest {
/// Session configuration (proto field 1).
#[prost(message, tag = "1")]
Config(super::StreamingTranslateSpeechToSpeechConfig),
/// A chunk of raw audio bytes to translate (proto field 2).
#[prost(bytes, tag = "2")]
AudioContent(::prost::alloc::vec::Vec<u8>),
}
}
/// Language-pair and model selection for text translation.
///
/// Referenced from both the speech-to-speech and speech-to-text streaming
/// configs.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TranslationConfig {
/// BCP-47-style code of the input language (proto field 1).
#[prost(string, tag = "1")]
pub source_language_code: ::prost::alloc::string::String,
/// BCP-47-style code of the output language (proto field 2).
#[prost(string, tag = "2")]
pub target_language_code: ::prost::alloc::string::String,
/// Name of the translation model to use (proto field 3).
#[prost(string, tag = "3")]
pub model_name: ::prost::alloc::string::String,
}
/// Text-to-speech settings for the synthesized half of speech-to-speech
/// translation.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SynthesizeSpeechConfig {
/// Output audio encoding; stores the raw enum value of
/// `super::AudioEncoding` as prost does for enumerations (proto field 1).
#[prost(enumeration = "super::AudioEncoding", tag = "1")]
pub encoding: i32,
/// Output sample rate in hertz (proto field 2).
#[prost(int32, tag = "2")]
pub sample_rate_hz: i32,
/// Name of the synthesis voice to use (proto field 3).
#[prost(string, tag = "3")]
pub voice_name: ::prost::alloc::string::String,
/// Language code for synthesis (proto field 4).
#[prost(string, tag = "4")]
pub language_code: ::prost::alloc::string::String,
}
/// A single message on the server->client stream of
/// `StreamingTranslateSpeechToSpeech`: the synthesized translated audio,
/// wrapped in the TTS service's response type.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingTranslateSpeechToSpeechResponse {
#[prost(message, optional, tag = "1")]
pub speech: ::core::option::Option<super::tts::SynthesizeSpeechResponse>,
}
/// A single message on the client->server stream of
/// `StreamingTranslateSpeechToText`.
///
/// The oneof carries either the session configuration (tag 1) or a chunk of
/// raw audio bytes (tag 2); see the nested `StreamingRequest` enum.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingTranslateSpeechToTextRequest {
#[prost(
oneof = "streaming_translate_speech_to_text_request::StreamingRequest",
tags = "1, 2"
)]
pub streaming_request: ::core::option::Option<
streaming_translate_speech_to_text_request::StreamingRequest,
>,
}
/// Nested types for `StreamingTranslateSpeechToTextRequest` (prost's
/// convention: the oneof enum lives in a module named after the message).
pub mod streaming_translate_speech_to_text_request {
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum StreamingRequest {
/// Session configuration (proto field 1).
#[prost(message, tag = "1")]
Config(super::StreamingTranslateSpeechToTextConfig),
/// A chunk of raw audio bytes to translate (proto field 2).
#[prost(bytes, tag = "2")]
AudioContent(::prost::alloc::vec::Vec<u8>),
}
}
/// A single message on the server->client stream of
/// `StreamingTranslateSpeechToText`: recognition results (reusing the ASR
/// service's streaming result type), presumably carrying translated text —
/// confirm against the service's proto docs.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingTranslateSpeechToTextResponse {
#[prost(message, repeated, tag = "1")]
pub results: ::prost::alloc::vec::Vec<super::asr::StreamingRecognitionResult>,
}
/// Configuration for a streaming speech-to-text translation session.
///
/// Like `StreamingTranslateSpeechToSpeechConfig` but without a TTS config,
/// since the output is text rather than audio.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamingTranslateSpeechToTextConfig {
/// Speech-recognition settings for the incoming audio (proto field 1).
#[prost(message, optional, tag = "1")]
pub asr_config: ::core::option::Option<super::asr::StreamingRecognitionConfig>,
/// Text-translation settings (source/target language, model) (proto field 2).
#[prost(message, optional, tag = "2")]
pub translation_config: ::core::option::Option<TranslationConfig>,
}
/// Request for the unary `TranslateText` RPC: a batch of texts plus the
/// model and language pair to translate with.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TranslateTextRequest {
/// Texts to translate (proto field 1, repeated).
#[prost(string, repeated, tag = "1")]
pub texts: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Translation model name (proto field 2).
#[prost(string, tag = "2")]
pub model: ::prost::alloc::string::String,
/// Language code of the input texts (proto field 3).
#[prost(string, tag = "3")]
pub source_language: ::prost::alloc::string::String,
/// Language code to translate into (proto field 4).
#[prost(string, tag = "4")]
pub target_language: ::prost::alloc::string::String,
}
/// One translated text with the language it is in; element type of
/// `TranslateTextResponse::translations`.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Translation {
/// The translated text (proto field 1).
#[prost(string, tag = "1")]
pub text: ::prost::alloc::string::String,
/// Language code of `text` (proto field 2).
#[prost(string, tag = "2")]
pub language: ::prost::alloc::string::String,
}
/// Response for the unary `TranslateText` RPC: one `Translation` per input
/// text — presumably in input order; confirm against the service's proto docs.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TranslateTextResponse {
#[prost(message, repeated, tag = "1")]
pub translations: ::prost::alloc::vec::Vec<Translation>,
}
/// Request for `ListSupportedLanguagePairs`: names the model whose supported
/// language pairs are being queried.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvailableLanguageRequest {
/// Translation model name to query (proto field 1).
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
}
/// Response for `ListSupportedLanguagePairs`: a map from string keys
/// (presumably model names — confirm against the proto definition) to their
/// supported source/target language lists.
///
/// Note: prost generates proto `map` fields as `std::collections::HashMap`,
/// hence the `::std` path here while other fields use `::prost::alloc`.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvailableLanguageResponse {
#[prost(map = "string, message", tag = "1")]
pub languages: ::std::collections::HashMap<
::prost::alloc::string::String,
available_language_response::LanguagePair,
>,
}
/// Nested types for `AvailableLanguageResponse`.
pub mod available_language_response {
/// The source and target languages supported by one map entry of
/// `AvailableLanguageResponse::languages`.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LanguagePair {
/// Supported source-language codes (proto field 1, repeated).
#[prost(string, repeated, tag = "1")]
pub src_lang: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Supported target-language codes (proto field 2, repeated).
#[prost(string, repeated, tag = "2")]
pub tgt_lang: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
}
/// Generated (tonic) gRPC client for the `nvidia.riva.nmt.RivaTranslation`
/// service: two unary RPCs (`TranslateText`, `ListSupportedLanguagePairs`)
/// and two bidirectional streaming RPCs (speech-to-text and
/// speech-to-speech translation).
pub mod riva_translation_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Thin wrapper over `tonic::client::Grpc<T>`; `T` is the underlying
/// HTTP/2 transport (e.g. `tonic::transport::Channel`).
#[derive(Debug, Clone)]
pub struct RivaTranslationClient<T> {
inner: tonic::client::Grpc<T>,
}
impl RivaTranslationClient<tonic::transport::Channel> {
/// Connects to the given endpoint and wraps the resulting channel in a
/// client. Fails if the endpoint is invalid or the connection attempt
/// errors.
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> RivaTranslationClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::Error: Into<StdError>,
T::ResponseBody: Body<Data = Bytes> + Send + 'static,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
/// Builds a client over an already-established transport/service.
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
/// Like `new`, but also sets the origin URI used for outgoing requests.
pub fn with_origin(inner: T, origin: Uri) -> Self {
let inner = tonic::client::Grpc::with_origin(inner, origin);
Self { inner }
}
/// Wraps the transport in `InterceptedService` so `interceptor` runs on
/// every outgoing request (e.g. to inject auth metadata).
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> RivaTranslationClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T::ResponseBody: Default,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
>>::Error: Into<StdError> + Send + Sync,
{
RivaTranslationClient::new(InterceptedService::new(inner, interceptor))
}
/// Compress outgoing requests with the given encoding (builder-style).
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.send_compressed(encoding);
self
}
/// Advertise willingness to accept responses compressed with `encoding`.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.accept_compressed(encoding);
self
}
/// Cap the size of decoded (incoming) messages, in bytes.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_decoding_message_size(limit);
self
}
/// Cap the size of encoded (outgoing) messages, in bytes.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Unary RPC: translate a batch of texts
/// (`/nvidia.riva.nmt.RivaTranslation/TranslateText`).
pub async fn translate_text(
&mut self,
request: impl tonic::IntoRequest<super::TranslateTextRequest>,
) -> std::result::Result<
tonic::Response<super::TranslateTextResponse>,
tonic::Status,
> {
// Wait for the underlying service to be ready before sending;
// readiness failures surface as `Code::Unknown`.
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nmt.RivaTranslation/TranslateText",
);
let mut req = request.into_request();
// Attach the method identity as a request extension (used by
// tonic middleware, e.g. for tracing/metrics).
req.extensions_mut()
.insert(
GrpcMethod::new("nvidia.riva.nmt.RivaTranslation", "TranslateText"),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: list the language pairs a model supports
/// (`/nvidia.riva.nmt.RivaTranslation/ListSupportedLanguagePairs`).
pub async fn list_supported_language_pairs(
&mut self,
request: impl tonic::IntoRequest<super::AvailableLanguageRequest>,
) -> std::result::Result<
tonic::Response<super::AvailableLanguageResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nmt.RivaTranslation/ListSupportedLanguagePairs",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nmt.RivaTranslation",
"ListSupportedLanguagePairs",
),
);
self.inner.unary(req, path, codec).await
}
/// Bidirectional streaming RPC: stream audio in, receive translated
/// text results back
/// (`/nvidia.riva.nmt.RivaTranslation/StreamingTranslateSpeechToText`).
pub async fn streaming_translate_speech_to_text(
&mut self,
request: impl tonic::IntoStreamingRequest<
Message = super::StreamingTranslateSpeechToTextRequest,
>,
) -> std::result::Result<
tonic::Response<
tonic::codec::Streaming<super::StreamingTranslateSpeechToTextResponse>,
>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nmt.RivaTranslation/StreamingTranslateSpeechToText",
);
let mut req = request.into_streaming_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nmt.RivaTranslation",
"StreamingTranslateSpeechToText",
),
);
self.inner.streaming(req, path, codec).await
}
/// Bidirectional streaming RPC: stream audio in, receive synthesized
/// translated audio back
/// (`/nvidia.riva.nmt.RivaTranslation/StreamingTranslateSpeechToSpeech`).
pub async fn streaming_translate_speech_to_speech(
&mut self,
request: impl tonic::IntoStreamingRequest<
Message = super::StreamingTranslateSpeechToSpeechRequest,
>,
) -> std::result::Result<
tonic::Response<
tonic::codec::Streaming<super::StreamingTranslateSpeechToSpeechResponse>,
>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nmt.RivaTranslation/StreamingTranslateSpeechToSpeech",
);
let mut req = request.into_streaming_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nmt.RivaTranslation",
"StreamingTranslateSpeechToSpeech",
),
);
self.inner.streaming(req, path, codec).await
}
}
}
/// Generated (tonic) gRPC server skeleton for
/// `nvidia.riva.nmt.RivaTranslation`: implement the `RivaTranslation` trait
/// and wrap it in `RivaTranslationServer` to serve the four RPCs.
pub mod riva_translation_server {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
/// Service trait the application implements; one async method per RPC,
/// plus an associated stream type for each server-streaming response.
#[async_trait]
pub trait RivaTranslation: Send + Sync + 'static {
/// Handles the unary `TranslateText` RPC.
async fn translate_text(
&self,
request: tonic::Request<super::TranslateTextRequest>,
) -> std::result::Result<
tonic::Response<super::TranslateTextResponse>,
tonic::Status,
>;
/// Handles the unary `ListSupportedLanguagePairs` RPC.
async fn list_supported_language_pairs(
&self,
request: tonic::Request<super::AvailableLanguageRequest>,
) -> std::result::Result<
tonic::Response<super::AvailableLanguageResponse>,
tonic::Status,
>;
/// Response-stream type for `StreamingTranslateSpeechToText`.
type StreamingTranslateSpeechToTextStream: futures_core::Stream<
Item = std::result::Result<
super::StreamingTranslateSpeechToTextResponse,
tonic::Status,
>,
>
+ Send
+ 'static;
/// Handles the bidirectional `StreamingTranslateSpeechToText` RPC.
async fn streaming_translate_speech_to_text(
&self,
request: tonic::Request<
tonic::Streaming<super::StreamingTranslateSpeechToTextRequest>,
>,
) -> std::result::Result<
tonic::Response<Self::StreamingTranslateSpeechToTextStream>,
tonic::Status,
>;
/// Response-stream type for `StreamingTranslateSpeechToSpeech`.
type StreamingTranslateSpeechToSpeechStream: futures_core::Stream<
Item = std::result::Result<
super::StreamingTranslateSpeechToSpeechResponse,
tonic::Status,
>,
>
+ Send
+ 'static;
/// Handles the bidirectional `StreamingTranslateSpeechToSpeech` RPC.
async fn streaming_translate_speech_to_speech(
&self,
request: tonic::Request<
tonic::Streaming<super::StreamingTranslateSpeechToSpeechRequest>,
>,
) -> std::result::Result<
tonic::Response<Self::StreamingTranslateSpeechToSpeechStream>,
tonic::Status,
>;
}
/// tonic `Service` adapter that routes HTTP requests to a
/// `RivaTranslation` implementation and applies compression/size limits.
#[derive(Debug)]
pub struct RivaTranslationServer<T: RivaTranslation> {
inner: _Inner<T>,
accept_compression_encodings: EnabledCompressionEncodings,
send_compression_encodings: EnabledCompressionEncodings,
max_decoding_message_size: Option<usize>,
max_encoding_message_size: Option<usize>,
}
// Private newtype around `Arc<T>`; gives the generated code a place to
// hang `Clone`/`Debug` impls for the shared service instance.
struct _Inner<T>(Arc<T>);
impl<T: RivaTranslation> RivaTranslationServer<T> {
/// Wraps a service implementation, taking ownership of it.
pub fn new(inner: T) -> Self {
Self::from_arc(Arc::new(inner))
}
/// Wraps an already-shared service implementation. All knobs start at
/// their defaults (no compression, no explicit size limits).
pub fn from_arc(inner: Arc<T>) -> Self {
let inner = _Inner(inner);
Self {
inner,
accept_compression_encodings: Default::default(),
send_compression_encodings: Default::default(),
max_decoding_message_size: None,
max_encoding_message_size: None,
}
}
/// Wraps the server in `InterceptedService` so `interceptor` runs on
/// every incoming request.
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> InterceptedService<Self, F>
where
F: tonic::service::Interceptor,
{
InterceptedService::new(Self::new(inner), interceptor)
}
/// Accept requests compressed with `encoding` (builder-style).
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.accept_compression_encodings.enable(encoding);
self
}
/// Compress responses with `encoding` when the client accepts it.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.send_compression_encodings.enable(encoding);
self
}
/// Cap the size of decoded (incoming) messages, in bytes.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.max_decoding_message_size = Some(limit);
self
}
/// Cap the size of encoded (outgoing) messages, in bytes.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.max_encoding_message_size = Some(limit);
self
}
}
impl<T, B> tonic::codegen::Service<http::Request<B>> for RivaTranslationServer<T>
where
T: RivaTranslation,
B: Body + Send + 'static,
B::Error: Into<StdError> + Send + 'static,
{
type Response = http::Response<tonic::body::BoxBody>;
type Error = std::convert::Infallible;
type Future = BoxFuture<Self::Response, Self::Error>;
fn poll_ready(
&mut self,
_cx: &mut Context<'_>,
) -> Poll<std::result::Result<(), Self::Error>> {
// Always ready: per-request backpressure is handled inside `call`.
Poll::Ready(Ok(()))
}
// Dispatches on the request path. Each arm defines a one-off shim type
// implementing tonic's `UnaryService`/`StreamingService` that forwards
// to the corresponding trait method, then drives it through
// `tonic::server::Grpc` with this server's compression/size settings.
fn call(&mut self, req: http::Request<B>) -> Self::Future {
let inner = self.inner.clone();
match req.uri().path() {
"/nvidia.riva.nmt.RivaTranslation/TranslateText" => {
#[allow(non_camel_case_types)]
struct TranslateTextSvc<T: RivaTranslation>(pub Arc<T>);
impl<
T: RivaTranslation,
> tonic::server::UnaryService<super::TranslateTextRequest>
for TranslateTextSvc<T> {
type Response = super::TranslateTextResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::TranslateTextRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).translate_text(request).await
};
Box::pin(fut)
}
}
// Copy the config out of `self` so the async block below is
// `'static` and does not borrow the server.
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = TranslateTextSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nmt.RivaTranslation/ListSupportedLanguagePairs" => {
#[allow(non_camel_case_types)]
struct ListSupportedLanguagePairsSvc<T: RivaTranslation>(pub Arc<T>);
impl<
T: RivaTranslation,
> tonic::server::UnaryService<super::AvailableLanguageRequest>
for ListSupportedLanguagePairsSvc<T> {
type Response = super::AvailableLanguageResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::AvailableLanguageRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).list_supported_language_pairs(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = ListSupportedLanguagePairsSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nmt.RivaTranslation/StreamingTranslateSpeechToText" => {
#[allow(non_camel_case_types)]
struct StreamingTranslateSpeechToTextSvc<T: RivaTranslation>(
pub Arc<T>,
);
impl<
T: RivaTranslation,
> tonic::server::StreamingService<
super::StreamingTranslateSpeechToTextRequest,
> for StreamingTranslateSpeechToTextSvc<T> {
type Response = super::StreamingTranslateSpeechToTextResponse;
type ResponseStream = T::StreamingTranslateSpeechToTextStream;
type Future = BoxFuture<
tonic::Response<Self::ResponseStream>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<
tonic::Streaming<
super::StreamingTranslateSpeechToTextRequest,
>,
>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).streaming_translate_speech_to_text(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = StreamingTranslateSpeechToTextSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.streaming(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nmt.RivaTranslation/StreamingTranslateSpeechToSpeech" => {
#[allow(non_camel_case_types)]
struct StreamingTranslateSpeechToSpeechSvc<T: RivaTranslation>(
pub Arc<T>,
);
impl<
T: RivaTranslation,
> tonic::server::StreamingService<
super::StreamingTranslateSpeechToSpeechRequest,
> for StreamingTranslateSpeechToSpeechSvc<T> {
type Response = super::StreamingTranslateSpeechToSpeechResponse;
type ResponseStream = T::StreamingTranslateSpeechToSpeechStream;
type Future = BoxFuture<
tonic::Response<Self::ResponseStream>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<
tonic::Streaming<
super::StreamingTranslateSpeechToSpeechRequest,
>,
>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).streaming_translate_speech_to_speech(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = StreamingTranslateSpeechToSpeechSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.streaming(method, req).await;
Ok(res)
};
Box::pin(fut)
}
// Unknown path: reply with gRPC status 12 (UNIMPLEMENTED) encoded
// in headers, per the gRPC-over-HTTP/2 spec.
_ => {
Box::pin(async move {
Ok(
http::Response::builder()
.status(200)
.header("grpc-status", "12")
.header("content-type", "application/grpc")
.body(empty_body())
.unwrap(),
)
})
}
}
}
}
impl<T: RivaTranslation> Clone for RivaTranslationServer<T> {
fn clone(&self) -> Self {
let inner = self.inner.clone();
Self {
inner,
accept_compression_encodings: self.accept_compression_encodings,
send_compression_encodings: self.send_compression_encodings,
max_decoding_message_size: self.max_decoding_message_size,
max_encoding_message_size: self.max_encoding_message_size,
}
}
}
impl<T: RivaTranslation> Clone for _Inner<T> {
fn clone(&self) -> Self {
// Cheap clone: bumps the `Arc` refcount; the service itself is shared.
Self(Arc::clone(&self.0))
}
}
impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self.0)
}
}
impl<T: RivaTranslation> tonic::server::NamedService for RivaTranslationServer<T> {
/// Fully-qualified proto service name, used for routing registration.
const NAME: &'static str = "nvidia.riva.nmt.RivaTranslation";
}
}