/// Request for `GetRivaNLPConfig`: asks the server for the configuration of
/// deployed NLP model(s).
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RivaNlpConfigRequest {
/// Name of the model to query. NOTE(review): an empty string presumably
/// selects all deployed models — confirm against the Riva .proto docs.
#[prost(string, tag = "1")]
pub model_name: ::prost::alloc::string::String,
}
/// Response for `GetRivaNLPConfig`: one [`riva_nlp_config_response::Config`]
/// entry per matching model.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RivaNlpConfigResponse {
/// Per-model configuration entries.
#[prost(message, repeated, tag = "1")]
pub model_config: ::prost::alloc::vec::Vec<riva_nlp_config_response::Config>,
}
/// Nested message types for [`RivaNlpConfigResponse`].
pub mod riva_nlp_config_response {
/// Configuration of a single deployed model, expressed as a free-form
/// string-to-string parameter map.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Config {
/// Name of the model this configuration belongs to.
#[prost(string, tag = "1")]
pub model_name: ::prost::alloc::string::String,
/// Arbitrary key/value model parameters (proto3 `map<string, string>`).
#[prost(map = "string, string", tag = "2")]
pub parameters: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::string::String,
>,
}
}
/// Selects which NLP model (and language) a request should be routed to.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NlpModelParams {
/// Target model name.
#[prost(string, tag = "1")]
pub model_name: ::prost::alloc::string::String,
/// BCP-47-style language code. NOTE(review): field tag 2 is skipped —
/// likely a reserved/removed field in the source .proto; do not reuse it.
#[prost(string, tag = "3")]
pub language_code: ::prost::alloc::string::String,
}
/// Request for text-to-text transforms (`TransformText`, `PunctuateText`):
/// a batch of input strings plus model selection.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextTransformRequest {
/// Batch of input texts; the response carries one output per input.
#[prost(string, repeated, tag = "1")]
pub text: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// NOTE(review): presumably the number of hypotheses to return per input —
/// confirm semantics against the Riva proto documentation.
#[prost(uint32, tag = "2")]
pub top_n: u32,
/// Model/language to run the transform with; `None` lets the server choose.
#[prost(message, optional, tag = "3")]
pub model: ::core::option::Option<NlpModelParams>,
}
/// Response for text-to-text transforms: transformed texts, parallel to the
/// request's `text` field.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextTransformResponse {
/// Output texts, one per input text.
#[prost(string, repeated, tag = "1")]
pub text: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Request for `ClassifyText`: sequence-level classification over a batch of
/// input texts.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextClassRequest {
/// Batch of texts to classify.
#[prost(string, repeated, tag = "1")]
pub text: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// NOTE(review): presumably the number of top class labels to return per
/// input — confirm against the Riva proto documentation.
#[prost(uint32, tag = "2")]
pub top_n: u32,
/// Model/language selection; `None` lets the server choose.
#[prost(message, optional, tag = "3")]
pub model: ::core::option::Option<NlpModelParams>,
}
/// A single predicted class label with its confidence score.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Classification {
/// Predicted label name.
#[prost(string, tag = "1")]
pub class_name: ::prost::alloc::string::String,
/// Model confidence for this label. NOTE(review): range (e.g. [0, 1] vs a
/// raw logit) is not visible here — consult the Riva proto docs.
#[prost(float, tag = "2")]
pub score: f32,
}
/// A character/token span within an input text. NOTE(review): whether
/// `start`/`end` are byte, character, or token offsets (and whether `end` is
/// inclusive) is not determinable from this file — check the Riva proto docs.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Span {
/// Span start offset.
#[prost(uint32, tag = "1")]
pub start: u32,
/// Span end offset.
#[prost(uint32, tag = "2")]
pub end: u32,
}
/// Classification result for one input: a ranked list of candidate labels.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassificationResult {
/// Candidate labels with scores for this input.
#[prost(message, repeated, tag = "1")]
pub labels: ::prost::alloc::vec::Vec<Classification>,
}
/// Response for `ClassifyText`: one [`ClassificationResult`] per input text.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextClassResponse {
/// Per-input classification results, parallel to the request's `text`.
#[prost(message, repeated, tag = "1")]
pub results: ::prost::alloc::vec::Vec<ClassificationResult>,
}
/// Request for `ClassifyTokens` (and reused by entity-style analyses):
/// token-level classification over a batch of input texts.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TokenClassRequest {
/// Batch of texts whose tokens should be classified.
#[prost(string, repeated, tag = "1")]
pub text: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// NOTE(review): presumably the number of labels returned per token. Field
/// tag 2 is skipped — likely reserved/removed in the source .proto.
#[prost(uint32, tag = "3")]
pub top_n: u32,
/// Model/language selection; `None` lets the server choose.
#[prost(message, optional, tag = "4")]
pub model: ::core::option::Option<NlpModelParams>,
}
/// One classified token: its text, candidate labels, and source span(s).
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TokenClassValue {
/// The token text as extracted from the input.
#[prost(string, tag = "1")]
pub token: ::prost::alloc::string::String,
/// Candidate labels for this token (repeated to allow top-N results).
#[prost(message, repeated, tag = "2")]
pub label: ::prost::alloc::vec::Vec<Classification>,
/// Location(s) of the token in the input text.
#[prost(message, repeated, tag = "3")]
pub span: ::prost::alloc::vec::Vec<Span>,
}
/// Token classification results for a single input text (one entry per token).
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TokenClassSequence {
/// Classified tokens of one input sequence.
#[prost(message, repeated, tag = "1")]
pub results: ::prost::alloc::vec::Vec<TokenClassValue>,
}
/// Response for `ClassifyTokens`/`AnalyzeEntities`: one [`TokenClassSequence`]
/// per input text.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TokenClassResponse {
/// Per-input token classification sequences, parallel to the request batch.
#[prost(message, repeated, tag = "1")]
pub results: ::prost::alloc::vec::Vec<TokenClassSequence>,
}
/// Context carrier for intent analysis. Currently an empty placeholder message
/// in the .proto (no fields); kept so the `Context` oneof can be extended
/// without a wire-format break.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnalyzeIntentContext {}
/// Options for `AnalyzeIntent`: domain/language selection plus an optional
/// conversational context (a oneof occupying field tags 1 and 2).
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnalyzeIntentOptions {
/// Domain to run intent analysis in (e.g. a deployed intent-slot domain).
#[prost(string, tag = "3")]
pub domain: ::prost::alloc::string::String,
/// Language code for the query.
#[prost(string, tag = "4")]
pub lang: ::prost::alloc::string::String,
/// Optional prior context; tags 1 and 2 belong to the oneof variants.
#[prost(oneof = "analyze_intent_options::Context", tags = "1, 2")]
pub context: ::core::option::Option<analyze_intent_options::Context>,
}
/// Nested types for [`AnalyzeIntentOptions`].
pub mod analyze_intent_options {
/// Optional prior context for intent analysis — at most one variant is set.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Context {
/// Name of the previously detected intent.
#[prost(string, tag = "1")]
PreviousIntent(::prost::alloc::string::String),
/// Structured context (currently an empty placeholder message).
#[prost(message, tag = "2")]
Vectors(super::AnalyzeIntentContext),
}
}
/// Request for `AnalyzeIntent`: a single natural-language query plus options.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnalyzeIntentRequest {
/// The user query to analyze.
#[prost(string, tag = "1")]
pub query: ::prost::alloc::string::String,
/// Domain/language/context options; `None` uses server defaults.
#[prost(message, optional, tag = "2")]
pub options: ::core::option::Option<AnalyzeIntentOptions>,
}
/// Response for `AnalyzeIntent`: detected intent, extracted slots, and the
/// domain the query was resolved against.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnalyzeIntentResponse {
/// Top detected intent with its score.
#[prost(message, optional, tag = "1")]
pub intent: ::core::option::Option<Classification>,
/// Slot values extracted from the query, as classified tokens.
#[prost(message, repeated, tag = "2")]
pub slots: ::prost::alloc::vec::Vec<TokenClassValue>,
/// Domain name as a plain string. NOTE(review): relationship to the scored
/// `domain` field below is not visible here — confirm in the proto docs.
#[prost(string, tag = "3")]
pub domain_str: ::prost::alloc::string::String,
/// Domain classification with score.
#[prost(message, optional, tag = "4")]
pub domain: ::core::option::Option<Classification>,
}
/// Options for `AnalyzeEntities`. NOTE(review): field tags 1–3 are skipped —
/// likely reserved/removed fields in the source .proto; do not reuse them.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnalyzeEntitiesOptions {
/// Language code for the query.
#[prost(string, tag = "4")]
pub lang: ::prost::alloc::string::String,
}
/// Request for `AnalyzeEntities`: a single query plus language options.
/// The response type is [`TokenClassResponse`] (entities as classified tokens).
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnalyzeEntitiesRequest {
/// The text to extract entities from.
#[prost(string, tag = "1")]
pub query: ::prost::alloc::string::String,
/// Language options; `None` uses server defaults.
#[prost(message, optional, tag = "2")]
pub options: ::core::option::Option<AnalyzeEntitiesOptions>,
}
/// Request for `NaturalQuery`: question answering over a supplied context
/// passage.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NaturalQueryRequest {
/// The natural-language question.
#[prost(string, tag = "1")]
pub query: ::prost::alloc::string::String,
/// NOTE(review): presumably the maximum number of answers to return —
/// confirm against the Riva proto documentation.
#[prost(uint32, tag = "2")]
pub top_n: u32,
/// The passage of text the answer should be found in.
#[prost(string, tag = "3")]
pub context: ::prost::alloc::string::String,
}
/// A single candidate answer from `NaturalQuery`, with its confidence score.
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NaturalQueryResult {
/// Extracted answer text.
#[prost(string, tag = "1")]
pub answer: ::prost::alloc::string::String,
/// Model confidence for this answer.
#[prost(float, tag = "2")]
pub score: f32,
}
/// Response for `NaturalQuery`: candidate answers (up to the requested top-N).
#[allow(unknown_lints, clippy::derive_partial_eq_without_eq)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NaturalQueryResponse {
/// Candidate answers, with scores.
#[prost(message, repeated, tag = "1")]
pub results: ::prost::alloc::vec::Vec<NaturalQueryResult>,
}
/// Generated (tonic) client for the
/// `nvidia.riva.nlp.RivaLanguageUnderstanding` gRPC service. All RPCs here
/// are unary: each method waits for the underlying channel to become ready,
/// builds the method path, tags the request with a `GrpcMethod` extension
/// (used by interceptors/telemetry), and performs the call.
pub mod riva_language_understanding_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Client handle; cheap to `Clone` (shares the underlying channel).
#[derive(Debug, Clone)]
pub struct RivaLanguageUnderstandingClient<T> {
inner: tonic::client::Grpc<T>,
}
impl RivaLanguageUnderstandingClient<tonic::transport::Channel> {
/// Connect to `dst` (URI/endpoint) and build a client over the resulting
/// transport channel.
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> RivaLanguageUnderstandingClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::Error: Into<StdError>,
T::ResponseBody: Body<Data = Bytes> + Send + 'static,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
/// Wrap an already-constructed transport/service in a client.
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
/// Like [`Self::new`] but with an explicit `origin` URI for request paths.
pub fn with_origin(inner: T, origin: Uri) -> Self {
let inner = tonic::client::Grpc::with_origin(inner, origin);
Self { inner }
}
/// Build a client whose requests pass through `interceptor` first.
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> RivaLanguageUnderstandingClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T::ResponseBody: Default,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
>>::Error: Into<StdError> + Send + Sync,
{
RivaLanguageUnderstandingClient::new(
InterceptedService::new(inner, interceptor),
)
}
/// Compress requests with the given encoding.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.send_compressed(encoding);
self
}
/// Accept responses compressed with the given encoding.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.inner = self.inner.accept_compressed(encoding);
self
}
/// Limit the maximum size (bytes) of a decoded (received) message.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_decoding_message_size(limit);
self
}
/// Limit the maximum size (bytes) of an encoded (sent) message.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Unary RPC: sequence-level text classification.
pub async fn classify_text(
&mut self,
request: impl tonic::IntoRequest<super::TextClassRequest>,
) -> std::result::Result<
tonic::Response<super::TextClassResponse>,
tonic::Status,
> {
// Wait for the channel to be ready; surface failures as `Unknown`.
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/ClassifyText",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"ClassifyText",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: token-level classification.
pub async fn classify_tokens(
&mut self,
request: impl tonic::IntoRequest<super::TokenClassRequest>,
) -> std::result::Result<
tonic::Response<super::TokenClassResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/ClassifyTokens",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"ClassifyTokens",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: generic text-to-text transform.
pub async fn transform_text(
&mut self,
request: impl tonic::IntoRequest<super::TextTransformRequest>,
) -> std::result::Result<
tonic::Response<super::TextTransformResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/TransformText",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"TransformText",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: entity extraction (entities returned as classified tokens).
pub async fn analyze_entities(
&mut self,
request: impl tonic::IntoRequest<super::AnalyzeEntitiesRequest>,
) -> std::result::Result<
tonic::Response<super::TokenClassResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/AnalyzeEntities",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"AnalyzeEntities",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: intent + slot analysis of a query.
pub async fn analyze_intent(
&mut self,
request: impl tonic::IntoRequest<super::AnalyzeIntentRequest>,
) -> std::result::Result<
tonic::Response<super::AnalyzeIntentResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/AnalyzeIntent",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"AnalyzeIntent",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: punctuation/capitalization restoration (a text transform).
pub async fn punctuate_text(
&mut self,
request: impl tonic::IntoRequest<super::TextTransformRequest>,
) -> std::result::Result<
tonic::Response<super::TextTransformResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/PunctuateText",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"PunctuateText",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: question answering over a supplied context passage.
pub async fn natural_query(
&mut self,
request: impl tonic::IntoRequest<super::NaturalQueryRequest>,
) -> std::result::Result<
tonic::Response<super::NaturalQueryResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/NaturalQuery",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"NaturalQuery",
),
);
self.inner.unary(req, path, codec).await
}
/// Unary RPC: fetch configuration of deployed NLP model(s).
pub async fn get_riva_nlp_config(
&mut self,
request: impl tonic::IntoRequest<super::RivaNlpConfigRequest>,
) -> std::result::Result<
tonic::Response<super::RivaNlpConfigResponse>,
tonic::Status,
> {
self.inner
.ready()
.await
.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/nvidia.riva.nlp.RivaLanguageUnderstanding/GetRivaNLPConfig",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(
GrpcMethod::new(
"nvidia.riva.nlp.RivaLanguageUnderstanding",
"GetRivaNLPConfig",
),
);
self.inner.unary(req, path, codec).await
}
}
}
/// Generated (tonic) server scaffolding for the
/// `nvidia.riva.nlp.RivaLanguageUnderstanding` gRPC service. Implement the
/// [`RivaLanguageUnderstanding`] trait and wrap it in
/// [`RivaLanguageUnderstandingServer`] to serve it; the `Service` impl routes
/// incoming HTTP/2 requests to trait methods by URI path.
pub mod riva_language_understanding_server {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
/// Server-side handler trait: one async method per RPC of the service.
#[async_trait]
pub trait RivaLanguageUnderstanding: Send + Sync + 'static {
/// Sequence-level text classification.
async fn classify_text(
&self,
request: tonic::Request<super::TextClassRequest>,
) -> std::result::Result<
tonic::Response<super::TextClassResponse>,
tonic::Status,
>;
/// Token-level classification.
async fn classify_tokens(
&self,
request: tonic::Request<super::TokenClassRequest>,
) -> std::result::Result<
tonic::Response<super::TokenClassResponse>,
tonic::Status,
>;
/// Generic text-to-text transform.
async fn transform_text(
&self,
request: tonic::Request<super::TextTransformRequest>,
) -> std::result::Result<
tonic::Response<super::TextTransformResponse>,
tonic::Status,
>;
/// Entity extraction (entities returned as classified tokens).
async fn analyze_entities(
&self,
request: tonic::Request<super::AnalyzeEntitiesRequest>,
) -> std::result::Result<
tonic::Response<super::TokenClassResponse>,
tonic::Status,
>;
/// Intent + slot analysis of a query.
async fn analyze_intent(
&self,
request: tonic::Request<super::AnalyzeIntentRequest>,
) -> std::result::Result<
tonic::Response<super::AnalyzeIntentResponse>,
tonic::Status,
>;
/// Punctuation/capitalization restoration (a text transform).
async fn punctuate_text(
&self,
request: tonic::Request<super::TextTransformRequest>,
) -> std::result::Result<
tonic::Response<super::TextTransformResponse>,
tonic::Status,
>;
/// Question answering over a supplied context passage.
async fn natural_query(
&self,
request: tonic::Request<super::NaturalQueryRequest>,
) -> std::result::Result<
tonic::Response<super::NaturalQueryResponse>,
tonic::Status,
>;
/// Fetch configuration of deployed NLP model(s).
async fn get_riva_nlp_config(
&self,
request: tonic::Request<super::RivaNlpConfigRequest>,
) -> std::result::Result<
tonic::Response<super::RivaNlpConfigResponse>,
tonic::Status,
>;
}
/// tower `Service` wrapper around a [`RivaLanguageUnderstanding`] impl,
/// carrying per-server compression and message-size settings.
#[derive(Debug)]
pub struct RivaLanguageUnderstandingServer<T: RivaLanguageUnderstanding> {
inner: _Inner<T>,
accept_compression_encodings: EnabledCompressionEncodings,
send_compression_encodings: EnabledCompressionEncodings,
max_decoding_message_size: Option<usize>,
max_encoding_message_size: Option<usize>,
}
// Newtype over `Arc<T>` so the handler can be shared cheaply per request.
struct _Inner<T>(Arc<T>);
impl<T: RivaLanguageUnderstanding> RivaLanguageUnderstandingServer<T> {
/// Wrap a handler, taking ownership (shared via a new `Arc`).
pub fn new(inner: T) -> Self {
Self::from_arc(Arc::new(inner))
}
/// Wrap an already-shared handler with default settings (no compression,
/// tonic's default message-size limits).
pub fn from_arc(inner: Arc<T>) -> Self {
let inner = _Inner(inner);
Self {
inner,
accept_compression_encodings: Default::default(),
send_compression_encodings: Default::default(),
max_decoding_message_size: None,
max_encoding_message_size: None,
}
}
/// Wrap the server so every request passes through `interceptor` first.
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> InterceptedService<Self, F>
where
F: tonic::service::Interceptor,
{
InterceptedService::new(Self::new(inner), interceptor)
}
/// Enable decompressing requests with the given encoding.
#[must_use]
pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.accept_compression_encodings.enable(encoding);
self
}
/// Enable compressing responses with the given encoding, if the client
/// supports it.
#[must_use]
pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
self.send_compression_encodings.enable(encoding);
self
}
/// Limit the maximum size (bytes) of a decoded (received) message.
#[must_use]
pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
self.max_decoding_message_size = Some(limit);
self
}
/// Limit the maximum size (bytes) of an encoded (sent) message.
#[must_use]
pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
self.max_encoding_message_size = Some(limit);
self
}
}
// Routes each request to the matching trait method by URI path. Each match
// arm declares a one-RPC `UnaryService` shim, snapshots the server settings,
// and hands the request to `tonic::server::Grpc::unary`.
impl<T, B> tonic::codegen::Service<http::Request<B>>
for RivaLanguageUnderstandingServer<T>
where
T: RivaLanguageUnderstanding,
B: Body + Send + 'static,
B::Error: Into<StdError> + Send + 'static,
{
type Response = http::Response<tonic::body::BoxBody>;
type Error = std::convert::Infallible;
type Future = BoxFuture<Self::Response, Self::Error>;
fn poll_ready(
&mut self,
_cx: &mut Context<'_>,
) -> Poll<std::result::Result<(), Self::Error>> {
// Always ready: per-request work happens in `call`'s returned future.
Poll::Ready(Ok(()))
}
fn call(&mut self, req: http::Request<B>) -> Self::Future {
let inner = self.inner.clone();
match req.uri().path() {
"/nvidia.riva.nlp.RivaLanguageUnderstanding/ClassifyText" => {
#[allow(non_camel_case_types)]
struct ClassifyTextSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::TextClassRequest>
for ClassifyTextSvc<T> {
type Response = super::TextClassResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::TextClassRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).classify_text(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = ClassifyTextSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/ClassifyTokens" => {
#[allow(non_camel_case_types)]
struct ClassifyTokensSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::TokenClassRequest>
for ClassifyTokensSvc<T> {
type Response = super::TokenClassResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::TokenClassRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).classify_tokens(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = ClassifyTokensSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/TransformText" => {
#[allow(non_camel_case_types)]
struct TransformTextSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::TextTransformRequest>
for TransformTextSvc<T> {
type Response = super::TextTransformResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::TextTransformRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).transform_text(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = TransformTextSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/AnalyzeEntities" => {
#[allow(non_camel_case_types)]
struct AnalyzeEntitiesSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::AnalyzeEntitiesRequest>
for AnalyzeEntitiesSvc<T> {
type Response = super::TokenClassResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::AnalyzeEntitiesRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).analyze_entities(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = AnalyzeEntitiesSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/AnalyzeIntent" => {
#[allow(non_camel_case_types)]
struct AnalyzeIntentSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::AnalyzeIntentRequest>
for AnalyzeIntentSvc<T> {
type Response = super::AnalyzeIntentResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::AnalyzeIntentRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).analyze_intent(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = AnalyzeIntentSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/PunctuateText" => {
#[allow(non_camel_case_types)]
struct PunctuateTextSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::TextTransformRequest>
for PunctuateTextSvc<T> {
type Response = super::TextTransformResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::TextTransformRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).punctuate_text(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = PunctuateTextSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/NaturalQuery" => {
#[allow(non_camel_case_types)]
struct NaturalQuerySvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::NaturalQueryRequest>
for NaturalQuerySvc<T> {
type Response = super::NaturalQueryResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::NaturalQueryRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).natural_query(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = NaturalQuerySvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/nvidia.riva.nlp.RivaLanguageUnderstanding/GetRivaNLPConfig" => {
#[allow(non_camel_case_types)]
struct GetRivaNLPConfigSvc<T: RivaLanguageUnderstanding>(pub Arc<T>);
impl<
T: RivaLanguageUnderstanding,
> tonic::server::UnaryService<super::RivaNlpConfigRequest>
for GetRivaNLPConfigSvc<T> {
type Response = super::RivaNlpConfigResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::RivaNlpConfigRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).get_riva_nlp_config(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = GetRivaNLPConfigSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
_ => {
// Unknown RPC path: reply with gRPC status 12 (UNIMPLEMENTED) over
// an HTTP 200, per the gRPC-over-HTTP/2 convention.
Box::pin(async move {
Ok(
http::Response::builder()
.status(200)
.header("grpc-status", "12")
.header("content-type", "application/grpc")
.body(empty_body())
.unwrap(),
)
})
}
}
}
}
impl<T: RivaLanguageUnderstanding> Clone for RivaLanguageUnderstandingServer<T> {
fn clone(&self) -> Self {
let inner = self.inner.clone();
Self {
inner,
accept_compression_encodings: self.accept_compression_encodings,
send_compression_encodings: self.send_compression_encodings,
max_decoding_message_size: self.max_decoding_message_size,
max_encoding_message_size: self.max_encoding_message_size,
}
}
}
impl<T: RivaLanguageUnderstanding> Clone for _Inner<T> {
fn clone(&self) -> Self {
// Cheap clone: bumps the Arc refcount, shares the handler.
Self(Arc::clone(&self.0))
}
}
impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self.0)
}
}
// Fully-qualified service name, used by reflection and routing layers.
impl<T: RivaLanguageUnderstanding> tonic::server::NamedService
for RivaLanguageUnderstandingServer<T> {
const NAME: &'static str = "nvidia.riva.nlp.RivaLanguageUnderstanding";
}
}