pub trait RivaSpeechRecognition: Send + Sync + 'static {
    type StreamingRecognizeStream: Stream<Item = Result<StreamingRecognizeResponse, Status>> + Send + 'static;

    // Required methods
    fn recognize<'life0, 'async_trait>(
        &'life0 self,
        request: Request<RecognizeRequest>
    ) -> Pin<Box<dyn Future<Output = Result<Response<RecognizeResponse>, Status>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait;
    fn streaming_recognize<'life0, 'async_trait>(
        &'life0 self,
        request: Request<Streaming<StreamingRecognizeRequest>>
    ) -> Pin<Box<dyn Future<Output = Result<Response<Self::StreamingRecognizeStream>, Status>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait;
    fn get_riva_speech_recognition_config<'life0, 'async_trait>(
        &'life0 self,
        request: Request<RivaSpeechRecognitionConfigRequest>
    ) -> Pin<Box<dyn Future<Output = Result<Response<RivaSpeechRecognitionConfigResponse>, Status>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait;
}

Generated trait containing gRPC methods that should be implemented for use with RivaSpeechRecognitionServer.
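In practice the trait is implemented with the #[tonic::async_trait] attribute, which lets the methods be written as ordinary async fns instead of the expanded Pin<Box<dyn Future …>> signatures shown above. The skeleton below is a minimal sketch, not a reference implementation: the riva::asr module path, the MyAsrService name, and the listen address are placeholders, and every method returns an empty default response.

use tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response, Status, Streaming};

// Placeholder paths: point these at the generated `riva.asr` module in your crate.
use riva::asr::riva_speech_recognition_server::{
    RivaSpeechRecognition, RivaSpeechRecognitionServer,
};
use riva::asr::{
    RecognizeRequest, RecognizeResponse, RivaSpeechRecognitionConfigRequest,
    RivaSpeechRecognitionConfigResponse, StreamingRecognizeRequest,
    StreamingRecognizeResponse,
};

#[derive(Default)]
pub struct MyAsrService;

#[tonic::async_trait]
impl RivaSpeechRecognition for MyAsrService {
    // Any Send + 'static stream of results satisfies the bound; a channel-backed
    // stream is one convenient choice.
    type StreamingRecognizeStream =
        ReceiverStream<Result<StreamingRecognizeResponse, Status>>;

    async fn recognize(
        &self,
        _request: Request<RecognizeRequest>,
    ) -> Result<Response<RecognizeResponse>, Status> {
        // Placeholder: run offline recognition on the uploaded audio and fill in
        // the transcript; an empty default response is returned here.
        Ok(Response::new(RecognizeResponse::default()))
    }

    async fn streaming_recognize(
        &self,
        _request: Request<Streaming<StreamingRecognizeRequest>>,
    ) -> Result<Response<Self::StreamingRecognizeStream>, Status> {
        // Placeholder: the sender is dropped immediately, so the client sees an
        // empty response stream that closes right away.
        let (_tx, rx) = tokio::sync::mpsc::channel(1);
        Ok(Response::new(ReceiverStream::new(rx)))
    }

    async fn get_riva_speech_recognition_config(
        &self,
        _request: Request<RivaSpeechRecognitionConfigRequest>,
    ) -> Result<Response<RivaSpeechRecognitionConfigResponse>, Status> {
        Ok(Response::new(RivaSpeechRecognitionConfigResponse::default()))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic::transport::Server::builder()
        .add_service(RivaSpeechRecognitionServer::new(MyAsrService::default()))
        .serve("0.0.0.0:50051".parse()?)
        .await?;
    Ok(())
}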

Required Associated Types


type StreamingRecognizeStream: Stream<Item = Result<StreamingRecognizeResponse, Status>> + Send + 'static

Server streaming response type for the StreamingRecognize method.
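The trait does not prescribe a concrete type here; any stream satisfying the bound works. Two common choices, sketched as they would appear inside an impl RivaSpeechRecognition for … block (neither is mandated by the generated code):

// Channel-backed: pairs naturally with tokio::sync::mpsc on the server side.
type StreamingRecognizeStream =
    tokio_stream::wrappers::ReceiverStream<Result<StreamingRecognizeResponse, tonic::Status>>;

// Boxed trait object: convenient when the stream is built from combinators.
// type StreamingRecognizeStream = std::pin::Pin<Box<
//     dyn tokio_stream::Stream<Item = Result<StreamingRecognizeResponse, tonic::Status>> + Send
// >>;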

Required Methods


fn recognize<'life0, 'async_trait>(
    &'life0 self,
    request: Request<RecognizeRequest>
) -> Pin<Box<dyn Future<Output = Result<Response<RecognizeResponse>, Status>> + Send + 'async_trait>>
   where Self: 'async_trait,
         'life0: 'async_trait

Recognize expects a RecognizeRequest and returns a RecognizeResponse. This request will block until the audio is uploaded, processed, and a transcript is returned.
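From the caller's side these blocking semantics simply mean the future does not resolve until the transcript is ready. A minimal client sketch, assuming the companion tonic-generated client module (riva_speech_recognition_client) under the same placeholder riva::asr path:

use tonic::Request;
use riva::asr::riva_speech_recognition_client::RivaSpeechRecognitionClient;
use riva::asr::RecognizeRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        RivaSpeechRecognitionClient::connect("http://localhost:50051").await?;

    // This await only completes once the server has received the audio, run
    // recognition, and produced a transcript.
    let response = client
        .recognize(Request::new(RecognizeRequest::default()))
        .await?;
    println!("{:?}", response.into_inner());
    Ok(())
}

A real request would carry the audio bytes and a recognition config as defined in the Riva proto; the default-constructed request above only keeps the sketch self-contained.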


fn streaming_recognize<'life0, 'async_trait>(
    &'life0 self,
    request: Request<Streaming<StreamingRecognizeRequest>>
) -> Pin<Box<dyn Future<Output = Result<Response<Self::StreamingRecognizeStream>, Status>> + Send + 'async_trait>>
   where Self: 'async_trait,
         'life0: 'async_trait

StreamingRecognize is a non-blocking API call that allows audio data to be fed to the server in chunks as it becomes available. Depending on the configuration in the StreamingRecognizeRequest, intermediate results can be sent back to the client. Recognition ends when the stream is closed by the client.
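A minimal sketch of the server side of this pattern, assuming the ReceiverStream associated type used in the skeleton above; the recognition step itself is a placeholder:

async fn streaming_recognize(
    &self,
    request: Request<Streaming<StreamingRecognizeRequest>>,
) -> Result<Response<Self::StreamingRecognizeStream>, Status> {
    let mut inbound = request.into_inner();
    let (tx, rx) = tokio::sync::mpsc::channel(16);

    tokio::spawn(async move {
        loop {
            match inbound.message().await {
                Ok(Some(_chunk)) => {
                    // Placeholder: feed `_chunk` to the recognizer and forward any
                    // interim or final results as they become available.
                    if tx.send(Ok(StreamingRecognizeResponse::default())).await.is_err() {
                        break; // client stopped listening
                    }
                }
                // The client closed the request stream: recognition ends.
                Ok(None) => break,
                Err(status) => {
                    let _ = tx.send(Err(status)).await;
                    break;
                }
            }
        }
        // Dropping `tx` closes the response stream.
    });

    Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new(rx)))
}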


fn get_riva_speech_recognition_config<'life0, 'async_trait>(
    &'life0 self,
    request: Request<RivaSpeechRecognitionConfigRequest>
) -> Pin<Box<dyn Future<Output = Result<Response<RivaSpeechRecognitionConfigResponse>, Status>> + Send + 'async_trait>>
   where Self: 'async_trait,
         'life0: 'async_trait

Enables clients to request the configuration of the current ASR service, or a specific model within the service.
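A client-side query follows the same shape as the Recognize sketch above; reusing that client (again an assumption about the companion generated client module):

let response = client
    .get_riva_speech_recognition_config(Request::new(
        RivaSpeechRecognitionConfigRequest::default(),
    ))
    .await?;
// The response describes the configuration of the deployed ASR service/models.
println!("{:?}", response.into_inner());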

Implementors