#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ArrowSchema {
    /// IPC-serialized Arrow schema.
    #[prost(bytes, tag="1")]
    pub serialized_schema: ::std::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ArrowRecordBatch {
    /// IPC-serialized Arrow RecordBatch.
    #[prost(bytes, tag="1")]
    pub serialized_record_batch: ::std::vec::Vec<u8>,
    /// The count of rows in `serialized_record_batch`.
    #[prost(int64, tag="2")]
    pub row_count: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvroSchema {
    /// JSON-serialized Avro schema.
    #[prost(string, tag="1")]
    pub schema: ::std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvroRows {
    /// Binary-serialized rows in a block.
    #[prost(bytes, tag="1")]
    pub serialized_binary_rows: ::std::vec::Vec<u8>,
    /// The count of rows in the block.
    #[prost(int64, tag="2")]
    pub row_count: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadSession {
    /// Output only. Unique identifier for the session.
    #[prost(string, tag="1")]
    pub name: ::std::string::String,
    /// Output only. Time at which the session becomes invalid.
    #[prost(message, optional, tag="2")]
    pub expire_time: ::std::option::Option<::prost_types::Timestamp>,
    /// Immutable. Data format of the output data.
    #[prost(enumeration="DataFormat", tag="3")]
    pub data_format: i32,
    /// Immutable. Table that this ReadSession is reading from.
    #[prost(string, tag="6")]
    pub table: ::std::string::String,
    /// Optional. Modifiers applied when reading from the specified table.
    #[prost(message, optional, tag="7")]
    pub table_modifiers: ::std::option::Option<read_session::TableModifiers>,
    /// Optional. Read options for this session (e.g. column selection, filters).
    #[prost(message, optional, tag="8")]
    pub read_options: ::std::option::Option<read_session::TableReadOptions>,
    /// Output only. A list of streams created with the session.
    #[prost(message, repeated, tag="10")]
    pub streams: ::std::vec::Vec<ReadStream>,
    /// The schema for the read, in whichever format was requested.
    #[prost(oneof="read_session::Schema", tags="4, 5")]
    pub schema: ::std::option::Option<read_session::Schema>,
}
pub mod read_session {
    /// Additional attributes when reading a table.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct TableModifiers {
        /// The snapshot time of the table. If not set, interpreted as now.
        #[prost(message, optional, tag="1")]
        pub snapshot_time: ::std::option::Option<::prost_types::Timestamp>,
    }
    /// Options dictating how the table is read.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct TableReadOptions {
        /// Names of the fields in the table that should be read. If empty, all
        /// fields will be read.
        #[prost(string, repeated, tag="1")]
        pub selected_fields: ::std::vec::Vec<::std::string::String>,
        /// SQL text filtering statement, similar to a WHERE clause in a query.
        #[prost(string, tag="2")]
        pub row_restriction: ::std::string::String,
    }
    /// The schema for the read, in whichever format was requested.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Schema {
        /// Avro schema.
        #[prost(message, tag="4")]
        AvroSchema(super::AvroSchema),
        /// Arrow schema.
        #[prost(message, tag="5")]
        ArrowSchema(super::ArrowSchema),
    }
}
/// Information about a single stream that gets data out of the storage system.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadStream {
    /// Output only. Name of the stream.
    #[prost(string, tag="1")]
    pub name: ::std::string::String,
}
/// Data format for input or output data.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DataFormat {
    Unspecified = 0,
    /// Avro is a standard open-source row-based file format.
    Avro = 1,
    /// Arrow is a standard open-source column-based message format.
    Arrow = 2,
}
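// Hand-written sketch, not part of the generated output: prost stores enum
// fields as raw `i32`, so `ReadSession::data_format` is converted back to a
// `DataFormat` through the generated `from_i32`, which returns `None` for
// wire values this version of the bindings does not know about.
#[cfg(test)]
mod data_format_example {
    use super::*;

    #[test]
    fn enum_fields_round_trip_through_i32() {
        let session = ReadSession {
            data_format: DataFormat::Arrow as i32,
            ..Default::default()
        };
        assert_eq!(
            DataFormat::from_i32(session.data_format),
            Some(DataFormat::Arrow)
        );
        // An unknown wire value decodes as `None` rather than panicking.
        assert_eq!(DataFormat::from_i32(42), None);
    }
}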
/// Request message for `CreateReadSession`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateReadSessionRequest {
    /// Required. The request project that owns the session, in the form
    /// `projects/{project_id}`.
    #[prost(string, tag="1")]
    pub parent: ::std::string::String,
    /// Required. Session to be created.
    #[prost(message, optional, tag="2")]
    pub read_session: ::std::option::Option<ReadSession>,
    /// Max initial number of streams. If unset or zero, the server will
    /// choose a stream count that produces reasonable throughput.
    #[prost(int32, tag="3")]
    pub max_stream_count: i32,
}
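// Hand-written sketch, not part of the generated output: assembling a
// `CreateReadSessionRequest` with column projection and a row filter. The
// project and table paths are placeholders, not real resources.
#[cfg(test)]
mod create_read_session_request_example {
    use super::*;

    #[test]
    fn builds_a_request() {
        let request = CreateReadSessionRequest {
            parent: "projects/my-project".to_string(),
            read_session: Some(ReadSession {
                table: "projects/my-project/datasets/my_dataset/tables/my_table".to_string(),
                data_format: DataFormat::Avro as i32,
                read_options: Some(read_session::TableReadOptions {
                    selected_fields: vec!["name".to_string(), "state".to_string()],
                    row_restriction: "state = \"CA\"".to_string(),
                }),
                ..Default::default()
            }),
            // Ask the server for at most four parallel streams.
            max_stream_count: 4,
        };
        assert_eq!(request.max_stream_count, 4);
    }
}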
/// Request message for `ReadRows`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadRowsRequest {
    /// Required. Stream to read rows from.
    #[prost(string, tag="1")]
    pub read_stream: ::std::string::String,
    /// The offset requested must be less than the last row read from Read.
    /// Requesting a larger offset is undefined. If not specified, start
    /// reading from offset zero.
    #[prost(int64, tag="2")]
    pub offset: i64,
}
/// Information on whether the current connection is being throttled.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ThrottleState {
    /// How much this connection is being throttled. Zero means no throttling,
    /// 100 means fully throttled.
    #[prost(int32, tag="1")]
    pub throttle_percent: i32,
}
/// Estimated stream statistics for a given `ReadStream`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamStats {
    /// Represents the progress of the current stream.
    #[prost(message, optional, tag="2")]
    pub progress: ::std::option::Option<stream_stats::Progress>,
}
pub mod stream_stats {
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Progress {
        /// The fraction of rows assigned to the stream that have been
        /// processed by the server so far, not including the rows in the
        /// current response message.
        #[prost(double, tag="1")]
        pub at_response_start: f64,
        /// Similar to `at_response_start`, except that this value includes
        /// the rows in the current response.
        #[prost(double, tag="2")]
        pub at_response_end: f64,
    }
}
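// Hand-written sketch, not part of the generated output: interpolating
// overall stream progress while the rows of a single response are being
// consumed, using the formula the API documents for these two fields.
#[allow(dead_code)]
fn interpolated_progress(
    progress: &stream_stats::Progress,
    rows_done: i64,
    rows_in_response: i64,
) -> f64 {
    progress.at_response_start
        + (progress.at_response_end - progress.at_response_start)
            * rows_done as f64
            / rows_in_response as f64
}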
/// Response from calling `ReadRows`; may include row data, progress, and
/// throttling information.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadRowsResponse {
    /// Number of serialized rows in the rows block.
    #[prost(int64, tag="6")]
    pub row_count: i64,
    /// Statistics for the stream.
    #[prost(message, optional, tag="2")]
    pub stats: ::std::option::Option<StreamStats>,
    /// Throttling state. If unset, the latest response still describes the
    /// current throttling status.
    #[prost(message, optional, tag="5")]
    pub throttle_state: ::std::option::Option<ThrottleState>,
    /// Row data, returned in the format specified during session creation.
    #[prost(oneof="read_rows_response::Rows", tags="3, 4")]
    pub rows: ::std::option::Option<read_rows_response::Rows>,
}
pub mod read_rows_response {
    /// Row data, returned in the format specified during session creation.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Rows {
        /// Serialized row data in Avro format.
        #[prost(message, tag="3")]
        AvroRows(super::AvroRows),
        /// Serialized row data in Arrow RecordBatch format.
        #[prost(message, tag="4")]
        ArrowRecordBatch(super::ArrowRecordBatch),
    }
}
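// Hand-written sketch, not part of the generated output: dispatching on the
// `rows` oneof of a `ReadRowsResponse`. Decoding the Avro blocks or Arrow
// record batches themselves would be done with an Avro or Arrow IPC library
// and is out of scope here.
#[allow(dead_code)]
fn serialized_payload_len(response: &ReadRowsResponse) -> usize {
    match &response.rows {
        Some(read_rows_response::Rows::AvroRows(rows)) => rows.serialized_binary_rows.len(),
        Some(read_rows_response::Rows::ArrowRecordBatch(batch)) => {
            batch.serialized_record_batch.len()
        }
        // A response may carry only stats or throttle state, with no row data.
        None => 0,
    }
}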
/// Request message for `SplitReadStream`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SplitReadStreamRequest {
    /// Required. Name of the stream to split.
    #[prost(string, tag="1")]
    pub name: ::std::string::String,
    /// A value in the range (0.0, 1.0) that specifies the fractional point at
    /// which the original stream should be split. Because the server-side
    /// unit for assigning data is collections of rows, this fraction will
    /// always map to a data storage boundary on the server side.
    #[prost(double, tag="2")]
    pub fraction: f64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SplitReadStreamResponse {
    /// Primary stream, which contains the beginning portion of the original
    /// stream. An empty value indicates that the original stream can no
    /// longer be split.
    #[prost(message, optional, tag="1")]
    pub primary_stream: ::std::option::Option<ReadStream>,
    /// Remainder stream, which contains the tail of the original stream. An
    /// empty value indicates that the original stream can no longer be split.
    #[prost(message, optional, tag="2")]
    pub remainder_stream: ::std::option::Option<ReadStream>,
}
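// Hand-written sketch, not part of the generated output: a request that asks
// the server to split a stream roughly in half. Rows before the split point
// remain in the primary stream; the rest go to the remainder stream.
#[allow(dead_code)]
fn split_in_half(stream: &ReadStream) -> SplitReadStreamRequest {
    SplitReadStreamRequest {
        name: stream.name.clone(),
        fraction: 0.5,
    }
}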
/// Generated client implementations.
pub mod big_query_read_client {
    #![allow(unused_variables, dead_code, missing_docs)]
    use tonic::codegen::*;
    /// BigQuery Read API.
    ///
    /// The Read API can be used to read data from BigQuery.
    pub struct BigQueryReadClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl BigQueryReadClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: std::convert::TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> BigQueryReadClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::ResponseBody: Body + HttpBody + Send + 'static,
        T::Error: Into<StdError>,
        <T::ResponseBody as HttpBody>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
            let inner = tonic::client::Grpc::with_interceptor(inner, interceptor);
            Self { inner }
        }
        /// Creates a new read session. A read session divides the contents of a
        /// BigQuery table into one or more streams, which can then be used to read
        /// data from the table. The read session also specifies properties of the
        /// data to be read, such as a list of columns or a push-down filter describing
        /// the rows to be returned.
        ///
        /// A particular row can be read by at most one stream. When the caller has
        /// reached the end of each stream in the session, then all the data in the
        /// table has been read.
        ///
        /// Data is assigned to each stream such that roughly the same number of
        /// rows can be read from each stream. Because the server-side unit for
        /// assigning data is collections of rows, the API does not guarantee that
        /// each stream will return the same number of rows. Additionally, the
        /// limits are enforced based on the number of pre-filtered rows, so some
        /// filters can lead to lopsided assignments.
        ///
        /// Read sessions automatically expire 24 hours after they are created and do
        /// not require manual clean-up by the caller.
        pub async fn create_read_session(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateReadSessionRequest>,
        ) -> Result<tonic::Response<super::ReadSession>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.cloud.bigquery.storage.v1.BigQueryRead/CreateReadSession",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// Reads rows from the stream in the format prescribed by the ReadSession.
        /// Each response contains one or more table rows, up to a maximum of 100 MiB
        /// per response; read requests which attempt to read individual rows larger
        /// than 100 MiB will fail.
        ///
        /// Each request also returns a set of stream statistics reflecting the current
        /// state of the stream.
        pub async fn read_rows(
            &mut self,
            request: impl tonic::IntoRequest<super::ReadRowsRequest>,
        ) -> Result<tonic::Response<tonic::codec::Streaming<super::ReadRowsResponse>>, tonic::Status>
        {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.cloud.bigquery.storage.v1.BigQueryRead/ReadRows",
            );
            self.inner
                .server_streaming(request.into_request(), path, codec)
                .await
        }
        /// Splits a given `ReadStream` into two `ReadStream` objects. These
        /// `ReadStream` objects are referred to as the primary and the residual
        /// streams of the split. The original `ReadStream` can still be read from in
        /// the same manner as before. Both of the returned `ReadStream` objects can
        /// also be read from, and the rows returned by both child streams will be
        /// the same as the rows read from the original stream.
        ///
        /// Moreover, the two child streams will be allocated back-to-back in the
        /// original `ReadStream`. Concretely, it is guaranteed that for streams
        /// original, primary, and residual, that original[0-j] = primary[0-j] and
        /// original[j-n] = residual[0-m] once the streams have been read to
        /// completion.
        pub async fn split_read_stream(
            &mut self,
            request: impl tonic::IntoRequest<super::SplitReadStreamRequest>,
        ) -> Result<tonic::Response<super::SplitReadStreamResponse>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.cloud.bigquery.storage.v1.BigQueryRead/SplitReadStream",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
    }
    impl<T: Clone> Clone for BigQueryReadClient<T> {
        fn clone(&self) -> Self {
            Self {
                inner: self.inner.clone(),
            }
        }
    }
    impl<T> std::fmt::Debug for BigQueryReadClient<T> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "BigQueryReadClient {{ ... }}")
        }
    }
}
/// Generated server implementations.
pub mod big_query_read_server {
    #![allow(unused_variables, dead_code, missing_docs)]
    use tonic::codegen::*;
    /// Generated trait containing gRPC methods that should be implemented for use with BigQueryReadServer.
    #[async_trait]
    pub trait BigQueryRead: Send + Sync + 'static {
        /// Creates a new read session. A read session divides the contents of a
        /// BigQuery table into one or more streams, which can then be used to read
        /// data from the table. The read session also specifies properties of the
        /// data to be read, such as a list of columns or a push-down filter describing
        /// the rows to be returned.
        ///
        /// A particular row can be read by at most one stream. When the caller has
        /// reached the end of each stream in the session, then all the data in the
        /// table has been read.
        ///
        /// Data is assigned to each stream such that roughly the same number of
        /// rows can be read from each stream. Because the server-side unit for
        /// assigning data is collections of rows, the API does not guarantee that
        /// each stream will return the same number of rows. Additionally, the
        /// limits are enforced based on the number of pre-filtered rows, so some
        /// filters can lead to lopsided assignments.
        ///
        /// Read sessions automatically expire 24 hours after they are created and do
        /// not require manual clean-up by the caller.
        async fn create_read_session(
            &self,
            request: tonic::Request<super::CreateReadSessionRequest>,
        ) -> Result<tonic::Response<super::ReadSession>, tonic::Status>;
        /// Server streaming response type for the ReadRows method.
        type ReadRowsStream: Stream<Item = Result<super::ReadRowsResponse, tonic::Status>>
            + Send
            + Sync
            + 'static;
        /// Reads rows from the stream in the format prescribed by the ReadSession.
        /// Each response contains one or more table rows, up to a maximum of 100 MiB
        /// per response; read requests which attempt to read individual rows larger
        /// than 100 MiB will fail.
        ///
        /// Each request also returns a set of stream statistics reflecting the current
        /// state of the stream.
        async fn read_rows(
            &self,
            request: tonic::Request<super::ReadRowsRequest>,
        ) -> Result<tonic::Response<Self::ReadRowsStream>, tonic::Status>;
        /// Splits a given `ReadStream` into two `ReadStream` objects. These
        /// `ReadStream` objects are referred to as the primary and the residual
        /// streams of the split. The original `ReadStream` can still be read from in
        /// the same manner as before. Both of the returned `ReadStream` objects can
        /// also be read from, and the rows returned by both child streams will be
        /// the same as the rows read from the original stream.
        ///
        /// Moreover, the two child streams will be allocated back-to-back in the
        /// original `ReadStream`. Concretely, it is guaranteed that for streams
        /// original, primary, and residual, that original[0-j] = primary[0-j] and
        /// original[j-n] = residual[0-m] once the streams have been read to
        /// completion.
        async fn split_read_stream(
            &self,
            request: tonic::Request<super::SplitReadStreamRequest>,
        ) -> Result<tonic::Response<super::SplitReadStreamResponse>, tonic::Status>;
    }
    /// BigQuery Read API.
    ///
    /// The Read API can be used to read data from BigQuery.
    #[derive(Debug)]
    #[doc(hidden)]
    pub struct BigQueryReadServer<T: BigQueryRead> {
        inner: _Inner<T>,
    }
    struct _Inner<T>(Arc<T>, Option<tonic::Interceptor>);
    impl<T: BigQueryRead> BigQueryReadServer<T> {
        pub fn new(inner: T) -> Self {
            let inner = Arc::new(inner);
            let inner = _Inner(inner, None);
            Self { inner }
        }
        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
            let inner = Arc::new(inner);
            let inner = _Inner(inner, Some(interceptor.into()));
            Self { inner }
        }
    }
    impl<T, B> Service<http::Request<B>> for BigQueryReadServer<T>
    where
        T: BigQueryRead,
        B: HttpBody + Send + Sync + 'static,
        B::Error: Into<StdError> + Send + 'static,
    {
        type Response = http::Response<tonic::body::BoxBody>;
        type Error = Never;
        type Future = BoxFuture<Self::Response, Self::Error>;
        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }
        fn call(&mut self, req: http::Request<B>) -> Self::Future {
            let inner = self.inner.clone();
            match req.uri().path() {
                "/google.cloud.bigquery.storage.v1.BigQueryRead/CreateReadSession" => {
                    #[allow(non_camel_case_types)]
                    struct CreateReadSessionSvc<T: BigQueryRead>(pub Arc<T>);
                    impl<T: BigQueryRead>
                        tonic::server::UnaryService<super::CreateReadSessionRequest>
                        for CreateReadSessionSvc<T>
                    {
                        type Response = super::ReadSession;
                        type Future = BoxFuture<tonic::Response<Self::Response>, tonic::Status>;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::CreateReadSessionRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { inner.create_read_session(request).await };
                            Box::pin(fut)
                        }
                    }
                    let inner = self.inner.clone();
                    let fut = async move {
                        let interceptor = inner.1.clone();
                        let inner = inner.0;
                        let method = CreateReadSessionSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = if let Some(interceptor) = interceptor {
                            tonic::server::Grpc::with_interceptor(codec, interceptor)
                        } else {
                            tonic::server::Grpc::new(codec)
                        };
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/google.cloud.bigquery.storage.v1.BigQueryRead/ReadRows" => {
                    #[allow(non_camel_case_types)]
                    struct ReadRowsSvc<T: BigQueryRead>(pub Arc<T>);
                    impl<T: BigQueryRead>
                        tonic::server::ServerStreamingService<super::ReadRowsRequest>
                        for ReadRowsSvc<T>
                    {
                        type Response = super::ReadRowsResponse;
                        type ResponseStream = T::ReadRowsStream;
                        type Future =
                            BoxFuture<tonic::Response<Self::ResponseStream>, tonic::Status>;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::ReadRowsRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { inner.read_rows(request).await };
                            Box::pin(fut)
                        }
                    }
                    let inner = self.inner.clone();
                    let fut = async move {
                        let interceptor = inner.1;
                        let inner = inner.0;
                        let method = ReadRowsSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = if let Some(interceptor) = interceptor {
                            tonic::server::Grpc::with_interceptor(codec, interceptor)
                        } else {
                            tonic::server::Grpc::new(codec)
                        };
                        let res = grpc.server_streaming(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/google.cloud.bigquery.storage.v1.BigQueryRead/SplitReadStream" => {
                    #[allow(non_camel_case_types)]
                    struct SplitReadStreamSvc<T: BigQueryRead>(pub Arc<T>);
                    impl<T: BigQueryRead>
                        tonic::server::UnaryService<super::SplitReadStreamRequest>
                        for SplitReadStreamSvc<T>
                    {
                        type Response = super::SplitReadStreamResponse;
                        type Future = BoxFuture<tonic::Response<Self::Response>, tonic::Status>;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::SplitReadStreamRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { inner.split_read_stream(request).await };
                            Box::pin(fut)
                        }
                    }
                    let inner = self.inner.clone();
                    let fut = async move {
                        let interceptor = inner.1.clone();
                        let inner = inner.0;
                        let method = SplitReadStreamSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = if let Some(interceptor) = interceptor {
                            tonic::server::Grpc::with_interceptor(codec, interceptor)
                        } else {
                            tonic::server::Grpc::new(codec)
                        };
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                _ => Box::pin(async move {
                    Ok(http::Response::builder()
                        .status(200)
                        .header("grpc-status", "12")
                        .body(tonic::body::BoxBody::empty())
                        .unwrap())
                }),
            }
        }
    }
    impl<T: BigQueryRead> Clone for BigQueryReadServer<T> {
        fn clone(&self) -> Self {
            let inner = self.inner.clone();
            Self { inner }
        }
    }
    impl<T: BigQueryRead> Clone for _Inner<T> {
        fn clone(&self) -> Self {
            Self(self.0.clone(), self.1.clone())
        }
    }
    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{:?}", self.0)
        }
    }
    impl<T: BigQueryRead> tonic::transport::NamedService for BigQueryReadServer<T> {
        const NAME: &'static str = "google.cloud.bigquery.storage.v1.BigQueryRead";
    }
}
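// Hand-written usage sketch, not part of the generated output: create a read
// session and drain its single stream. TLS and authentication setup are
// omitted; real calls to bigquerystorage.googleapis.com must attach OAuth
// credentials, e.g. via `BigQueryReadClient::with_interceptor`. Resource
// names below are placeholders.
#[allow(dead_code)]
async fn example_read_table() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = big_query_read_client::BigQueryReadClient::connect(
        "https://bigquerystorage.googleapis.com",
    )
    .await?;

    // Ask for at most one stream so the single loop below drains the session.
    let session = client
        .create_read_session(CreateReadSessionRequest {
            parent: "projects/my-project".to_string(),
            read_session: Some(ReadSession {
                table: "projects/my-project/datasets/my_dataset/tables/my_table".to_string(),
                data_format: DataFormat::Avro as i32,
                ..Default::default()
            }),
            max_stream_count: 1,
        })
        .await?
        .into_inner();

    let stream_name = session
        .streams
        .first()
        .map(|s| s.name.clone())
        .ok_or("server returned no streams")?;

    // `offset: 0` reads from the beginning; a non-zero offset resumes a
    // stream after a transient failure.
    let mut responses = client
        .read_rows(ReadRowsRequest {
            read_stream: stream_name,
            offset: 0,
        })
        .await?
        .into_inner();

    while let Some(response) = responses.message().await? {
        println!("received a block of {} rows", response.row_count);
    }
    Ok(())
}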