#[derive(Clone, PartialEq, ::prost::Message)]
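#[doc = " Arrow schema for the session, serialized as a single Arrow IPC message."]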
pub struct ArrowSchema {
#[prost(bytes = "vec", tag = "1")]
pub serialized_schema: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
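#[doc = " A serialized Arrow RecordBatch together with the number of rows it"]
#[doc = " contains."]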
pub struct ArrowRecordBatch {
#[prost(bytes = "vec", tag = "1")]
pub serialized_record_batch: ::prost::alloc::vec::Vec<u8>,
#[prost(int64, tag = "2")]
pub row_count: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
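#[doc = " Avro schema for the session, given as its JSON schema definition."]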
pub struct AvroSchema {
#[prost(string, tag = "1")]
pub schema: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
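#[doc = " A block of Avro binary-encoded rows and the number of rows in the block."]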
pub struct AvroRows {
#[prost(bytes = "vec", tag = "1")]
pub serialized_binary_rows: ::prost::alloc::vec::Vec<u8>,
#[prost(int64, tag = "2")]
pub row_count: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
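#[doc = " Options for reading a table: the columns to select and a SQL-style"]
#[doc = " predicate (`row_restriction`) filtering the rows returned."]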
pub struct TableReadOptions {
#[prost(string, repeated, tag = "1")]
pub selected_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
#[prost(string, tag = "2")]
pub row_restriction: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
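#[doc = " Fully qualified reference to a BigQuery table, given as project, dataset,"]
#[doc = " and table IDs."]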
pub struct TableReference {
#[prost(string, tag = "1")]
pub project_id: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub dataset_id: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub table_id: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
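#[doc = " Table modifiers applied at read time; `snapshot_time` reads the table as"]
#[doc = " of that instant."]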
pub struct TableModifiers {
#[prost(message, optional, tag = "1")]
pub snapshot_time: ::core::option::Option<::prost_types::Timestamp>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
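#[doc = " A single stream of rows within a read session, identified by its resource"]
#[doc = " name."]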
pub struct Stream {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
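#[doc = " A position within a stream: the stream itself plus a row offset into it."]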
pub struct StreamPosition {
#[prost(message, optional, tag = "1")]
pub stream: ::core::option::Option<Stream>,
#[prost(int64, tag = "2")]
pub offset: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
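#[doc = " A read session: its resource name, expiry, initial streams, source table,"]
#[doc = " and schema in the requested data format."]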
pub struct ReadSession {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub expire_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, repeated, tag = "4")]
pub streams: ::prost::alloc::vec::Vec<Stream>,
#[prost(message, optional, tag = "7")]
pub table_reference: ::core::option::Option<TableReference>,
#[prost(message, optional, tag = "8")]
pub table_modifiers: ::core::option::Option<TableModifiers>,
#[prost(enumeration = "ShardingStrategy", tag = "9")]
pub sharding_strategy: i32,
#[prost(oneof = "read_session::Schema", tags = "5, 6")]
pub schema: ::core::option::Option<read_session::Schema>,
}
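#[doc = " Nested message and enum types in `ReadSession`."]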
pub mod read_session {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Schema {
#[prost(message, tag = "5")]
AvroSchema(super::AvroSchema),
#[prost(message, tag = "6")]
ArrowSchema(super::ArrowSchema),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
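#[doc = " Request message for `create_read_session`."]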
pub struct CreateReadSessionRequest {
#[prost(message, optional, tag = "1")]
pub table_reference: ::core::option::Option<TableReference>,
#[prost(string, tag = "6")]
pub parent: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub table_modifiers: ::core::option::Option<TableModifiers>,
#[prost(int32, tag = "3")]
pub requested_streams: i32,
#[prost(message, optional, tag = "4")]
pub read_options: ::core::option::Option<TableReadOptions>,
#[prost(enumeration = "DataFormat", tag = "5")]
pub format: i32,
#[prost(enumeration = "ShardingStrategy", tag = "7")]
pub sharding_strategy: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
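#[doc = " Request message for `read_rows`; identifies the stream and offset to read"]
#[doc = " from."]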
pub struct ReadRowsRequest {
#[prost(message, optional, tag = "1")]
pub read_position: ::core::option::Option<StreamPosition>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
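#[doc = " Progress information for a given stream."]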
pub struct StreamStatus {
#[prost(int64, tag = "1")]
pub estimated_row_count: i64,
#[prost(float, tag = "2")]
pub fraction_consumed: f32,
#[prost(message, optional, tag = "4")]
pub progress: ::core::option::Option<Progress>,
#[prost(bool, tag = "3")]
pub is_splittable: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
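#[doc = " Fractions of the stream processed at the start and at the end of the"]
#[doc = " current response, on a [0.0, 1.0] scale."]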
pub struct Progress {
#[prost(float, tag = "1")]
pub at_response_start: f32,
#[prost(float, tag = "2")]
pub at_response_end: f32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
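#[doc = " Information on whether the current connection is being throttled;"]
#[doc = " `throttle_percent` runs from 0 (unthrottled) to 100 (fully throttled)."]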
pub struct ThrottleStatus {
#[prost(int32, tag = "1")]
pub throttle_percent: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
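#[doc = " A block of rows in the session's requested format, along with stream"]
#[doc = " status and throttling information."]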
pub struct ReadRowsResponse {
#[prost(int64, tag = "6")]
pub row_count: i64,
#[prost(message, optional, tag = "2")]
pub status: ::core::option::Option<StreamStatus>,
#[prost(message, optional, tag = "5")]
pub throttle_status: ::core::option::Option<ThrottleStatus>,
#[prost(oneof = "read_rows_response::Rows", tags = "3, 4")]
pub rows: ::core::option::Option<read_rows_response::Rows>,
}
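#[doc = " Nested message and enum types in `ReadRowsResponse`."]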
pub mod read_rows_response {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Rows {
#[prost(message, tag = "3")]
AvroRows(super::AvroRows),
#[prost(message, tag = "4")]
ArrowRecordBatch(super::ArrowRecordBatch),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
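#[doc = " Request message for `batch_create_read_session_streams`."]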
pub struct BatchCreateReadSessionStreamsRequest {
#[prost(message, optional, tag = "1")]
pub session: ::core::option::Option<ReadSession>,
#[prost(int32, tag = "2")]
pub requested_streams: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
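#[doc = " Response message containing the newly created streams."]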
pub struct BatchCreateReadSessionStreamsResponse {
#[prost(message, repeated, tag = "1")]
pub streams: ::prost::alloc::vec::Vec<Stream>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
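#[doc = " Request message for `finalize_stream`."]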
pub struct FinalizeStreamRequest {
#[prost(message, optional, tag = "2")]
pub stream: ::core::option::Option<Stream>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
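#[doc = " Request message for `split_read_stream`; `fraction` is the approximate"]
#[doc = " point in [0.0, 1.0] at which to split."]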
pub struct SplitReadStreamRequest {
#[prost(message, optional, tag = "1")]
pub original_stream: ::core::option::Option<Stream>,
#[prost(float, tag = "2")]
pub fraction: f32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
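#[doc = " Response message containing the two streams produced by the split."]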
pub struct SplitReadStreamResponse {
#[prost(message, optional, tag = "1")]
pub primary_stream: ::core::option::Option<Stream>,
#[prost(message, optional, tag = "2")]
pub remainder_stream: ::core::option::Option<Stream>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
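#[doc = " Data format for the rows returned by a read session."]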
#[repr(i32)]
pub enum DataFormat {
Unspecified = 0,
Avro = 1,
Arrow = 3,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
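#[doc = " Strategy for assigning rows to streams: `Liquid` hands out work"]
#[doc = " dynamically, while `Balanced` divides the table evenly at session"]
#[doc = " creation."]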
#[repr(i32)]
pub enum ShardingStrategy {
Unspecified = 0,
Liquid = 1,
Balanced = 2,
}
#[doc = r" Generated client implementations."]
pub mod big_query_storage_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " BigQuery storage API."]
#[doc = ""]
#[doc = " The BigQuery storage API can be used to read data stored in BigQuery."]
#[derive(Debug, Clone)]
pub struct BigQueryStorageClient<T> {
inner: tonic::client::Grpc<T>,
}
impl<T> BigQueryStorageClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> BigQueryStorageClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
BigQueryStorageClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " Creates a new read session. A read session divides the contents of a"]
#[doc = " BigQuery table into one or more streams, which can then be used to read"]
#[doc = " data from the table. The read session also specifies properties of the"]
#[doc = " data to be read, such as a list of columns or a push-down filter describing"]
#[doc = " the rows to be returned."]
#[doc = ""]
#[doc = " A particular row can be read by at most one stream. When the caller has"]
#[doc = " reached the end of each stream in the session, then all the data in the"]
#[doc = " table has been read."]
#[doc = ""]
#[doc = " Read sessions automatically expire 24 hours after they are created and do"]
#[doc = " not require manual clean-up by the caller."]
pub async fn create_read_session(
&mut self,
request: impl tonic::IntoRequest<super::CreateReadSessionRequest>,
) -> Result<tonic::Response<super::ReadSession>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Reads rows from the table in the format prescribed by the read session."]
#[doc = " Each response contains one or more table rows, up to a maximum of 10 MiB"]
#[doc = " per response; read requests which attempt to read individual rows larger"]
#[doc = " than this will fail."]
#[doc = ""]
#[doc = " Each request also returns a set of stream statistics reflecting the"]
#[doc = " estimated total number of rows in the read stream. This number is computed"]
#[doc = " based on the total table size and the number of active streams in the read"]
#[doc = " session, and may change as other streams continue to read data."]
pub async fn read_rows(
&mut self,
request: impl tonic::IntoRequest<super::ReadRowsRequest>,
) -> Result<tonic::Response<tonic::codec::Streaming<super::ReadRowsResponse>>, tonic::Status>
{
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows",
);
self.inner.server_streaming(request.into_request(), path, codec).await
}
#[doc = " Creates additional streams for a ReadSession. This API can be used to"]
#[doc = " dynamically adjust the parallelism of a batch processing task upwards by"]
#[doc = " adding additional workers."]
pub async fn batch_create_read_session_streams(
&mut self,
request: impl tonic::IntoRequest<super::BatchCreateReadSessionStreamsRequest>,
) -> Result<tonic::Response<super::BatchCreateReadSessionStreamsResponse>, tonic::Status>
{
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Triggers the graceful termination of a single stream in a ReadSession. This"]
#[doc = " API can be used to dynamically adjust the parallelism of a batch processing"]
#[doc = " task downwards without losing data."]
#[doc = ""]
#[doc = " This API does not delete the stream -- it remains visible in the"]
#[doc = " ReadSession, and any data processed by the stream is not released to other"]
#[doc = " streams. However, no additional data will be assigned to the stream once"]
#[doc = " this call completes. Callers must continue reading data on the stream until"]
#[doc = " the end of the stream is reached so that data which has already been"]
#[doc = " assigned to the stream will be processed."]
#[doc = ""]
#[doc = " This method will return an error if there are no other live streams"]
#[doc = " in the Session, or if SplitReadStream() has been called on the given"]
#[doc = " Stream."]
pub async fn finalize_stream(
&mut self,
request: impl tonic::IntoRequest<super::FinalizeStreamRequest>,
) -> Result<tonic::Response<()>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Splits a given read stream into two Streams. These streams are referred to"]
#[doc = " as the primary and the residual of the split. The original stream can still"]
#[doc = " be read from in the same manner as before. Both of the returned streams can"]
#[doc = " also be read from, and the total rows return by both child streams will be"]
#[doc = " the same as the rows read from the original stream."]
#[doc = ""]
#[doc = " Moreover, the two child streams will be allocated back to back in the"]
#[doc = " original Stream. Concretely, it is guaranteed that for streams Original,"]
#[doc = " Primary, and Residual, that Original[0-j] = Primary[0-j] and"]
#[doc = " Original[j-n] = Residual[0-m] once the streams have been read to"]
#[doc = " completion."]
#[doc = ""]
#[doc = " This method is guaranteed to be idempotent."]
pub async fn split_read_stream(
&mut self,
request: impl tonic::IntoRequest<super::SplitReadStreamRequest>,
) -> Result<tonic::Response<super::SplitReadStreamResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream",
);
self.inner.unary(request.into_request(), path, codec).await
}
}
}