#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ArrowSchema {
#[prost(bytes = "vec", tag = "1")]
pub serialized_schema: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ArrowRecordBatch {
#[prost(bytes = "vec", tag = "1")]
pub serialized_record_batch: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ArrowSerializationOptions {
#[prost(enumeration = "arrow_serialization_options::Format", tag = "1")]
pub format: i32,
}
pub mod arrow_serialization_options {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Format {
Unspecified = 0,
Arrow014 = 1,
Arrow015 = 2,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvroSchema {
#[prost(string, tag = "1")]
pub schema: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvroRows {
#[prost(bytes = "vec", tag = "1")]
pub serialized_binary_rows: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ProtoSchema {
#[prost(message, optional, tag = "1")]
pub proto_descriptor: ::core::option::Option<::prost_types::DescriptorProto>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ProtoRows {
#[prost(bytes = "vec", repeated, tag = "1")]
pub serialized_rows: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
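#[doc = " Schema of a table."]
#[doc = ""]
#[doc = " A minimal construction sketch follows. The field name is a hypothetical"]
#[doc = " placeholder, and the `use` path is illustrative; adjust it to this"]
#[doc = " crate's actual module path."]
#[doc = ""]
#[doc = " ```ignore"]
#[doc = " use bigquery_storage_v1beta2::{table_field_schema, TableFieldSchema, TableSchema};"]
#[doc = ""]
#[doc = " let schema = TableSchema {"]
#[doc = "     fields: vec![TableFieldSchema {"]
#[doc = "         name: \"user_id\".to_string(),"]
#[doc = "         // Enum-typed fields are stored as `i32`; cast the generated enums."]
#[doc = "         r#type: table_field_schema::Type::Int64 as i32,"]
#[doc = "         mode: table_field_schema::Mode::Required as i32,"]
#[doc = "         ..Default::default()"]
#[doc = "     }],"]
#[doc = " };"]
#[doc = " ```"]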
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableSchema {
#[prost(message, repeated, tag = "1")]
pub fields: ::prost::alloc::vec::Vec<TableFieldSchema>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableFieldSchema {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(enumeration = "table_field_schema::Type", tag = "2")]
pub r#type: i32,
#[prost(enumeration = "table_field_schema::Mode", tag = "3")]
pub mode: i32,
#[prost(message, repeated, tag = "4")]
pub fields: ::prost::alloc::vec::Vec<TableFieldSchema>,
#[prost(string, tag = "6")]
pub description: ::prost::alloc::string::String,
}
pub mod table_field_schema {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Type {
Unspecified = 0,
String = 1,
Int64 = 2,
Double = 3,
Struct = 4,
Bytes = 5,
Bool = 6,
Timestamp = 7,
Date = 8,
Time = 9,
Datetime = 10,
Geography = 11,
Numeric = 12,
Bignumeric = 13,
Interval = 14,
Json = 15,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Mode {
Unspecified = 0,
Nullable = 1,
Required = 2,
Repeated = 3,
}
}
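#[doc = " Information about the ReadSession."]
#[doc = ""]
#[doc = " A sketch of the request-side fields a caller typically fills in before"]
#[doc = " `CreateReadSession`; `name`, `expire_time`, `streams`, and `schema` are"]
#[doc = " populated by the server. The table path is a hypothetical placeholder,"]
#[doc = " and the `use` path is illustrative; adjust it to this crate's actual"]
#[doc = " module path."]
#[doc = ""]
#[doc = " ```ignore"]
#[doc = " use bigquery_storage_v1beta2::{read_session, DataFormat, ReadSession};"]
#[doc = ""]
#[doc = " let session = ReadSession {"]
#[doc = "     data_format: DataFormat::Arrow as i32,"]
#[doc = "     table: \"projects/my-project/datasets/my_dataset/tables/my_table\".to_string(),"]
#[doc = "     read_options: Some(read_session::TableReadOptions {"]
#[doc = "         selected_fields: vec![\"user_id\".to_string()],"]
#[doc = "         row_restriction: \"user_id > 0\".to_string(),"]
#[doc = "         ..Default::default()"]
#[doc = "     }),"]
#[doc = "     ..Default::default()"]
#[doc = " };"]
#[doc = " ```"]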
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadSession {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub expire_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(enumeration = "DataFormat", tag = "3")]
pub data_format: i32,
#[prost(string, tag = "6")]
pub table: ::prost::alloc::string::String,
#[prost(message, optional, tag = "7")]
pub table_modifiers: ::core::option::Option<read_session::TableModifiers>,
#[prost(message, optional, tag = "8")]
pub read_options: ::core::option::Option<read_session::TableReadOptions>,
#[prost(message, repeated, tag = "10")]
pub streams: ::prost::alloc::vec::Vec<ReadStream>,
#[prost(oneof = "read_session::Schema", tags = "4, 5")]
pub schema: ::core::option::Option<read_session::Schema>,
}
pub mod read_session {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableModifiers {
#[prost(message, optional, tag = "1")]
pub snapshot_time: ::core::option::Option<::prost_types::Timestamp>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableReadOptions {
#[prost(string, repeated, tag = "1")]
pub selected_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
#[prost(string, tag = "2")]
pub row_restriction: ::prost::alloc::string::String,
#[prost(message, optional, tag = "3")]
pub arrow_serialization_options: ::core::option::Option<super::ArrowSerializationOptions>,
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Schema {
#[prost(message, tag = "4")]
AvroSchema(super::AvroSchema),
#[prost(message, tag = "5")]
ArrowSchema(super::ArrowSchema),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadStream {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WriteStream {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(enumeration = "write_stream::Type", tag = "2")]
pub r#type: i32,
#[prost(message, optional, tag = "3")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, optional, tag = "4")]
pub commit_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, optional, tag = "5")]
pub table_schema: ::core::option::Option<TableSchema>,
}
pub mod write_stream {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Type {
Unspecified = 0,
Committed = 1,
Pending = 2,
Buffered = 3,
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DataFormat {
Unspecified = 0,
Avro = 1,
Arrow = 2,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateReadSessionRequest {
#[prost(string, tag = "1")]
pub parent: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub read_session: ::core::option::Option<ReadSession>,
#[prost(int32, tag = "3")]
pub max_stream_count: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadRowsRequest {
#[prost(string, tag = "1")]
pub read_stream: ::prost::alloc::string::String,
#[prost(int64, tag = "2")]
pub offset: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ThrottleState {
#[prost(int32, tag = "1")]
pub throttle_percent: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamStats {
#[prost(message, optional, tag = "2")]
pub progress: ::core::option::Option<stream_stats::Progress>,
}
pub mod stream_stats {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Progress {
#[prost(double, tag = "1")]
pub at_response_start: f64,
#[prost(double, tag = "2")]
pub at_response_end: f64,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReadRowsResponse {
#[prost(int64, tag = "6")]
pub row_count: i64,
#[prost(message, optional, tag = "2")]
pub stats: ::core::option::Option<StreamStats>,
#[prost(message, optional, tag = "5")]
pub throttle_state: ::core::option::Option<ThrottleState>,
#[prost(oneof = "read_rows_response::Rows", tags = "3, 4")]
pub rows: ::core::option::Option<read_rows_response::Rows>,
#[prost(oneof = "read_rows_response::Schema", tags = "7, 8")]
pub schema: ::core::option::Option<read_rows_response::Schema>,
}
pub mod read_rows_response {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Rows {
#[prost(message, tag = "3")]
AvroRows(super::AvroRows),
#[prost(message, tag = "4")]
ArrowRecordBatch(super::ArrowRecordBatch),
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Schema {
#[prost(message, tag = "7")]
AvroSchema(super::AvroSchema),
#[prost(message, tag = "8")]
ArrowSchema(super::ArrowSchema),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SplitReadStreamRequest {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(double, tag = "2")]
pub fraction: f64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SplitReadStreamResponse {
#[prost(message, optional, tag = "1")]
pub primary_stream: ::core::option::Option<ReadStream>,
#[prost(message, optional, tag = "2")]
pub remainder_stream: ::core::option::Option<ReadStream>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateWriteStreamRequest {
#[prost(string, tag = "1")]
pub parent: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub write_stream: ::core::option::Option<WriteStream>,
}
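#[doc = " Request message for `AppendRows`."]
#[doc = ""]
#[doc = " A sketch of the first request on an `AppendRows` stream, which carries"]
#[doc = " the writer schema alongside the serialized rows; later requests on the"]
#[doc = " same connection may omit the schema. The stream path is a hypothetical"]
#[doc = " placeholder, `descriptor` and `encoded_row` are assumed to be in scope,"]
#[doc = " and the `use` path is illustrative."]
#[doc = ""]
#[doc = " ```ignore"]
#[doc = " use bigquery_storage_v1beta2::{append_rows_request, AppendRowsRequest, ProtoRows, ProtoSchema};"]
#[doc = ""]
#[doc = " let request = AppendRowsRequest {"]
#[doc = "     write_stream: \"projects/p/datasets/d/tables/t/streams/s\".to_string(),"]
#[doc = "     rows: Some(append_rows_request::Rows::ProtoRows(append_rows_request::ProtoData {"]
#[doc = "         // `descriptor` is a `prost_types::DescriptorProto` for the row message."]
#[doc = "         writer_schema: Some(ProtoSchema { proto_descriptor: Some(descriptor) }),"]
#[doc = "         // Each entry is one row, encoded with the schema above."]
#[doc = "         rows: Some(ProtoRows { serialized_rows: vec![encoded_row] }),"]
#[doc = "     })),"]
#[doc = "     ..Default::default()"]
#[doc = " };"]
#[doc = " ```"]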
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendRowsRequest {
#[prost(string, tag = "1")]
pub write_stream: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub offset: ::core::option::Option<i64>,
#[prost(string, tag = "6")]
pub trace_id: ::prost::alloc::string::String,
#[prost(oneof = "append_rows_request::Rows", tags = "4")]
pub rows: ::core::option::Option<append_rows_request::Rows>,
}
pub mod append_rows_request {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ProtoData {
#[prost(message, optional, tag = "1")]
pub writer_schema: ::core::option::Option<super::ProtoSchema>,
#[prost(message, optional, tag = "2")]
pub rows: ::core::option::Option<super::ProtoRows>,
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Rows {
#[prost(message, tag = "4")]
ProtoRows(ProtoData),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendRowsResponse {
#[prost(message, optional, tag = "3")]
pub updated_schema: ::core::option::Option<TableSchema>,
#[prost(oneof = "append_rows_response::Response", tags = "1, 2")]
pub response: ::core::option::Option<append_rows_response::Response>,
}
pub mod append_rows_response {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendResult {
#[prost(message, optional, tag = "1")]
pub offset: ::core::option::Option<i64>,
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Response {
#[prost(message, tag = "1")]
AppendResult(AppendResult),
#[prost(message, tag = "2")]
Error(super::super::super::super::super::rpc::Status),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetWriteStreamRequest {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchCommitWriteStreamsRequest {
#[prost(string, tag = "1")]
pub parent: ::prost::alloc::string::String,
#[prost(string, repeated, tag = "2")]
pub write_streams: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchCommitWriteStreamsResponse {
#[prost(message, optional, tag = "1")]
pub commit_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, repeated, tag = "2")]
pub stream_errors: ::prost::alloc::vec::Vec<StorageError>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FinalizeWriteStreamRequest {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FinalizeWriteStreamResponse {
#[prost(int64, tag = "1")]
pub row_count: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FlushRowsRequest {
#[prost(string, tag = "1")]
pub write_stream: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub offset: ::core::option::Option<i64>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FlushRowsResponse {
#[prost(int64, tag = "1")]
pub offset: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StorageError {
#[prost(enumeration = "storage_error::StorageErrorCode", tag = "1")]
pub code: i32,
#[prost(string, tag = "2")]
pub entity: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub error_message: ::prost::alloc::string::String,
}
pub mod storage_error {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum StorageErrorCode {
Unspecified = 0,
TableNotFound = 1,
StreamAlreadyCommitted = 2,
StreamNotFound = 3,
InvalidStreamType = 4,
InvalidStreamState = 5,
StreamFinalized = 6,
}
}
#[doc = r" Generated client implementations."]
pub mod big_query_read_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " BigQuery Read API."]
#[doc = ""]
#[doc = " The Read API can be used to read data from BigQuery."]
#[doc = ""]
#[doc = " New code should use the v1 Read API going forward, if they don't use Write"]
#[doc = " API at the same time."]
#[derive(Debug, Clone)]
pub struct BigQueryReadClient<T> {
inner: tonic::client::Grpc<T>,
}
impl<T> BigQueryReadClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> BigQueryReadClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
BigQueryReadClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " Creates a new read session. A read session divides the contents of a"]
#[doc = " BigQuery table into one or more streams, which can then be used to read"]
#[doc = " data from the table. The read session also specifies properties of the"]
#[doc = " data to be read, such as a list of columns or a push-down filter describing"]
#[doc = " the rows to be returned."]
#[doc = ""]
#[doc = " A particular row can be read by at most one stream. When the caller has"]
#[doc = " reached the end of each stream in the session, then all the data in the"]
#[doc = " table has been read."]
#[doc = ""]
#[doc = " Data is assigned to each stream such that roughly the same number of"]
#[doc = " rows can be read from each stream. Because the server-side unit for"]
#[doc = " assigning data is collections of rows, the API does not guarantee that"]
#[doc = " each stream will return the same number or rows. Additionally, the"]
#[doc = " limits are enforced based on the number of pre-filtered rows, so some"]
#[doc = " filters can lead to lopsided assignments."]
#[doc = ""]
#[doc = " Read sessions automatically expire 6 hours after they are created and do"]
#[doc = " not require manual clean-up by the caller."]
pub async fn create_read_session(
&mut self,
request: impl tonic::IntoRequest<super::CreateReadSessionRequest>,
) -> Result<tonic::Response<super::ReadSession>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Reads rows from the stream in the format prescribed by the ReadSession."]
#[doc = " Each response contains one or more table rows, up to a maximum of 100 MiB"]
#[doc = " per response; read requests which attempt to read individual rows larger"]
#[doc = " than 100 MiB will fail."]
#[doc = ""]
#[doc = " Each request also returns a set of stream statistics reflecting the current"]
#[doc = " state of the stream."]
pub async fn read_rows(
&mut self,
request: impl tonic::IntoRequest<super::ReadRowsRequest>,
) -> Result<tonic::Response<tonic::codec::Streaming<super::ReadRowsResponse>>, tonic::Status>
{
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows",
);
self.inner.server_streaming(request.into_request(), path, codec).await
}
#[doc = " Splits a given `ReadStream` into two `ReadStream` objects. These"]
#[doc = " `ReadStream` objects are referred to as the primary and the residual"]
#[doc = " streams of the split. The original `ReadStream` can still be read from in"]
#[doc = " the same manner as before. Both of the returned `ReadStream` objects can"]
#[doc = " also be read from, and the rows returned by both child streams will be"]
#[doc = " the same as the rows read from the original stream."]
#[doc = ""]
#[doc = " Moreover, the two child streams will be allocated back-to-back in the"]
#[doc = " original `ReadStream`. Concretely, it is guaranteed that for streams"]
#[doc = " original, primary, and residual, that original[0-j] = primary[0-j] and"]
#[doc = " original[j-n] = residual[0-m] once the streams have been read to"]
#[doc = " completion."]
pub async fn split_read_stream(
&mut self,
request: impl tonic::IntoRequest<super::SplitReadStreamRequest>,
) -> Result<tonic::Response<super::SplitReadStreamResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream",
);
self.inner.unary(request.into_request(), path, codec).await
}
}
}
#[doc = r" Generated client implementations."]
pub mod big_query_write_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " BigQuery Write API."]
#[doc = ""]
#[doc = " The Write API can be used to write data to BigQuery."]
#[derive(Debug, Clone)]
pub struct BigQueryWriteClient<T> {
inner: tonic::client::Grpc<T>,
}
impl<T> BigQueryWriteClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> BigQueryWriteClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
BigQueryWriteClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " Creates a write stream to the given table."]
#[doc = " Additionally, every table has a special COMMITTED stream named '_default'"]
#[doc = " to which data can be written. This stream doesn't need to be created using"]
#[doc = " CreateWriteStream. It is a stream that can be used simultaneously by any"]
#[doc = " number of clients. Data written to this stream is considered committed as"]
#[doc = " soon as an acknowledgement is received."]
pub async fn create_write_stream(
&mut self,
request: impl tonic::IntoRequest<super::CreateWriteStreamRequest>,
) -> Result<tonic::Response<super::WriteStream>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/CreateWriteStream",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Appends data to the given stream."]
#[doc = ""]
#[doc = " If `offset` is specified, the `offset` is checked against the end of"]
#[doc = " stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an"]
#[doc = " attempt is made to append to an offset beyond the current end of the stream"]
#[doc = " or `ALREADY_EXISTS` if user provids an `offset` that has already been"]
#[doc = " written to. User can retry with adjusted offset within the same RPC"]
#[doc = " stream. If `offset` is not specified, append happens at the end of the"]
#[doc = " stream."]
#[doc = ""]
#[doc = " The response contains the offset at which the append happened. Responses"]
#[doc = " are received in the same order in which requests are sent. There will be"]
#[doc = " one response for each successful request. If the `offset` is not set in"]
#[doc = " response, it means append didn't happen due to some errors. If one request"]
#[doc = " fails, all the subsequent requests will also fail until a success request"]
#[doc = " is made again."]
#[doc = ""]
#[doc = " If the stream is of `PENDING` type, data will only be available for read"]
#[doc = " operations after the stream is committed."]
pub async fn append_rows(
&mut self,
request: impl tonic::IntoStreamingRequest<Message = super::AppendRowsRequest>,
) -> Result<
tonic::Response<tonic::codec::Streaming<super::AppendRowsResponse>>,
tonic::Status,
> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/AppendRows",
);
self.inner.streaming(request.into_streaming_request(), path, codec).await
}
#[doc = " Gets a write stream."]
pub async fn get_write_stream(
&mut self,
request: impl tonic::IntoRequest<super::GetWriteStreamRequest>,
) -> Result<tonic::Response<super::WriteStream>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/GetWriteStream",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Finalize a write stream so that no new data can be appended to the"]
#[doc = " stream. Finalize is not supported on the '_default' stream."]
pub async fn finalize_write_stream(
&mut self,
request: impl tonic::IntoRequest<super::FinalizeWriteStreamRequest>,
) -> Result<tonic::Response<super::FinalizeWriteStreamResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/FinalizeWriteStream",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Atomically commits a group of `PENDING` streams that belong to the same"]
#[doc = " `parent` table."]
#[doc = " Streams must be finalized before commit and cannot be committed multiple"]
#[doc = " times. Once a stream is committed, data in the stream becomes available"]
#[doc = " for read operations."]
pub async fn batch_commit_write_streams(
&mut self,
request: impl tonic::IntoRequest<super::BatchCommitWriteStreamsRequest>,
) -> Result<tonic::Response<super::BatchCommitWriteStreamsResponse>, tonic::Status>
{
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/BatchCommitWriteStreams",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Flushes rows to a BUFFERED stream."]
#[doc = " If users are appending rows to BUFFERED stream, flush operation is"]
#[doc = " required in order for the rows to become available for reading. A"]
#[doc = " Flush operation flushes up to any previously flushed offset in a BUFFERED"]
#[doc = " stream, to the offset specified in the request."]
#[doc = " Flush is not supported on the _default stream, since it is not BUFFERED."]
pub async fn flush_rows(
&mut self,
request: impl tonic::IntoRequest<super::FlushRowsRequest>,
) -> Result<tonic::Response<super::FlushRowsResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/google.cloud.bigquery.storage.v1beta2.BigQueryWrite/FlushRows",
);
self.inner.unary(request.into_request(), path, codec).await
}
}
}