pub struct Client { /* private fields */ }Implementations§
Source§impl Client
impl Client
Sourcepub async fn new(config: ClientConfig) -> Result<Self, Error>
pub async fn new(config: ClientConfig) -> Result<Self, Error>
Creates a new `Client` from the given `ClientConfig`.
Sourcepub fn dataset(&self) -> &BigqueryDatasetClient
pub fn dataset(&self) -> &BigqueryDatasetClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets BigqueryDatasetClient
Sourcepub fn table(&self) -> &BigqueryTableClient
pub fn table(&self) -> &BigqueryTableClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables BigqueryTableClient
Sourcepub fn tabledata(&self) -> &BigqueryTabledataClient
pub fn tabledata(&self) -> &BigqueryTabledataClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata BigqueryTabledataClient
Sourcepub fn job(&self) -> &BigqueryJobClient
pub fn job(&self) -> &BigqueryJobClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs BigqueryJobClient
Sourcepub fn routine(&self) -> &BigqueryRoutineClient
pub fn routine(&self) -> &BigqueryRoutineClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines BigqueryRoutineClient
Sourcepub fn row_access_policy(&self) -> &BigqueryRowAccessPolicyClient
pub fn row_access_policy(&self) -> &BigqueryRowAccessPolicyClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/rowAccessPolicy BigqueryRowAccessPolicyClient
Sourcepub fn model(&self) -> &BigqueryModelClient
pub fn model(&self) -> &BigqueryModelClient
https://cloud.google.com/bigquery/docs/reference/rest/v2/models BigqueryModelClient
Sourcepub fn pending_storage_writer(&self, table: &str) -> Writer
pub fn pending_storage_writer(&self, table: &str) -> Writer
Creates a new pending type storage writer for the specified table. https://cloud.google.com/bigquery/docs/write-api#pending_type
use prost_types::DescriptorProto;
use google_cloud_bigquery::client::Client;
use google_cloud_gax::grpc::Status;
use prost::Message;
use google_cloud_bigquery::storage_write::AppendRowsRequestBuilder;
use futures_util::stream::StreamExt;

/// Writes `rows` to `table` through a pending-type storage write stream:
/// appended rows stay invisible until the stream is finalized and the
/// writer is committed.
pub async fn run<T: Message>(client: &Client, table: &str, rows: Vec<T>, schema: DescriptorProto)
-> Result<(), Status> {
    let mut writer = client.pending_storage_writer(table);
    let stream = writer.create_write_stream().await?;

    // Encode each row into its protobuf wire format.
    // `encode` into a `Vec` cannot fail — the buffer grows as needed.
    let mut data = Vec::with_capacity(rows.len());
    for row in rows {
        let mut buf = Vec::new();
        row.encode(&mut buf).unwrap();
        data.push(buf);
    }

    // Propagate append failures with `?` instead of panicking via `unwrap`.
    let mut result = stream.append_rows(vec![AppendRowsRequestBuilder::new(schema, data)]).await?;
    while let Some(Ok(res)) = result.next().await {
        tracing::info!("append row errors = {:?}", res.row_errors.len());
    }

    // Pending streams require an explicit finalize + commit to make rows visible.
    let _ = stream.finalize().await?;
    let _ = writer.commit().await?;
    Ok(())
}Sourcepub fn default_storage_writer(&self) -> Writer
pub fn default_storage_writer(&self) -> Writer
Creates a new default type storage writer. https://cloud.google.com/bigquery/docs/write-api#default_stream
use prost_types::DescriptorProto;
use google_cloud_bigquery::client::Client;
use google_cloud_gax::grpc::Status;
use prost::Message;
use google_cloud_bigquery::storage_write::AppendRowsRequestBuilder;
use futures_util::stream::StreamExt;

/// Writes `rows` to `table` through the default storage write stream:
/// rows are available immediately after a successful append, with no
/// finalize/commit step.
pub async fn run<T: Message>(client: &Client, table: &str, rows: Vec<T>, schema: DescriptorProto)
-> Result<(), Status> {
    let writer = client.default_storage_writer();
    let stream = writer.create_write_stream(table).await?;

    // Encode each row into its protobuf wire format.
    // `encode` into a `Vec` cannot fail — the buffer grows as needed.
    let mut data = Vec::with_capacity(rows.len());
    for row in rows {
        let mut buf = Vec::new();
        row.encode(&mut buf).unwrap();
        data.push(buf);
    }

    // Propagate append failures with `?` instead of panicking via `unwrap`.
    let mut result = stream.append_rows(vec![AppendRowsRequestBuilder::new(schema, data)]).await?;
    while let Some(Ok(res)) = result.next().await {
        tracing::info!("append row errors = {:?}", res.row_errors.len());
    }
    Ok(())
}Sourcepub fn committed_storage_writer(&self) -> Writer
pub fn committed_storage_writer(&self) -> Writer
Creates a new committed type storage writer. https://cloud.google.com/bigquery/docs/write-api#committed_type
use prost_types::DescriptorProto;
use google_cloud_bigquery::client::Client;
use google_cloud_gax::grpc::Status;
use prost::Message;
use google_cloud_bigquery::storage_write::AppendRowsRequestBuilder;
use futures_util::stream::StreamExt;

/// Writes `rows` to `table` through a committed-type storage write stream:
/// rows are available immediately after a successful append; the stream is
/// finalized at the end, but no commit is needed.
pub async fn run<T: Message>(client: &Client, table: &str, rows: Vec<T>, schema: DescriptorProto)
-> Result<(), Status> {
    let writer = client.committed_storage_writer();
    let stream = writer.create_write_stream(table).await?;

    // Encode each row into its protobuf wire format.
    // `encode` into a `Vec` cannot fail — the buffer grows as needed.
    let mut data = Vec::with_capacity(rows.len());
    for row in rows {
        let mut buf = Vec::new();
        row.encode(&mut buf).unwrap();
        data.push(buf);
    }

    // Propagate append failures with `?` instead of panicking via `unwrap`.
    let mut result = stream.append_rows(vec![AppendRowsRequestBuilder::new(schema, data)]).await?;
    while let Some(Ok(res)) = result.next().await {
        tracing::info!("append row errors = {:?}", res.row_errors.len());
    }

    // Finalize to mark the stream complete; committed rows are already visible.
    let _ = stream.finalize().await?;
    Ok(())
}Sourcepub fn buffered_storage_writer(&self) -> Writer
pub fn buffered_storage_writer(&self) -> Writer
Creates a new buffered type storage writer. https://cloud.google.com/bigquery/docs/write-api#buffered_type
use prost_types::DescriptorProto;
use google_cloud_bigquery::client::Client;
use prost::Message;
use google_cloud_bigquery::storage_write::AppendRowsRequestBuilder;
use futures_util::stream::StreamExt;
use google_cloud_gax::grpc::Status;

/// Writes `rows` to `table` through a buffered-type storage write stream:
/// appended rows become visible only when the buffer is flushed up to a
/// given offset.
pub async fn run<T: Message>(client: &Client, table: &str, rows: Vec<T>, schema: DescriptorProto)
-> Result<(), Status> {
    let writer = client.buffered_storage_writer();
    let stream = writer.create_write_stream(table).await?;

    // Encode each row into its protobuf wire format.
    // `encode` into a `Vec` cannot fail — the buffer grows as needed.
    let mut data = Vec::with_capacity(rows.len());
    for row in rows {
        let mut buf = Vec::new();
        row.encode(&mut buf).unwrap();
        data.push(buf);
    }

    // Propagate append failures with `?` instead of panicking via `unwrap`.
    let mut result = stream.append_rows(vec![AppendRowsRequestBuilder::new(schema, data)]).await?;
    while let Some(Ok(res)) = result.next().await {
        tracing::info!("append row errors = {:?}", res.row_errors.len());
    }

    // Flush buffered rows up to the given offset, then finalize the stream.
    let _ = stream.flush_rows(Some(0)).await?;
    let _ = stream.finalize().await?;
    Ok(())
}Sourcepub async fn query<T>(
&self,
project_id: &str,
request: QueryRequest,
) -> Result<Iterator<T>, QueryError>where
T: StructDecodable + StructDecodable, (these appear to be two distinct same-named traits — the query-result decoder and the Storage Read decoder — rendered identically; confirm against the crate source)
pub async fn query<T>(
&self,
project_id: &str,
request: QueryRequest,
) -> Result<Iterator<T>, QueryError>where
T: StructDecodable + StructDecodable,
Run query job and get result.
use google_cloud_bigquery::http::job::query::QueryRequest;
use google_cloud_bigquery::query::row::Row;
use google_cloud_bigquery::client::Client;

/// Runs a query job and iterates over the resulting rows.
async fn run(client: &Client, project_id: &str) {
    let request = QueryRequest {
        query: "SELECT * FROM dataset.table".to_string(),
        ..Default::default()
    };
    // Fetch rows one at a time; `column` is indexed by SELECT position.
    let mut rows = client.query::<Row>(project_id, request).await.unwrap();
    while let Some(row) = rows.next().await.unwrap() {
        let first_col = row.column::<String>(0);
        let second_col = row.column::<Option<String>>(1);
    }
}Sourcepub async fn query_with_option<T>(
&self,
project_id: &str,
request: QueryRequest,
option: QueryOption,
) -> Result<Iterator<T>, QueryError>where
T: StructDecodable + StructDecodable, (these appear to be two distinct same-named traits — the query-result decoder and the Storage Read decoder — rendered identically; confirm against the crate source)
pub async fn query_with_option<T>(
&self,
project_id: &str,
request: QueryRequest,
option: QueryOption,
) -> Result<Iterator<T>, QueryError>where
T: StructDecodable + StructDecodable,
Run query job and get result.
use google_cloud_bigquery::http::job::query::QueryRequest;
use google_cloud_bigquery::query::row::Row;
use google_cloud_bigquery::client::Client;
use google_cloud_bigquery::query::QueryOption;
use google_cloud_bigquery::query::ExponentialBuilder;

/// Runs a query job with retry and Storage Read options, then iterates
/// over the resulting rows.
async fn run(client: &Client, project_id: &str) {
    let request = QueryRequest {
        query: "SELECT * FROM dataset.table".to_string(),
        ..Default::default()
    };
    // Retry up to 10 times with exponential backoff, and read results
    // through the Storage Read API.
    let backoff = ExponentialBuilder::default().with_max_times(10);
    let opts = QueryOption::default().with_retry(backoff).with_enable_storage_read(true);
    let mut rows = client.query_with_option::<Row>(project_id, request, opts).await.unwrap();
    while let Some(row) = rows.next().await.unwrap() {
        let first_col = row.column::<String>(0);
        let second_col = row.column::<Option<String>>(1);
    }
}Sourcepub async fn read_table<T>(
&self,
table: &TableReference,
option: Option<ReadTableOption>,
) -> Result<Iterator<T>, Error>where
T: StructDecodable,
pub async fn read_table<T>(
&self,
table: &TableReference,
option: Option<ReadTableOption>,
) -> Result<Iterator<T>, Error>where
T: StructDecodable,
Read table data by BigQuery Storage Read API.
use google_cloud_bigquery::storage::row::Row;
use google_cloud_bigquery::client::Client;
use google_cloud_bigquery::http::table::TableReference;

/// Reads all rows of a table through the BigQuery Storage Read API.
async fn run(client: &Client, project_id: &str) {
    let table_ref = TableReference {
        project_id: project_id.to_string(),
        dataset_id: "dataset".to_string(),
        table_id: "table".to_string(),
    };
    // `None` uses the default read options; `column` is indexed by position.
    let mut rows = client.read_table::<Row>(&table_ref, None).await.unwrap();
    while let Some(row) = rows.next().await.unwrap() {
        let first_col = row.column::<String>(0);
        let second_col = row.column::<Option<String>>(1);
    }
}Trait Implementations§
Auto Trait Implementations§
impl Freeze for Client
impl !RefUnwindSafe for Client
impl Send for Client
impl Sync for Client
impl Unpin for Client
impl !UnwindSafe for Client
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoRequest<T> for T
impl<T> IntoRequest<T> for T
Source§fn into_request(self) -> Request<T>
fn into_request(self) -> Request<T>
T in a tonic::Request