//! google_cloud_bigquery/http/job/query.rs
1use std::collections::HashMap;
2
3use reqwest_middleware::{ClientWithMiddleware as Client, RequestBuilder};
4
5use crate::http::dataset::DatasetReference;
6use crate::http::job::{DmlStats, JobReference, SessionInfo};
7use crate::http::table::TableSchema;
8use crate::http::tabledata::list::Tuple;
9use crate::http::types::{ConnectionProperty, DataFormatOptions, ErrorProto, QueryParameter};
10
11#[derive(Clone, PartialEq, serde::Deserialize, serde::Serialize, Debug, Default)]
12#[serde(rename_all = "camelCase")]
13pub struct QueryRequest {
14 /// The resource type of the request.
15 pub kind: String,
16 /// Required. A query string to execute, using Google Standard SQL or legacy SQL syntax.
17 /// Example: "SELECT COUNT(f1) FROM myProjectId.myDatasetId.myTableId".
18 pub query: String,
19 /// Optional. The maximum number of rows of data to return per page of results.
20 /// Setting this flag to a small value such as 1000 and then paging through
21 /// results might improve reliability when the query result set is large.
22 /// In addition to this limit, responses are also limited to 10 MB.
23 /// By default, there is no maximum row count, and only the byte limit applies.
24 pub max_results: Option<i64>,
25 /// Optional. Specifies the default datasetId and projectId to assume for any unqualified table names in the query.
26 /// If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'.
27 pub default_dataset: Option<DatasetReference>,
28 /// Optional. Optional: Specifies the maximum amount of time, in milliseconds,
29 /// that the client is willing to wait for the query to complete.
30 /// By default, this limit is 10 seconds (10,000 milliseconds).
31 /// If the query is complete, the jobComplete field in the response is true.
32 /// If the query has not yet completed, jobComplete is false.
33 /// You can request a longer timeout period in the timeoutMs field.
34 /// However, the call is not guaranteed to wait for the specified timeout;
35 /// it typically returns after around 200 seconds (200,000 milliseconds), even if the query is not complete.
36 /// If jobComplete is false, you can continue to wait for the query to complete
37 /// by calling the getQueryResults method until the jobComplete field in the getQueryResults response is true.
38 pub timeout_ms: Option<i64>,
39 /// Optional. If set to true, BigQuery doesn't run the job.
40 /// Instead, if the query is valid,
41 /// BigQuery returns statistics about the job such as how many bytes would be processed.
42 /// If the query is invalid, an error returns. The default value is false.
43 pub dry_run: Option<bool>,
44 /// Optional. Whether to look for the result in the query cache.
45 /// The query cache is a best-effort cache that will be flushed whenever tables in the query are modified.
46 /// The default value is true.
47 pub use_query_cache: Option<bool>,
48 /// Specifies whether to use BigQuery's legacy SQL dialect for this query.
49 /// The default value is true. If set to false, the query will use
50 /// BigQuery's GoogleSQL: https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set to false, the value of flattenResults is ignored; query will be run as if flattenResults is false.
51 pub use_legacy_sql: bool,
52 /// GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters or
53 /// to NAMED to use named (@myparam) query parameters in this query.
54 pub parameter_mode: Option<String>,
55 /// jobs.query parameters for GoogleSQL queries.
56 pub query_parameters: Vec<QueryParameter>,
57 /// The geographic location where the job should run.
58 /// See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
59 pub location: String,
60 /// Optional. Output format adjustments.
61 pub format_options: Option<DataFormatOptions>,
62 /// Optional. Connection properties which can modify the query behavior.
63 pub connection_properties: Vec<ConnectionProperty>,
64 /// Optional. The labels associated with this query.
65 /// Labels can be used to organize and group query jobs.
66 /// Label keys and values can be no longer than 63 characters,
67 /// can only contain lowercase letters, numeric characters, underscores and dashes.
68 /// International characters are allowed. Label keys must start with a letter and each
69 /// label in the list must have a different key.
70 /// An object containing a list of "key": value pairs.
71 /// Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
72 pub labels: Option<HashMap<String, String>>,
73 ///Optional. Limits the bytes billed for this query.
74 /// Queries with bytes billed above this limit will fail (without incurring a charge).
75 /// If unspecified, the project default is used.
76 #[serde(default, deserialize_with = "crate::http::from_str_option")]
77 pub maximum_bytes_billed: Option<i64>,
78 /// Optional. A unique user provided identifier to ensure idempotent behavior for queries.
79 /// Note that this is different from the jobId. It has the following properties:
80 /// 1.It is case-sensitive, limited to up to 36 ASCII characters. A UUID is recommended.
81 /// 2.Read only queries can ignore this token since they are nullipotent by definition.
82 /// 3.For the purposes of idempotency ensured by the requestId,
83 /// a request is considered duplicate of another only if they have the same requestId and are actually duplicates.
84 /// When determining whether a request is a duplicate of another request,
85 /// all parameters in the request that may affect the result are considered.
86 /// For example, query, connectionProperties, queryParameters, useLegacySql are parameters that affect the result
87 /// and are considered when determining whether a request is a duplicate,
88 /// but properties like timeoutMs don't affect the result and are thus not considered.
89 /// Dry run query requests are never considered duplicate of another request.
90 /// 4.When a duplicate mutating query request is detected, it returns:
91 /// a. the results of the mutation if it completes successfully within the timeout.
92 /// b. the running operation if it is still in progress at the end of the timeout.
93 /// 5.Its lifetime is limited to 15 minutes.
94 /// In other words, if two requests are sent with the same requestId,
95 /// but more than 15 minutes apart, idempotency is not guaranteed.
96 pub request_id: Option<String>,
97 /// Optional. If true, creates a new session using a randomly generated sessionId.
98 /// If false, runs query with an existing sessionId passed in ConnectionProperty,
99 /// otherwise runs query in non-session mode.
100 /// The session location will be set to QueryRequest.location if it is present,
101 /// otherwise it's set to the default location based on existing routing logic.
102 pub create_session: Option<bool>,
103}
104
105#[derive(Clone, PartialEq, serde::Deserialize, serde::Serialize, Debug, Default)]
106#[serde(rename_all = "camelCase")]
107pub struct QueryResponse {
108 /// The resource type.
109 pub kind: String,
110 /// The schema of the results. Present only when the query completes successfully.
111 pub schema: Option<TableSchema>,
112 /// Reference to the Job that was created to run the query.
113 /// This field will be present even if the original request timed out,
114 /// in which case jobs.getQueryResults can be used to read the results once the query has completed.
115 /// Since this API only returns the first page of results,
116 /// subsequent pages can be fetched via the same mechanism (jobs.getQueryResults).
117 pub job_reference: JobReference,
118 /// The total number of rows in the complete query result set,
119 /// which can be more than the number of rows in this single page of results.
120 #[serde(default, deserialize_with = "crate::http::from_str_option")]
121 pub total_rows: Option<i64>,
122 /// A token used for paging results.
123 /// A non-empty token indicates that additional results are available.
124 /// To see additional results, query the jobs.getQueryResults method.
125 /// For more information, see Paging through table data.
126 pub page_token: Option<String>,
127 /// An object with as many results as can be contained within the maximum permitted reply size.
128 /// To get any additional rows, you can call jobs.getQueryResults and specify the jobReference returned above.
129 pub rows: Option<Vec<Tuple>>,
130 /// The total number of bytes processed for this query.
131 /// If this query was a dry run, this is the number of bytes that would be processed if the query were run.
132 #[serde(default, deserialize_with = "crate::http::from_str_option")]
133 pub total_bytes_processed: Option<i64>,
134 /// Whether the query has completed or not.
135 /// If rows or totalRows are present, this will always be true.
136 /// If this is false, totalRows will not be available.
137 pub job_complete: bool,
138 /// Output only. The first errors or warnings encountered during the running of the job.
139 /// The final message includes the number of errors that caused the process to stop.
140 /// Errors here do not necessarily mean that the job has completed or was unsuccessful.
141 /// For more information about error messages, see Error messages.
142 pub errors: Option<Vec<ErrorProto>>,
143 /// Whether the query result was fetched from the query cache.
144 pub cache_hit: Option<bool>,
145 /// Output only. The number of rows affected by a DML statement.
146 /// Present only for DML statements INSERT, UPDATE or DELETE.
147 #[serde(default, deserialize_with = "crate::http::from_str_option")]
148 pub num_dml_affected_rows: Option<i64>,
149 /// Output only. Information of the session if this job is part of one.
150 pub session_info: Option<SessionInfo>,
151 /// Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, MERGE or TRUNCATE.
152 pub dml_stats: Option<DmlStats>,
153}
154
155pub fn build(base_url: &str, client: &Client, project_id: &str, data: &QueryRequest) -> RequestBuilder {
156 let url = format!("{}/projects/{}/queries", base_url, project_id);
157 client.post(url).json(data)
158}