gcp_client/google/cloud/automl/v1.rs
1/// A definition of an annotation spec.
2#[derive(Clone, PartialEq, ::prost::Message)]
3pub struct AnnotationSpec {
4 /// Output only. Resource name of the annotation spec.
5 /// Form:
6 ///
7 /// 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
8 #[prost(string, tag="1")]
9 pub name: std::string::String,
10 /// Required. The name of the annotation spec to show in the interface. The name can be
11 /// up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`.
12 #[prost(string, tag="2")]
13 pub display_name: std::string::String,
14 /// Output only. The number of examples in the parent dataset
15 /// labeled by the annotation spec.
16 #[prost(int32, tag="9")]
17 pub example_count: i32,
18}
19/// Contains annotation details specific to classification.
20#[derive(Clone, PartialEq, ::prost::Message)]
21pub struct ClassificationAnnotation {
22 /// Output only. A confidence estimate between 0.0 and 1.0. A higher value
23 /// means greater confidence that the annotation is positive. If a user
24 /// approves an annotation as negative or positive, the score value remains
25 /// unchanged. If a user creates an annotation, the score is 0 for negative or
26 /// 1 for positive.
27 #[prost(float, tag="1")]
28 pub score: f32,
29}
30/// Model evaluation metrics for classification problems.
31/// Note: For Video Classification this metrics only describe quality of the
32/// Video Classification predictions of "segment_classification" type.
33#[derive(Clone, PartialEq, ::prost::Message)]
34pub struct ClassificationEvaluationMetrics {
35 /// Output only. The Area Under Precision-Recall Curve metric. Micro-averaged
36 /// for the overall evaluation.
37 #[prost(float, tag="1")]
38 pub au_prc: f32,
39 /// Output only. The Area Under Receiver Operating Characteristic curve metric.
40 /// Micro-averaged for the overall evaluation.
41 #[prost(float, tag="6")]
42 pub au_roc: f32,
43 /// Output only. The Log Loss metric.
44 #[prost(float, tag="7")]
45 pub log_loss: f32,
46 /// Output only. Metrics for each confidence_threshold in
47 /// 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
48 /// position_threshold = INT32_MAX_VALUE.
49 /// ROC and precision-recall curves, and other aggregated metrics are derived
50 /// from them. The confidence metrics entries may also be supplied for
51 /// additional values of position_threshold, but from these no aggregated
52 /// metrics are computed.
53 #[prost(message, repeated, tag="3")]
54 pub confidence_metrics_entry: ::std::vec::Vec<classification_evaluation_metrics::ConfidenceMetricsEntry>,
55 /// Output only. Confusion matrix of the evaluation.
56 /// Only set for MULTICLASS classification problems where number
57 /// of labels is no more than 10.
58 /// Only set for model level evaluation, not for evaluation per label.
59 #[prost(message, optional, tag="4")]
60 pub confusion_matrix: ::std::option::Option<classification_evaluation_metrics::ConfusionMatrix>,
61 /// Output only. The annotation spec ids used for this evaluation.
62 #[prost(string, repeated, tag="5")]
63 pub annotation_spec_id: ::std::vec::Vec<std::string::String>,
64}
65pub mod classification_evaluation_metrics {
66 /// Metrics for a single confidence threshold.
67 #[derive(Clone, PartialEq, ::prost::Message)]
68 pub struct ConfidenceMetricsEntry {
69 /// Output only. Metrics are computed with an assumption that the model
70 /// never returns predictions with score lower than this value.
71 #[prost(float, tag="1")]
72 pub confidence_threshold: f32,
73 /// Output only. Metrics are computed with an assumption that the model
74 /// always returns at most this many predictions (ordered by their score,
75 /// descendingly), but they all still need to meet the confidence_threshold.
76 #[prost(int32, tag="14")]
77 pub position_threshold: i32,
78 /// Output only. Recall (True Positive Rate) for the given confidence
79 /// threshold.
80 #[prost(float, tag="2")]
81 pub recall: f32,
82 /// Output only. Precision for the given confidence threshold.
83 #[prost(float, tag="3")]
84 pub precision: f32,
85 /// Output only. False Positive Rate for the given confidence threshold.
86 #[prost(float, tag="8")]
87 pub false_positive_rate: f32,
88 /// Output only. The harmonic mean of recall and precision.
89 #[prost(float, tag="4")]
90 pub f1_score: f32,
91 /// Output only. The Recall (True Positive Rate) when only considering the
92 /// label that has the highest prediction score and not below the confidence
93 /// threshold for each example.
94 #[prost(float, tag="5")]
95 pub recall_at1: f32,
96 /// Output only. The precision when only considering the label that has the
97 /// highest prediction score and not below the confidence threshold for each
98 /// example.
99 #[prost(float, tag="6")]
100 pub precision_at1: f32,
101 /// Output only. The False Positive Rate when only considering the label that
102 /// has the highest prediction score and not below the confidence threshold
103 /// for each example.
104 #[prost(float, tag="9")]
105 pub false_positive_rate_at1: f32,
106 /// Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
107 #[prost(float, tag="7")]
108 pub f1_score_at1: f32,
109 /// Output only. The number of model created labels that match a ground truth
110 /// label.
111 #[prost(int64, tag="10")]
112 pub true_positive_count: i64,
113 /// Output only. The number of model created labels that do not match a
114 /// ground truth label.
115 #[prost(int64, tag="11")]
116 pub false_positive_count: i64,
117 /// Output only. The number of ground truth labels that are not matched
118 /// by a model created label.
119 #[prost(int64, tag="12")]
120 pub false_negative_count: i64,
121 /// Output only. The number of labels that were not created by the model,
122 /// but if they would, they would not match a ground truth label.
123 #[prost(int64, tag="13")]
124 pub true_negative_count: i64,
125 }
126 /// Confusion matrix of the model running the classification.
127 #[derive(Clone, PartialEq, ::prost::Message)]
128 pub struct ConfusionMatrix {
129 /// Output only. IDs of the annotation specs used in the confusion matrix.
130 /// For Tables CLASSIFICATION
131 ///
132 /// [prediction_type][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]
133 /// only list of [annotation_spec_display_name-s][] is populated.
134 #[prost(string, repeated, tag="1")]
135 pub annotation_spec_id: ::std::vec::Vec<std::string::String>,
136 /// Output only. Display name of the annotation specs used in the confusion
137 /// matrix, as they were at the moment of the evaluation. For Tables
138 /// CLASSIFICATION
139 ///
140 /// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type],
141 /// distinct values of the target column at the moment of the model
142 /// evaluation are populated here.
143 #[prost(string, repeated, tag="3")]
144 pub display_name: ::std::vec::Vec<std::string::String>,
145 /// Output only. Rows in the confusion matrix. The number of rows is equal to
146 /// the size of `annotation_spec_id`.
147 /// `row[i].example_count[j]` is the number of examples that have ground
148 /// truth of the `annotation_spec_id[i]` and are predicted as
149 /// `annotation_spec_id[j]` by the model being evaluated.
150 #[prost(message, repeated, tag="2")]
151 pub row: ::std::vec::Vec<confusion_matrix::Row>,
152 }
153 pub mod confusion_matrix {
154 /// Output only. A row in the confusion matrix.
155 #[derive(Clone, PartialEq, ::prost::Message)]
156 pub struct Row {
157 /// Output only. Value of the specific cell in the confusion matrix.
158 /// The number of values each row has (i.e. the length of the row) is equal
159 /// to the length of the `annotation_spec_id` field or, if that one is not
160 /// populated, length of the [display_name][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field.
161 #[prost(int32, repeated, tag="1")]
162 pub example_count: ::std::vec::Vec<i32>,
163 }
164 }
165}
166/// Type of the classification problem.
167#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
168#[repr(i32)]
169pub enum ClassificationType {
170 /// An un-set value of this enum.
171 Unspecified = 0,
172 /// At most one label is allowed per example.
173 Multiclass = 1,
174 /// Multiple labels are allowed for one example.
175 Multilabel = 2,
176}
177/// Dataset metadata that is specific to image classification.
178#[derive(Clone, PartialEq, ::prost::Message)]
179pub struct ImageClassificationDatasetMetadata {
180 /// Required. Type of the classification problem.
181 #[prost(enumeration="ClassificationType", tag="1")]
182 pub classification_type: i32,
183}
184/// Dataset metadata specific to image object detection.
185#[derive(Clone, PartialEq, ::prost::Message)]
186pub struct ImageObjectDetectionDatasetMetadata {
187}
188/// Model metadata for image classification.
189#[derive(Clone, PartialEq, ::prost::Message)]
190pub struct ImageClassificationModelMetadata {
191 /// Optional. The ID of the `base` model. If it is specified, the new model
192 /// will be created based on the `base` model. Otherwise, the new model will be
193 /// created from scratch. The `base` model must be in the same
194 /// `project` and `location` as the new model to create, and have the same
195 /// `model_type`.
196 #[prost(string, tag="1")]
197 pub base_model_id: std::string::String,
198 /// The train budget of creating this model, expressed in milli node
199 /// hours i.e. 1,000 value in this field means 1 node hour. The actual
200 /// `train_cost` will be equal or less than this value. If further model
201 /// training ceases to provide any improvements, it will stop without using
202 /// full budget and the stop_reason will be `MODEL_CONVERGED`.
203 /// Note, node_hour = actual_hour * number_of_nodes_invovled.
204 /// For model type `cloud`(default), the train budget must be between 8,000
205 /// and 800,000 milli node hours, inclusive. The default value is 192, 000
206 /// which represents one day in wall time. For model type
207 /// `mobile-low-latency-1`, `mobile-versatile-1`, `mobile-high-accuracy-1`,
208 /// `mobile-core-ml-low-latency-1`, `mobile-core-ml-versatile-1`,
209 /// `mobile-core-ml-high-accuracy-1`, the train budget must be between 1,000
210 /// and 100,000 milli node hours, inclusive. The default value is 24, 000 which
211 /// represents one day in wall time.
212 #[prost(int64, tag="16")]
213 pub train_budget_milli_node_hours: i64,
214 /// Output only. The actual train cost of creating this model, expressed in
215 /// milli node hours, i.e. 1,000 value in this field means 1 node hour.
216 /// Guaranteed to not exceed the train budget.
217 #[prost(int64, tag="17")]
218 pub train_cost_milli_node_hours: i64,
219 /// Output only. The reason that this create model operation stopped,
220 /// e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
221 #[prost(string, tag="5")]
222 pub stop_reason: std::string::String,
223 /// Optional. Type of the model. The available values are:
224 /// * `cloud` - Model to be used via prediction calls to AutoML API.
225 /// This is the default value.
226 /// * `mobile-low-latency-1` - A model that, in addition to providing
227 /// prediction via AutoML API, can also be exported (see
228 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
229 /// with TensorFlow afterwards. Expected to have low latency, but
230 /// may have lower prediction quality than other models.
231 /// * `mobile-versatile-1` - A model that, in addition to providing
232 /// prediction via AutoML API, can also be exported (see
233 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
234 /// with TensorFlow afterwards.
235 /// * `mobile-high-accuracy-1` - A model that, in addition to providing
236 /// prediction via AutoML API, can also be exported (see
237 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
238 /// with TensorFlow afterwards. Expected to have a higher
239 /// latency, but should also have a higher prediction quality
240 /// than other models.
241 /// * `mobile-core-ml-low-latency-1` - A model that, in addition to providing
242 /// prediction via AutoML API, can also be exported (see
243 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
244 /// ML afterwards. Expected to have low latency, but may have
245 /// lower prediction quality than other models.
246 /// * `mobile-core-ml-versatile-1` - A model that, in addition to providing
247 /// prediction via AutoML API, can also be exported (see
248 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
249 /// ML afterwards.
250 /// * `mobile-core-ml-high-accuracy-1` - A model that, in addition to
251 /// providing prediction via AutoML API, can also be exported
252 /// (see [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with
253 /// Core ML afterwards. Expected to have a higher latency, but
254 /// should also have a higher prediction quality than other
255 /// models.
256 #[prost(string, tag="7")]
257 pub model_type: std::string::String,
258 /// Output only. An approximate number of online prediction QPS that can
259 /// be supported by this model per each node on which it is deployed.
260 #[prost(double, tag="13")]
261 pub node_qps: f64,
262 /// Output only. The number of nodes this model is deployed on. A node is an
263 /// abstraction of a machine resource, which can handle online prediction QPS
264 /// as given in the node_qps field.
265 #[prost(int64, tag="14")]
266 pub node_count: i64,
267}
268/// Model metadata specific to image object detection.
269#[derive(Clone, PartialEq, ::prost::Message)]
270pub struct ImageObjectDetectionModelMetadata {
271 /// Optional. Type of the model. The available values are:
272 /// * `cloud-high-accuracy-1` - (default) A model to be used via prediction
273 /// calls to AutoML API. Expected to have a higher latency, but
274 /// should also have a higher prediction quality than other
275 /// models.
276 /// * `cloud-low-latency-1` - A model to be used via prediction
277 /// calls to AutoML API. Expected to have low latency, but may
278 /// have lower prediction quality than other models.
279 /// * `mobile-low-latency-1` - A model that, in addition to providing
280 /// prediction via AutoML API, can also be exported (see
281 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
282 /// with TensorFlow afterwards. Expected to have low latency, but
283 /// may have lower prediction quality than other models.
284 /// * `mobile-versatile-1` - A model that, in addition to providing
285 /// prediction via AutoML API, can also be exported (see
286 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
287 /// with TensorFlow afterwards.
288 /// * `mobile-high-accuracy-1` - A model that, in addition to providing
289 /// prediction via AutoML API, can also be exported (see
290 /// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
291 /// with TensorFlow afterwards. Expected to have a higher
292 /// latency, but should also have a higher prediction quality
293 /// than other models.
294 #[prost(string, tag="1")]
295 pub model_type: std::string::String,
296 /// Output only. The number of nodes this model is deployed on. A node is an
297 /// abstraction of a machine resource, which can handle online prediction QPS
298 /// as given in the qps_per_node field.
299 #[prost(int64, tag="3")]
300 pub node_count: i64,
301 /// Output only. An approximate number of online prediction QPS that can
302 /// be supported by this model per each node on which it is deployed.
303 #[prost(double, tag="4")]
304 pub node_qps: f64,
305 /// Output only. The reason that this create model operation stopped,
306 /// e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
307 #[prost(string, tag="5")]
308 pub stop_reason: std::string::String,
309 /// The train budget of creating this model, expressed in milli node
310 /// hours i.e. 1,000 value in this field means 1 node hour. The actual
311 /// `train_cost` will be equal or less than this value. If further model
312 /// training ceases to provide any improvements, it will stop without using
313 /// full budget and the stop_reason will be `MODEL_CONVERGED`.
314 /// Note, node_hour = actual_hour * number_of_nodes_invovled.
315 /// For model type `cloud-high-accuracy-1`(default) and `cloud-low-latency-1`,
316 /// the train budget must be between 20,000 and 900,000 milli node hours,
317 /// inclusive. The default value is 216, 000 which represents one day in
318 /// wall time.
319 /// For model type `mobile-low-latency-1`, `mobile-versatile-1`,
320 /// `mobile-high-accuracy-1`, `mobile-core-ml-low-latency-1`,
321 /// `mobile-core-ml-versatile-1`, `mobile-core-ml-high-accuracy-1`, the train
322 /// budget must be between 1,000 and 100,000 milli node hours, inclusive.
323 /// The default value is 24, 000 which represents one day in wall time.
324 #[prost(int64, tag="6")]
325 pub train_budget_milli_node_hours: i64,
326 /// Output only. The actual train cost of creating this model, expressed in
327 /// milli node hours, i.e. 1,000 value in this field means 1 node hour.
328 /// Guaranteed to not exceed the train budget.
329 #[prost(int64, tag="7")]
330 pub train_cost_milli_node_hours: i64,
331}
332/// Model deployment metadata specific to Image Classification.
333#[derive(Clone, PartialEq, ::prost::Message)]
334pub struct ImageClassificationModelDeploymentMetadata {
335 /// Input only. The number of nodes to deploy the model on. A node is an
336 /// abstraction of a machine resource, which can handle online prediction QPS
337 /// as given in the model's
338 ///
339 /// [node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps].
340 /// Must be between 1 and 100, inclusive on both ends.
341 #[prost(int64, tag="1")]
342 pub node_count: i64,
343}
344/// Model deployment metadata specific to Image Object Detection.
345#[derive(Clone, PartialEq, ::prost::Message)]
346pub struct ImageObjectDetectionModelDeploymentMetadata {
347 /// Input only. The number of nodes to deploy the model on. A node is an
348 /// abstraction of a machine resource, which can handle online prediction QPS
349 /// as given in the model's
350 ///
351 /// [qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node].
352 /// Must be between 1 and 100, inclusive on both ends.
353 #[prost(int64, tag="1")]
354 pub node_count: i64,
355}
356/// Dataset metadata for classification.
357#[derive(Clone, PartialEq, ::prost::Message)]
358pub struct TextClassificationDatasetMetadata {
359 /// Required. Type of the classification problem.
360 #[prost(enumeration="ClassificationType", tag="1")]
361 pub classification_type: i32,
362}
363/// Model metadata that is specific to text classification.
364#[derive(Clone, PartialEq, ::prost::Message)]
365pub struct TextClassificationModelMetadata {
366 /// Output only. Classification type of the dataset used to train this model.
367 #[prost(enumeration="ClassificationType", tag="3")]
368 pub classification_type: i32,
369}
370/// Dataset metadata that is specific to text extraction
371#[derive(Clone, PartialEq, ::prost::Message)]
372pub struct TextExtractionDatasetMetadata {
373}
374/// Model metadata that is specific to text extraction.
375#[derive(Clone, PartialEq, ::prost::Message)]
376pub struct TextExtractionModelMetadata {
377}
378/// Dataset metadata for text sentiment.
379#[derive(Clone, PartialEq, ::prost::Message)]
380pub struct TextSentimentDatasetMetadata {
381 /// Required. A sentiment is expressed as an integer ordinal, where higher value
382 /// means a more positive sentiment. The range of sentiments that will be used
383 /// is between 0 and sentiment_max (inclusive on both ends), and all the values
384 /// in the range must be represented in the dataset before a model can be
385 /// created.
386 /// sentiment_max value must be between 1 and 10 (inclusive).
387 #[prost(int32, tag="1")]
388 pub sentiment_max: i32,
389}
390/// Model metadata that is specific to text sentiment.
391#[derive(Clone, PartialEq, ::prost::Message)]
392pub struct TextSentimentModelMetadata {
393}
394/// A vertex represents a 2D point in the image.
395/// The normalized vertex coordinates are between 0 to 1 fractions relative to
396/// the original plane (image, video). E.g. if the plane (e.g. whole image) would
397/// have size 10 x 20 then a point with normalized coordinates (0.1, 0.3) would
398/// be at the position (1, 6) on that plane.
399#[derive(Clone, PartialEq, ::prost::Message)]
400pub struct NormalizedVertex {
401 /// Required. Horizontal coordinate.
402 #[prost(float, tag="1")]
403 pub x: f32,
404 /// Required. Vertical coordinate.
405 #[prost(float, tag="2")]
406 pub y: f32,
407}
408/// A bounding polygon of a detected object on a plane.
409/// On output both vertices and normalized_vertices are provided.
410/// The polygon is formed by connecting vertices in the order they are listed.
411#[derive(Clone, PartialEq, ::prost::Message)]
412pub struct BoundingPoly {
413 /// Output only . The bounding polygon normalized vertices.
414 #[prost(message, repeated, tag="2")]
415 pub normalized_vertices: ::std::vec::Vec<NormalizedVertex>,
416}
417/// Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
418///
419/// The format of input depends on dataset_metadata the Dataset into which
420/// the import is happening has. As input source the
421/// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
422/// is expected, unless specified otherwise. Additionally any input .CSV file
423/// by itself must be 100MB or smaller, unless specified otherwise.
424/// If an "example" file (that is, image, video etc.) with identical content
425/// (even if it had different `GCS_FILE_PATH`) is mentioned multiple times, then
426/// its label, bounding boxes etc. are appended. The same file should be always
427/// provided with the same `ML_USE` and `GCS_FILE_PATH`, if it is not, then
428/// these values are nondeterministically selected from the given ones.
429///
430/// The formats are represented in EBNF with commas being literal and with
431/// non-terminal symbols defined near the end of this comment. The formats are:
432///
433/// <h4>AutoML Vision</h4>
434///
435///
436/// <div class="ds-selector-tabs"><section><h5>Classification</h5>
437///
438/// See [Preparing your training
439/// data](https://cloud.google.com/vision/automl/docs/prepare) for more
440/// information.
441///
442/// CSV file(s) with each line in format:
443///
444/// ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
445///
446/// * `ML_USE` - Identifies the data set that the current row (file) applies
447/// to.
448/// This value can be one of the following:
449/// * `TRAIN` - Rows in this file are used to train the model.
450/// * `TEST` - Rows in this file are used to test the model during training.
451/// * `UNASSIGNED` - Rows in this file are not categorized. They are
452/// Automatically divided into train and test data. 80% for training and
453/// 20% for testing.
454///
455/// * `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
456/// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP,
457/// .TIFF, .ICO.
458///
459/// * `LABEL` - A label that identifies the object in the image.
460///
461/// For the `MULTICLASS` classification type, at most one `LABEL` is allowed
462/// per image. If an image has not yet been labeled, then it should be
463/// mentioned just once with no `LABEL`.
464///
465/// Some sample rows:
466///
467/// TRAIN,gs://folder/image1.jpg,daisy
468/// TEST,gs://folder/image2.jpg,dandelion,tulip,rose
469/// UNASSIGNED,gs://folder/image3.jpg,daisy
470/// UNASSIGNED,gs://folder/image4.jpg
471///
472///
473/// </section><section><h5>Object Detection</h5>
474/// See [Preparing your training
475/// data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
476/// for more information.
477///
478/// A CSV file(s) with each line in format:
479///
480/// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
481///
482/// * `ML_USE` - Identifies the data set that the current row (file) applies
483/// to.
484/// This value can be one of the following:
485/// * `TRAIN` - Rows in this file are used to train the model.
486/// * `TEST` - Rows in this file are used to test the model during training.
487/// * `UNASSIGNED` - Rows in this file are not categorized. They are
488/// Automatically divided into train and test data. 80% for training and
489/// 20% for testing.
490///
491/// * `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
492/// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each image
493/// is assumed to be exhaustively labeled.
494///
495/// * `LABEL` - A label that identifies the object in the image specified by the
496/// `BOUNDING_BOX`.
497///
498/// * `BOUNDING BOX` - The vertices of an object in the example image.
499/// The minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than
500/// 500 `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX`
501/// per line). If an image has no looked for objects then it should be
502/// mentioned just once with no LABEL and the ",,,,,,," in place of the
503/// `BOUNDING_BOX`.
504///
505/// **Four sample rows:**
506///
507/// TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
508/// TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
509/// UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
510/// TEST,gs://folder/im3.png,,,,,,,,,
511/// </section>
512/// </div>
513///
514///
515/// <h4>AutoML Video Intelligence</h4>
516///
517///
518/// <div class="ds-selector-tabs"><section><h5>Classification</h5>
519///
520/// See [Preparing your training
521/// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
522/// more information.
523///
524/// CSV file(s) with each line in format:
525///
526/// ML_USE,GCS_FILE_PATH
527///
528/// For `ML_USE`, do not use `VALIDATE`.
529///
530/// `GCS_FILE_PATH` is the path to another .csv file that describes training
531/// example for a given `ML_USE`, using the following row format:
532///
533/// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
534///
535/// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
536/// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
537///
538/// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
539/// length of the video, and the end time must be after the start time. Any
540/// segment of a video which has one or more labels on it, is considered a
541/// hard negative for all other labels. Any segment with no labels on
542/// it is considered to be unknown. If a whole video is unknown, then
543/// it should be mentioned just once with ",," in place of `LABEL,
544/// TIME_SEGMENT_START,TIME_SEGMENT_END`.
545///
546/// Sample top level CSV file:
547///
548/// TRAIN,gs://folder/train_videos.csv
549/// TEST,gs://folder/test_videos.csv
550/// UNASSIGNED,gs://folder/other_videos.csv
551///
552/// Sample rows of a CSV file for a particular ML_USE:
553///
554/// gs://folder/video1.avi,car,120,180.000021
555/// gs://folder/video1.avi,bike,150,180.000021
556/// gs://folder/vid2.avi,car,0,60.5
557/// gs://folder/vid3.avi,,,
558///
559///
560///
561/// </section><section><h5>Object Tracking</h5>
562///
563/// See [Preparing your training
564/// data](/video-intelligence/automl/object-tracking/docs/prepare) for more
565/// information.
566///
567/// CSV file(s) with each line in format:
568///
569/// ML_USE,GCS_FILE_PATH
570///
571/// For `ML_USE`, do not use `VALIDATE`.
572///
573/// `GCS_FILE_PATH` is the path to another .csv file that describes training
574/// example for a given `ML_USE`, using the following row format:
575///
576/// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
577///
578/// or
579///
580/// GCS_FILE_PATH,,,,,,,,,,
581///
582/// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
583/// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
584/// Providing `INSTANCE_ID`s can help to obtain a better model. When
585/// a specific labeled entity leaves the video frame, and shows up
586/// afterwards it is not required, albeit preferable, that the same
587/// `INSTANCE_ID` is given to it.
588///
589/// `TIMESTAMP` must be within the length of the video, the
590/// `BOUNDING_BOX` is assumed to be drawn on the closest video's frame
591/// to the `TIMESTAMP`. Any mentioned by the `TIMESTAMP` frame is expected
592/// to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per
593/// frame are allowed. If a whole video is unknown, then it should be
594/// mentioned just once with ",,,,,,,,,," in place of `LABEL,
595/// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
596///
597/// Sample top level CSV file:
598///
599/// TRAIN,gs://folder/train_videos.csv
600/// TEST,gs://folder/test_videos.csv
601/// UNASSIGNED,gs://folder/other_videos.csv
602///
603/// Seven sample rows of a CSV file for a particular ML_USE:
604///
605/// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
606/// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
607/// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
608/// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
609/// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
610/// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
611/// gs://folder/video2.avi,,,,,,,,,,,
612/// </section>
613/// </div>
614///
615///
616/// <h4>AutoML Natural Language</h4>
617///
618///
619/// <div class="ds-selector-tabs"><section><h5>Entity Extraction</h5>
620///
621/// See [Preparing your training
622/// data](/natural-language/automl/entity-analysis/docs/prepare) for more
623/// information.
624///
625/// One or more CSV file(s) with each line in the following format:
626///
627/// ML_USE,GCS_FILE_PATH
628///
629/// * `ML_USE` - Identifies the data set that the current row (file) applies
630/// to.
631/// This value can be one of the following:
632/// * `TRAIN` - Rows in this file are used to train the model.
633/// * `TEST` - Rows in this file are used to test the model during training.
634/// * `UNASSIGNED` - Rows in this file are not categorized. They are
635/// Automatically divided into train and test data. 80% for training and
636/// 20% for testing..
637///
638/// * `GCS_FILE_PATH` - a Identifies JSON Lines (.JSONL) file stored in
639/// Google Cloud Storage that contains in-line text in-line as documents
640/// for model training.
641///
642/// After the training data set has been determined from the `TRAIN` and
643/// `UNASSIGNED` CSV files, the training data is divided into train and
644/// validation data sets. 70% for training and 30% for validation.
645///
646/// For example:
647///
648/// TRAIN,gs://folder/file1.jsonl
649/// VALIDATE,gs://folder/file2.jsonl
650/// TEST,gs://folder/file3.jsonl
651///
652/// **In-line JSONL files**
653///
654/// In-line .JSONL files contain, per line, a JSON document that wraps a
655/// [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by
656/// one or more [`annotations`][google.cloud.automl.v1.AnnotationPayload]
657/// fields, which have `display_name` and `text_extraction` fields to describe
658/// the entity from the text snippet. Multiple JSON documents can be separated
659/// using line breaks (\n).
660///
661/// The supplied text must be annotated exhaustively. For example, if you
662/// include the text "horse", but do not label it as "animal",
663/// then "horse" is assumed to not be an "animal".
664///
665/// Any given text snippet content must have 30,000 characters or
666/// less, and also be UTF-8 NFC encoded. ASCII is accepted as it is
667/// UTF-8 NFC encoded.
668///
669/// For example:
670///
671/// {
672/// "text_snippet": {
673/// "content": "dog car cat"
674/// },
675/// "annotations": [
676/// {
677/// "display_name": "animal",
678/// "text_extraction": {
679/// "text_segment": {"start_offset": 0, "end_offset": 2}
680/// }
681/// },
682/// {
683/// "display_name": "vehicle",
684/// "text_extraction": {
685/// "text_segment": {"start_offset": 4, "end_offset": 6}
686/// }
687/// },
688/// {
689/// "display_name": "animal",
690/// "text_extraction": {
691/// "text_segment": {"start_offset": 8, "end_offset": 10}
692/// }
693/// }
694/// ]
695/// }\n
696/// {
697/// "text_snippet": {
698/// "content": "This dog is good."
699/// },
700/// "annotations": [
701/// {
702/// "display_name": "animal",
703/// "text_extraction": {
704/// "text_segment": {"start_offset": 5, "end_offset": 7}
705/// }
706/// }
707/// ]
708/// }
709///
710/// **JSONL files that reference documents**
711///
712/// .JSONL files contain, per line, a JSON document that wraps a
713/// `input_config` that contains the path to a source document.
714/// Multiple JSON documents can be separated using line breaks (\n).
715///
716/// Supported document extensions: .PDF, .TIF, .TIFF
717///
718/// For example:
719///
720/// {
721/// "document": {
722/// "input_config": {
723/// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
724/// }
725/// }
726/// }
727/// }\n
728/// {
729/// "document": {
730/// "input_config": {
731/// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
732/// }
733/// }
734/// }
735/// }
736///
737/// **In-line JSONL files with document layout information**
738///
739/// **Note:** You can only annotate documents using the UI. The format described
740/// below applies to annotated documents exported using the UI or `exportData`.
741///
742/// In-line .JSONL files for documents contain, per line, a JSON document
743/// that wraps a `document` field that provides the textual content of the
744/// document and the layout information.
745///
746/// For example:
747///
748/// {
749/// "document": {
750/// "document_text": {
751/// "content": "dog car cat"
752/// }
753/// "layout": [
754/// {
755/// "text_segment": {
756/// "start_offset": 0,
757/// "end_offset": 11,
758/// },
759/// "page_number": 1,
760/// "bounding_poly": {
761/// "normalized_vertices": [
762/// {"x": 0.1, "y": 0.1},
763/// {"x": 0.1, "y": 0.3},
764/// {"x": 0.3, "y": 0.3},
765/// {"x": 0.3, "y": 0.1},
766/// ],
767/// },
768/// "text_segment_type": TOKEN,
769/// }
770/// ],
771/// "document_dimensions": {
772/// "width": 8.27,
773/// "height": 11.69,
774/// "unit": INCH,
775/// }
776/// "page_count": 3,
777/// },
778/// "annotations": [
779/// {
780/// "display_name": "animal",
781/// "text_extraction": {
782/// "text_segment": {"start_offset": 0, "end_offset": 3}
783/// }
784/// },
785/// {
786/// "display_name": "vehicle",
787/// "text_extraction": {
788/// "text_segment": {"start_offset": 4, "end_offset": 7}
789/// }
790/// },
791/// {
792/// "display_name": "animal",
793/// "text_extraction": {
794/// "text_segment": {"start_offset": 8, "end_offset": 11}
795/// }
796/// },
797/// ],
798///
799///
800///
801///
802/// </section><section><h5>Classification</h5>
803///
804/// See [Preparing your training
805/// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
806/// information.
807///
808/// One or more CSV file(s) with each line in the following format:
809///
810/// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
811///
812/// * `ML_USE` - Identifies the data set that the current row (file) applies
813/// to.
814/// This value can be one of the following:
815/// * `TRAIN` - Rows in this file are used to train the model.
816/// * `TEST` - Rows in this file are used to test the model during training.
817/// * `UNASSIGNED` - Rows in this file are not categorized. They are
818/// Automatically divided into train and test data. 80% for training and
819/// 20% for testing.
820///
821/// * `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
822/// the column content is a valid Google Cloud Storage file path, that is,
823/// prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
824/// the content is enclosed in double quotes (""), it is treated as a
825/// `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
826/// file with supported extension and UTF-8 encoding, for example,
827/// "gs://folder/content.txt" AutoML imports the file content
828/// as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
829/// excluding quotes. In both cases, size of the content must be 10MB or
830/// less in size. For zip files, the size of each file inside the zip must be
831/// 10MB or less in size.
832///
833/// For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
834///
835/// The `ML_USE` and `LABEL` columns are optional.
836/// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
837///
838/// A maximum of 100 unique labels are allowed per CSV row.
839///
840/// Sample rows:
841///
842/// TRAIN,"They have bad food and very rude",RudeService,BadFood
843/// gs://folder/content.txt,SlowService
844/// TEST,gs://folder/document.pdf
845/// VALIDATE,gs://folder/text_files.zip,BadFood
846///
847///
848///
849/// </section><section><h5>Sentiment Analysis</h5>
850///
851/// See [Preparing your training
852/// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
853/// information.
854///
855/// CSV file(s) with each line in format:
856///
857/// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
858///
859/// * `ML_USE` - Identifies the data set that the current row (file) applies
860/// to.
861/// This value can be one of the following:
862/// * `TRAIN` - Rows in this file are used to train the model.
863/// * `TEST` - Rows in this file are used to test the model during training.
864/// * `UNASSIGNED` - Rows in this file are not categorized. They are
865/// Automatically divided into train and test data. 80% for training and
866/// 20% for testing.
867///
868/// * `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
869/// the column content is a valid Google Cloud Storage file path, that is,
870/// prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
871/// the content is enclosed in double quotes (""), it is treated as a
872/// `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
873/// file with supported extension and UTF-8 encoding, for example,
874/// "gs://folder/content.txt" AutoML imports the file content
875/// as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
876/// excluding quotes. In both cases, size of the content must be 128kB or
877/// less in size. For zip files, the size of each file inside the zip must be
878/// 128kB or less in size.
879///
880/// The `ML_USE` and `SENTIMENT` columns are optional.
881/// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
882///
883/// * `SENTIMENT` - An integer between 0 and
884/// Dataset.text_sentiment_dataset_metadata.sentiment_max
885/// (inclusive). Describes the ordinal of the sentiment - higher
886/// value means a more positive sentiment. All the values are
887/// completely relative, i.e. neither 0 needs to mean a negative or
888/// neutral sentiment nor sentiment_max needs to mean a positive one -
889/// it is just required that 0 is the least positive sentiment
890/// in the data, and sentiment_max is the most positive one.
891/// The SENTIMENT shouldn't be confused with "score" or "magnitude"
892/// from the previous Natural Language Sentiment Analysis API.
893/// All SENTIMENT values between 0 and sentiment_max must be
894/// represented in the imported data. On prediction the same 0 to
895/// sentiment_max range will be used. The difference between
896/// neighboring sentiment values needs not to be uniform, e.g. 1 and
897/// 2 may be similar whereas the difference between 2 and 3 may be
898/// large.
899///
900/// Sample rows:
901///
902/// TRAIN,"@freewrytin this is way too good for your product",2
903/// gs://folder/content.txt,3
904/// TEST,gs://folder/document.pdf
905/// VALIDATE,gs://folder/text_files.zip,2
906/// </section>
907/// </div>
908///
909///
910///
911/// <h4>AutoML Tables</h4><div class="ui-datasection-main"><section
912/// class="selected">
913///
914/// See [Preparing your training
915/// data](https://cloud.google.com/automl-tables/docs/prepare) for more
916/// information.
917///
918/// You can use either
919/// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
920/// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
921/// All input is concatenated into a
922/// single
923///
924/// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
925///
926/// **For gcs_source:**
927///
928/// CSV file(s), where the first row of the first file is the header,
929/// containing unique column names. If the first row of a subsequent
930/// file is the same as the header, then it is also treated as a
931/// header. All other rows contain values for the corresponding
932/// columns.
933///
934/// Each .CSV file by itself must be 10GB or smaller, and their total
935/// size must be 100GB or smaller.
936///
937/// First three sample rows of a CSV file:
938/// <pre>
939/// "Id","First Name","Last Name","Dob","Addresses"
940///
941/// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
942///
943/// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
944/// </pre>
945/// **For bigquery_source:**
946///
947/// An URI of a BigQuery table. The user data size of the BigQuery
948/// table must be 100GB or smaller.
949///
950/// An imported table must have between 2 and 1,000 columns, inclusive,
951/// and between 1000 and 100,000,000 rows, inclusive. There are at most 5
952/// import data running in parallel.
953///
954/// </section>
955/// </div>
956///
957///
958/// **Input field definitions:**
959///
960/// `ML_USE`
961/// : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED")
962/// Describes how the given example (file) should be used for model
963/// training. "UNASSIGNED" can be used when user has no preference.
964///
965/// `GCS_FILE_PATH`
966/// : The path to a file on Google Cloud Storage. For example,
967/// "gs://folder/image1.png".
968///
969/// `LABEL`
970/// : A display name of an object on an image, video etc., e.g. "dog".
971/// Must be up to 32 characters long and can consist only of ASCII
972/// Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9.
973/// For each label an AnnotationSpec is created which display_name
974/// becomes the label; AnnotationSpecs are given back in predictions.
975///
976/// `INSTANCE_ID`
977/// : A positive integer that identifies a specific instance of a
978/// labeled entity on an example. Used e.g. to track two cars on
979/// a video while being able to tell apart which one is which.
980///
981/// `BOUNDING_BOX`
982/// : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
983/// A rectangle parallel to the frame of the example (image,
984/// video). If 4 vertices are given they are connected by edges
985/// in the order provided, if 2 are given they are recognized
986/// as diagonally opposite vertices of the rectangle.
987///
988/// `VERTEX`
989/// : (`COORDINATE,COORDINATE`)
990/// First coordinate is horizontal (x), the second is vertical (y).
991///
992/// `COORDINATE`
993/// : A float in 0 to 1 range, relative to total length of
994/// image or video in given dimension. For fractions the
995/// leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
996/// Point 0,0 is in top left.
997///
998/// `TIME_SEGMENT_START`
999/// : (`TIME_OFFSET`)
1000/// Expresses a beginning, inclusive, of a time segment
1001/// within an example that has a time dimension
1002/// (e.g. video).
1003///
1004/// `TIME_SEGMENT_END`
1005/// : (`TIME_OFFSET`)
1006/// Expresses an end, exclusive, of a time segment within
1007/// n example that has a time dimension (e.g. video).
1008///
1009/// `TIME_OFFSET`
1010/// : A number of seconds as measured from the start of an
1011/// example (e.g. video). Fractions are allowed, up to a
1012/// microsecond precision. "inf" is allowed, and it means the end
1013/// of the example.
1014///
1015/// `TEXT_SNIPPET`
1016/// : The content of a text snippet, UTF-8 encoded, enclosed within
1017/// double quotes ("").
1018///
1019/// `DOCUMENT`
1020/// : A field that provides the textual content with document and the layout
1021/// information.
1022///
1023///
1024/// **Errors:**
1025///
1026/// If any of the provided CSV files can't be parsed or if more than certain
1027/// percent of CSV rows cannot be processed then the operation fails and
1028/// nothing is imported. Regardless of overall success or failure the per-row
1029/// failures, up to a certain count cap, is listed in
1030/// Operation.metadata.partial_failures.
1031///
1032#[derive(Clone, PartialEq, ::prost::Message)]
1033pub struct InputConfig {
1034 /// Additional domain-specific parameters describing the semantic of the
1035 /// imported data, any string must be up to 25000
1036 /// characters long.
1037 ///
1038 /// <h4>AutoML Tables</h4>
1039 ///
1040 /// `schema_inference_version`
1041 /// : (integer) This value must be supplied.
1042 /// The version of the
1043 /// algorithm to use for the initial inference of the
1044 /// column data types of the imported table. Allowed values: "1".
1045 #[prost(map="string, string", tag="2")]
1046 pub params: ::std::collections::HashMap<std::string::String, std::string::String>,
1047 /// The source of the input.
1048 #[prost(oneof="input_config::Source", tags="1")]
1049 pub source: ::std::option::Option<input_config::Source>,
1050}
1051pub mod input_config {
1052 /// The source of the input.
1053 #[derive(Clone, PartialEq, ::prost::Oneof)]
1054 pub enum Source {
1055 /// The Google Cloud Storage location for the input content.
1056 /// For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with
1057 /// a structure described in [InputConfig][google.cloud.automl.v1.InputConfig].
1058 #[prost(message, tag="1")]
1059 GcsSource(super::GcsSource),
1060 }
1061}
1062/// Input configuration for BatchPredict Action.
1063///
1064/// The format of input depends on the ML problem of the model used for
1065/// prediction. As input source the
1066/// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
1067/// is expected, unless specified otherwise.
1068///
1069/// The formats are represented in EBNF with commas being literal and with
1070/// non-terminal symbols defined near the end of this comment. The formats
1071/// are:
1072///
1073/// <h4>AutoML Vision</h4>
1074/// <div class="ds-selector-tabs"><section><h5>Classification</h5>
1075///
1076/// One or more CSV files where each line is a single column:
1077///
1078/// GCS_FILE_PATH
1079///
1080/// The Google Cloud Storage location of an image of up to
1081/// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
1082/// This path is treated as the ID in the batch predict output.
1083///
1084/// Sample rows:
1085///
1086/// gs://folder/image1.jpeg
1087/// gs://folder/image2.gif
1088/// gs://folder/image3.png
1089///
1090/// </section><section><h5>Object Detection</h5>
1091///
1092/// One or more CSV files where each line is a single column:
1093///
1094/// GCS_FILE_PATH
1095///
1096/// The Google Cloud Storage location of an image of up to
1097/// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
1098/// This path is treated as the ID in the batch predict output.
1099///
1100/// Sample rows:
1101///
1102/// gs://folder/image1.jpeg
1103/// gs://folder/image2.gif
1104/// gs://folder/image3.png
1105/// </section>
1106/// </div>
1107///
1108/// <h4>AutoML Video Intelligence</h4>
1109/// <div class="ds-selector-tabs"><section><h5>Classification</h5>
1110///
1111/// One or more CSV files where each line is a single column:
1112///
1113/// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
1114///
1115/// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in
1116/// size and up to 3h in duration duration.
1117/// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
1118///
1119/// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
1120/// length of the video, and the end time must be after the start time.
1121///
1122/// Sample rows:
1123///
1124/// gs://folder/video1.mp4,10,40
1125/// gs://folder/video1.mp4,20,60
1126/// gs://folder/vid2.mov,0,inf
1127///
1128/// </section><section><h5>Object Tracking</h5>
1129///
1130/// One or more CSV files where each line is a single column:
1131///
1132/// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
1133///
1134/// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in
1135/// size and up to 3h in duration duration.
1136/// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
1137///
1138/// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
1139/// length of the video, and the end time must be after the start time.
1140///
1141/// Sample rows:
1142///
1143/// gs://folder/video1.mp4,10,40
1144/// gs://folder/video1.mp4,20,60
1145/// gs://folder/vid2.mov,0,inf
1146/// </section>
1147/// </div>
1148///
1149/// <h4>AutoML Natural Language</h4>
1150/// <div class="ds-selector-tabs"><section><h5>Classification</h5>
1151///
1152/// One or more CSV files where each line is a single column:
1153///
1154/// GCS_FILE_PATH
1155///
1156/// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
1157/// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
1158///
1159/// Text files can be no larger than 10MB in size.
1160///
1161/// Sample rows:
1162///
1163/// gs://folder/text1.txt
1164/// gs://folder/text2.pdf
1165/// gs://folder/text3.tif
1166///
1167/// </section><section><h5>Sentiment Analysis</h5>
1168/// One or more CSV files where each line is a single column:
1169///
1170/// GCS_FILE_PATH
1171///
1172/// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
1173/// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
1174///
1175/// Text files can be no larger than 128kB in size.
1176///
1177/// Sample rows:
1178///
1179/// gs://folder/text1.txt
1180/// gs://folder/text2.pdf
1181/// gs://folder/text3.tif
1182///
1183/// </section><section><h5>Entity Extraction</h5>
1184///
1185/// One or more JSONL (JSON Lines) files that either provide inline text or
1186/// documents. You can only use one format, either inline text or documents,
1187/// for a single call to [AutoMl.BatchPredict].
1188///
1189/// Each JSONL file contains a per line a proto that
1190/// wraps a temporary user-assigned TextSnippet ID (string up to 2000
1191/// characters long) called "id", a TextSnippet proto (in
1192/// JSON representation) and zero or more TextFeature protos. Any given
1193/// text snippet content must have 30,000 characters or less, and also
1194/// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
1195/// unique.
1196///
1197/// Each document JSONL file contains, per line, a proto that wraps a Document
1198/// proto with `input_config` set. Each document cannot exceed 2MB in size.
1199///
1200/// Supported document extensions: .PDF, .TIF, .TIFF
1201///
1202/// Each JSONL file must not exceed 100MB in size, and no more than 20
1203/// JSONL files may be passed.
1204///
1205/// Sample inline JSONL file (Shown with artificial line
1206/// breaks. Actual line breaks are denoted by "\n".):
1207///
1208/// {
1209/// "id": "my_first_id",
1210/// "text_snippet": { "content": "dog car cat"},
1211/// "text_features": [
1212/// {
1213/// "text_segment": {"start_offset": 4, "end_offset": 6},
1214/// "structural_type": PARAGRAPH,
1215/// "bounding_poly": {
1216/// "normalized_vertices": [
1217/// {"x": 0.1, "y": 0.1},
1218/// {"x": 0.1, "y": 0.3},
1219/// {"x": 0.3, "y": 0.3},
1220/// {"x": 0.3, "y": 0.1},
1221/// ]
1222/// },
1223/// }
1224/// ],
1225/// }\n
1226/// {
1227/// "id": "2",
1228/// "text_snippet": {
1229/// "content": "Extended sample content",
1230/// "mime_type": "text/plain"
1231/// }
1232/// }
1233///
1234/// Sample document JSONL file (Shown with artificial line
1235/// breaks. Actual line breaks are denoted by "\n".):
1236///
1237/// {
1238/// "document": {
1239/// "input_config": {
1240/// "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
1241/// }
1242/// }
1243/// }
1244/// }\n
1245/// {
1246/// "document": {
1247/// "input_config": {
1248/// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
1249/// }
1250/// }
1251/// }
1252/// }
1253/// </section>
1254/// </div>
1255///
1256/// <h4>AutoML Tables</h4><div class="ui-datasection-main"><section
1257/// class="selected">
1258///
1259/// See [Preparing your training
1260/// data](https://cloud.google.com/automl-tables/docs/predict-batch) for more
1261/// information.
1262///
1263/// You can use either
1264/// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source]
1265/// or
1266/// [bigquery_source][BatchPredictInputConfig.bigquery_source].
1267///
1268/// **For gcs_source:**
1269///
1270/// CSV file(s), each by itself 10GB or smaller and total size must be
1271/// 100GB or smaller, where first file must have a header containing
1272/// column names. If the first row of a subsequent file is the same as
1273/// the header, then it is also treated as a header. All other rows
1274/// contain values for the corresponding columns.
1275///
1276/// The column names must contain the model's
1277///
1278/// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
1279/// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
1280/// (order doesn't matter). The columns corresponding to the model's
1281/// input feature column specs must contain values compatible with the
1282/// column spec's data types. Prediction on all the rows, i.e. the CSV
1283/// lines, will be attempted.
1284///
1285///
1286/// Sample rows from a CSV file:
1287/// <pre>
1288/// "First Name","Last Name","Dob","Addresses"
1289///
1290/// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
1291///
1292/// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
1293/// </pre>
1294/// **For bigquery_source:**
1295///
1296/// The URI of a BigQuery table. The user data size of the BigQuery
1297/// table must be 100GB or smaller.
1298///
1299/// The column names must contain the model's
1300///
1301/// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
1302/// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
1303/// (order doesn't matter). The columns corresponding to the model's
1304/// input feature column specs must contain values compatible with the
1305/// column spec's data types. Prediction on all the rows of the table
1306/// will be attempted.
1307/// </section>
1308/// </div>
1309///
1310/// **Input field definitions:**
1311///
1312/// `GCS_FILE_PATH`
1313/// : The path to a file on Google Cloud Storage. For example,
1314/// "gs://folder/video.avi".
1315///
1316/// `TIME_SEGMENT_START`
1317/// : (`TIME_OFFSET`)
1318/// Expresses a beginning, inclusive, of a time segment
1319/// within an example that has a time dimension
1320/// (e.g. video).
1321///
1322/// `TIME_SEGMENT_END`
1323/// : (`TIME_OFFSET`)
1324/// Expresses an end, exclusive, of a time segment within
1325/// n example that has a time dimension (e.g. video).
1326///
1327/// `TIME_OFFSET`
1328/// : A number of seconds as measured from the start of an
1329/// example (e.g. video). Fractions are allowed, up to a
1330/// microsecond precision. "inf" is allowed, and it means the end
1331/// of the example.
1332///
1333/// **Errors:**
1334///
1335/// If any of the provided CSV files can't be parsed or if more than certain
1336/// percent of CSV rows cannot be processed then the operation fails and
1337/// prediction does not happen. Regardless of overall success or failure the
1338/// per-row failures, up to a certain count cap, will be listed in
1339/// Operation.metadata.partial_failures.
1340#[derive(Clone, PartialEq, ::prost::Message)]
1341pub struct BatchPredictInputConfig {
1342 /// The source of the input.
1343 #[prost(oneof="batch_predict_input_config::Source", tags="1")]
1344 pub source: ::std::option::Option<batch_predict_input_config::Source>,
1345}
1346pub mod batch_predict_input_config {
1347 /// The source of the input.
1348 #[derive(Clone, PartialEq, ::prost::Oneof)]
1349 pub enum Source {
1350 /// Required. The Google Cloud Storage location for the input content.
1351 #[prost(message, tag="1")]
1352 GcsSource(super::GcsSource),
1353 }
1354}
1355/// Input configuration of a [Document][google.cloud.automl.v1.Document].
1356#[derive(Clone, PartialEq, ::prost::Message)]
1357pub struct DocumentInputConfig {
1358 /// The Google Cloud Storage location of the document file. Only a single path
1359 /// should be given.
1360 ///
1361 /// Max supported size: 512MB.
1362 ///
1363 /// Supported extensions: .PDF.
1364 #[prost(message, optional, tag="1")]
1365 pub gcs_source: ::std::option::Option<GcsSource>,
1366}
1367/// * For Translation:
1368/// CSV file `translation.csv`, with each line in format:
1369/// ML_USE,GCS_FILE_PATH
1370/// GCS_FILE_PATH leads to a .TSV file which describes examples that have
1371/// given ML_USE, using the following row format per line:
1372/// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
1373/// language)
1374///
1375/// * For Tables:
1376/// Output depends on whether the dataset was imported from Google Cloud
1377/// Storage or BigQuery.
1378/// Google Cloud Storage case:
1379///
1380/// [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
1381/// must be set. Exported are CSV file(s) `tables_1.csv`,
1382/// `tables_2.csv`,...,`tables_N.csv` with each having as header line
1383/// the table's column names, and all other lines contain values for
1384/// the header columns.
1385/// BigQuery case:
1386///
1387/// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
1388/// pointing to a BigQuery project must be set. In the given project a
1389/// new dataset will be created with name
1390///
1391/// `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>`
1392/// where <automl-dataset-display-name> will be made
1393/// BigQuery-dataset-name compatible (e.g. most special characters will
1394/// become underscores), and timestamp will be in
1395/// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
1396/// dataset a new table called `primary_table` will be created, and
1397/// filled with precisely the same data as this obtained on import.
1398#[derive(Clone, PartialEq, ::prost::Message)]
1399pub struct OutputConfig {
1400 /// The destination of the output.
1401 #[prost(oneof="output_config::Destination", tags="1")]
1402 pub destination: ::std::option::Option<output_config::Destination>,
1403}
1404pub mod output_config {
1405 /// The destination of the output.
1406 #[derive(Clone, PartialEq, ::prost::Oneof)]
1407 pub enum Destination {
1408 /// Required. The Google Cloud Storage location where the output is to be written to.
1409 /// For Image Object Detection, Text Extraction, Video Classification and
1410 /// Tables, in the given directory a new directory will be created with name:
1411 /// export_data-<dataset-display-name>-<timestamp-of-export-call> where
1412 /// timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
1413 /// output will be written into that directory.
1414 #[prost(message, tag="1")]
1415 GcsDestination(super::GcsDestination),
1416 }
1417}
1418/// Output configuration for BatchPredict Action.
1419///
1420/// As destination the
1421///
1422/// [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
1423/// must be set unless specified otherwise for a domain. If gcs_destination is
1424/// set then in the given directory a new directory is created. Its name
1425/// will be
1426/// "prediction-<model-display-name>-<timestamp-of-prediction-call>",
1427/// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
1428/// of it depends on the ML problem the predictions are made for.
1429///
1430/// * For Image Classification:
1431/// In the created directory files `image_classification_1.jsonl`,
1432/// `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
1433/// will be created, where N may be 1, and depends on the
1434/// total number of the successfully predicted images and annotations.
1435/// A single image will be listed only once with all its annotations,
1436/// and its annotations will never be split across files.
1437/// Each .JSONL file will contain, per line, a JSON representation of a
1438/// proto that wraps image's "ID" : "<id_value>" followed by a list of
1439/// zero or more AnnotationPayload protos (called annotations), which
1440/// have classification detail populated.
1441/// If prediction for any image failed (partially or completely), then an
1442/// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
1443/// files will be created (N depends on total number of failed
1444/// predictions). These files will have a JSON representation of a proto
1445/// that wraps the same "ID" : "<id_value>" but here followed by
1446/// exactly one
1447///
1448/// [`google.rpc.Status`](https:
1449/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1450/// containing only `code` and `message`fields.
1451///
1452/// * For Image Object Detection:
1453/// In the created directory files `image_object_detection_1.jsonl`,
1454/// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
1455/// will be created, where N may be 1, and depends on the
1456/// total number of the successfully predicted images and annotations.
1457/// Each .JSONL file will contain, per line, a JSON representation of a
1458/// proto that wraps image's "ID" : "<id_value>" followed by a list of
1459/// zero or more AnnotationPayload protos (called annotations), which
1460/// have image_object_detection detail populated. A single image will
1461/// be listed only once with all its annotations, and its annotations
1462/// will never be split across files.
1463/// If prediction for any image failed (partially or completely), then
1464/// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
1465/// files will be created (N depends on total number of failed
1466/// predictions). These files will have a JSON representation of a proto
1467/// that wraps the same "ID" : "<id_value>" but here followed by
1468/// exactly one
1469///
1470/// [`google.rpc.Status`](https:
1471/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1472/// containing only `code` and `message`fields.
1473/// * For Video Classification:
1474/// In the created directory a video_classification.csv file, and a .JSON
1475/// file per each video classification requested in the input (i.e. each
1476/// line in given CSV(s)), will be created.
1477///
1478/// The format of video_classification.csv is:
1479///
1480/// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
1481/// where:
1482/// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
1483/// the prediction input lines (i.e. video_classification.csv has
1484/// precisely the same number of lines as the prediction input had.)
1485/// JSON_FILE_NAME = Name of .JSON file in the output directory, which
1486/// contains prediction responses for the video time segment.
1487/// STATUS = "OK" if prediction completed successfully, or an error code
1488/// with message otherwise. If STATUS is not "OK" then the .JSON file
1489/// for that line may not exist or be empty.
1490///
1491/// Each .JSON file, assuming STATUS is "OK", will contain a list of
1492/// AnnotationPayload protos in JSON format, which are the predictions
1493/// for the video time segment the file is assigned to in the
1494/// video_classification.csv. All AnnotationPayload protos will have
1495/// video_classification field set, and will be sorted by
1496/// video_classification.type field (note that the returned types are
1497/// governed by `classifaction_types` parameter in
1498/// [PredictService.BatchPredictRequest.params][]).
1499///
1500/// * For Video Object Tracking:
1501/// In the created directory a video_object_tracking.csv file will be
1502/// created, and multiple files video_object_trackinng_1.json,
1503/// video_object_trackinng_2.json,..., video_object_trackinng_N.json,
1504/// where N is the number of requests in the input (i.e. the number of
1505/// lines in given CSV(s)).
1506///
1507/// The format of video_object_tracking.csv is:
1508///
1509/// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
1510/// where:
1511/// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
1512/// the prediction input lines (i.e. video_object_tracking.csv has
1513/// precisely the same number of lines as the prediction input had.)
1514/// JSON_FILE_NAME = Name of .JSON file in the output directory, which
1515/// contains prediction responses for the video time segment.
1516/// STATUS = "OK" if prediction completed successfully, or an error
1517/// code with message otherwise. If STATUS is not "OK" then the .JSON
1518/// file for that line may not exist or be empty.
1519///
1520/// Each .JSON file, assuming STATUS is "OK", will contain a list of
1521/// AnnotationPayload protos in JSON format, which are the predictions
1522/// for each frame of the video time segment the file is assigned to in
1523/// video_object_tracking.csv. All AnnotationPayload protos will have
1524/// video_object_tracking field set.
1525/// * For Text Classification:
1526/// In the created directory files `text_classification_1.jsonl`,
1527/// `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
1528/// will be created, where N may be 1, and depends on the
1529/// total number of inputs and annotations found.
1530///
1531/// Each .JSONL file will contain, per line, a JSON representation of a
1532/// proto that wraps input text file (or document) in
1533/// the text snippet (or document) proto and a list of
1534/// zero or more AnnotationPayload protos (called annotations), which
1535/// have classification detail populated. A single text file (or
1536/// document) will be listed only once with all its annotations, and its
1537/// annotations will never be split across files.
1538///
1539/// If prediction for any input file (or document) failed (partially or
1540/// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
1541/// `errors_N.jsonl` files will be created (N depends on total number of
1542/// failed predictions). These files will have a JSON representation of a
1543/// proto that wraps input file followed by exactly one
1544///
1545/// [`google.rpc.Status`](https:
1546/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1547/// containing only `code` and `message`.
1548///
1549/// * For Text Sentiment:
1550/// In the created directory files `text_sentiment_1.jsonl`,
1551/// `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
1552/// will be created, where N may be 1, and depends on the
1553/// total number of inputs and annotations found.
1554///
1555/// Each .JSONL file will contain, per line, a JSON representation of a
1556/// proto that wraps input text file (or document) in
1557/// the text snippet (or document) proto and a list of
1558/// zero or more AnnotationPayload protos (called annotations), which
1559/// have text_sentiment detail populated. A single text file (or
1560/// document) will be listed only once with all its annotations, and its
1561/// annotations will never be split across files.
1562///
1563/// If prediction for any input file (or document) failed (partially or
1564/// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
1565/// `errors_N.jsonl` files will be created (N depends on total number of
1566/// failed predictions). These files will have a JSON representation of a
1567/// proto that wraps input file followed by exactly one
1568///
1569/// [`google.rpc.Status`](https:
1570/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1571/// containing only `code` and `message`.
1572///
1573/// * For Text Extraction:
1574/// In the created directory files `text_extraction_1.jsonl`,
1575/// `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
1576/// will be created, where N may be 1, and depends on the
1577/// total number of inputs and annotations found.
1578/// The contents of these .JSONL file(s) depend on whether the input
1579/// used inline text, or documents.
1580/// If input was inline, then each .JSONL file will contain, per line,
1581/// a JSON representation of a proto that wraps given in request text
1582/// snippet's "id" (if specified), followed by input text snippet,
1583/// and a list of zero or more
1584/// AnnotationPayload protos (called annotations), which have
1585/// text_extraction detail populated. A single text snippet will be
1586/// listed only once with all its annotations, and its annotations will
1587/// never be split across files.
1588/// If input used documents, then each .JSONL file will contain, per
1589/// line, a JSON representation of a proto that wraps given in request
1590/// document proto, followed by its OCR-ed representation in the form
1591/// of a text snippet, finally followed by a list of zero or more
1592/// AnnotationPayload protos (called annotations), which have
1593/// text_extraction detail populated and refer, via their indices, to
1594/// the OCR-ed text snippet. A single document (and its text snippet)
1595/// will be listed only once with all its annotations, and its
1596/// annotations will never be split across files.
1597/// If prediction for any text snippet failed (partially or completely),
1598/// then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
1599/// `errors_N.jsonl` files will be created (N depends on total number of
1600/// failed predictions). These files will have a JSON representation of a
1601/// proto that wraps either the "id" : "<id_value>" (in case of inline)
1602/// or the document proto (in case of document) but here followed by
1603/// exactly one
1604///
1605/// [`google.rpc.Status`](https:
1606/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1607/// containing only `code` and `message`.
1608///
1609/// * For Tables:
1610/// Output depends on whether
1611///
1612/// [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
1613/// or
1614///
1615/// [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
1616/// is set (either is allowed).
1617/// Google Cloud Storage case:
1618/// In the created directory files `tables_1.csv`, `tables_2.csv`,...,
1619/// `tables_N.csv` will be created, where N may be 1, and depends on
1620/// the total number of the successfully predicted rows.
1621/// For all CLASSIFICATION
1622///
1623/// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
1624/// Each .csv file will contain a header, listing all columns'
1625///
1626/// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
1627/// given on input followed by M target column names in the format of
1628///
1629/// "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
1630///
1631/// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>_<target
1632/// value>_score" where M is the number of distinct target values,
1633/// i.e. number of distinct values in the target column of the table
1634/// used to train the model. Subsequent lines will contain the
1635/// respective values of successfully predicted rows, with the last,
1636/// i.e. the target, columns having the corresponding prediction
1637/// [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score].
1638/// For REGRESSION and FORECASTING
1639///
1640/// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
1641/// Each .csv file will contain a header, listing all columns'
1642/// [display_name-s][google.cloud.automl.v1p1beta.display_name]
1643/// given on input followed by the predicted target column with name
1644/// in the format of
1645///
1646/// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
1647///
1648/// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
1649/// Subsequent lines will contain the respective values of
1650/// successfully predicted rows, with the last, i.e. the target,
1651/// column having the predicted target value.
1652/// If prediction for any rows failed, then an additional
1653/// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
1654/// created (N depends on total number of failed rows). These files
1655/// will have analogous format as `tables_*.csv`, but always with a
1656/// single target column having
1657///
1658/// [`google.rpc.Status`](https:
1659/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1660/// represented as a JSON string, and containing only `code` and
1661/// `message`.
1662/// BigQuery case:
1663///
1664/// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
1665/// pointing to a BigQuery project must be set. In the given project a
1666/// new dataset will be created with name
1667/// `prediction_<model-display-name>_<timestamp-of-prediction-call>`
1668/// where <model-display-name> will be made
1669/// BigQuery-dataset-name compatible (e.g. most special characters will
1670/// become underscores), and timestamp will be in
1671/// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
1672/// two tables will be created, `predictions`, and `errors`.
1673/// The `predictions` table's column names will be the input columns'
1674///
1675/// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
1676/// followed by the target column with name in the format of
1677///
1678/// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
1679///
1680/// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
1681/// The input feature columns will contain the respective values of
1682/// successfully predicted rows, with the target column having an
1683/// ARRAY of
1684///
1685/// [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
1686/// represented as STRUCT-s, containing
1687/// [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
1688/// The `errors` table contains rows for which the prediction has
1689/// failed, it has analogous input columns while the target column name
1690/// is in the format of
1691///
1692/// "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
1693///
1694/// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
1695/// and as a value has
1696///
1697/// [`google.rpc.Status`](https:
1698/// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1699/// represented as a STRUCT, and containing only `code` and `message`.
1700#[derive(Clone, PartialEq, ::prost::Message)]
1701pub struct BatchPredictOutputConfig {
1702 /// The destination of the output.
1703 #[prost(oneof="batch_predict_output_config::Destination", tags="1")]
1704 pub destination: ::std::option::Option<batch_predict_output_config::Destination>,
1705}
1706pub mod batch_predict_output_config {
1707 /// The destination of the output.
1708 #[derive(Clone, PartialEq, ::prost::Oneof)]
1709 pub enum Destination {
1710 /// Required. The Google Cloud Storage location of the directory where the output is to
1711 /// be written to.
1712 #[prost(message, tag="1")]
1713 GcsDestination(super::GcsDestination),
1714 }
1715}
1716/// Output configuration for ModelExport Action.
1717#[derive(Clone, PartialEq, ::prost::Message)]
1718pub struct ModelExportOutputConfig {
1719 /// The format in which the model must be exported. The available, and default,
1720 /// formats depend on the problem and model type (if given problem and type
1721 /// combination doesn't have a format listed, it means its models are not
1722 /// exportable):
1723 ///
1724 /// * For Image Classification mobile-low-latency-1, mobile-versatile-1,
1725 /// mobile-high-accuracy-1:
1726 /// "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
1727 /// "docker".
1728 ///
1729 /// * For Image Classification mobile-core-ml-low-latency-1,
1730 /// mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
1731 /// "core_ml" (default).
1732 ///
1733 /// * For Image Object Detection mobile-low-latency-1, mobile-versatile-1,
1734 /// mobile-high-accuracy-1:
1735 /// "tflite", "tf_saved_model", "tf_js".
1736 /// Formats description:
1737 ///
1738 /// * tflite - Used for Android mobile devices.
1739 /// * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
1740 /// devices.
1741 /// * tf_saved_model - A tensorflow model in SavedModel format.
1742 /// * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
1743 /// be used in the browser and in Node.js using JavaScript.
1744 /// * docker - Used for Docker containers. Use the params field to customize
1745 /// the container. The container is verified to work correctly on
1746 /// ubuntu 16.04 operating system. See more at
1747 /// [containers
1748 ///
1749 /// quickstart](https:
1750 /// //cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
1751 /// * core_ml - Used for iOS mobile devices.
1752 #[prost(string, tag="4")]
1753 pub model_format: std::string::String,
1754 /// Additional model-type and format specific parameters describing the
1755 /// requirements for the to be exported model files, any string must be up to
1756 /// 25000 characters long.
1757 ///
1758 /// * For `docker` format:
1759 /// `cpu_architecture` - (string) "x86_64" (default).
1760 /// `gpu_architecture` - (string) "none" (default), "nvidia".
1761 #[prost(map="string, string", tag="2")]
1762 pub params: ::std::collections::HashMap<std::string::String, std::string::String>,
1763 /// The destination of the output.
1764 #[prost(oneof="model_export_output_config::Destination", tags="1")]
1765 pub destination: ::std::option::Option<model_export_output_config::Destination>,
1766}
1767pub mod model_export_output_config {
1768 /// The destination of the output.
1769 #[derive(Clone, PartialEq, ::prost::Oneof)]
1770 pub enum Destination {
1771 /// Required. The Google Cloud Storage location where the model is to be written to.
1772 /// This location may only be set for the following model formats:
1773 /// "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml".
1774 ///
1775 /// Under the directory given as the destination a new one with name
1776 /// "model-export-<model-display-name>-<timestamp-of-export-call>",
1777 /// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
1778 /// will be created. Inside the model and any of its supporting files
1779 /// will be written.
1780 #[prost(message, tag="1")]
1781 GcsDestination(super::GcsDestination),
1782 }
1783}
1784/// The Google Cloud Storage location for the input content.
1785#[derive(Clone, PartialEq, ::prost::Message)]
1786pub struct GcsSource {
1787 /// Required. Google Cloud Storage URIs to input files, up to 2000
1788 /// characters long. Accepted forms:
1789 /// * Full object path, e.g. gs://bucket/directory/object.csv
1790 #[prost(string, repeated, tag="1")]
1791 pub input_uris: ::std::vec::Vec<std::string::String>,
1792}
1793/// The Google Cloud Storage location where the output is to be written to.
1794#[derive(Clone, PartialEq, ::prost::Message)]
1795pub struct GcsDestination {
1796 /// Required. Google Cloud Storage URI to output directory, up to 2000
1797 /// characters long.
1798 /// Accepted forms:
1799 /// * Prefix path: gs://bucket/directory
1800 /// The requesting user must have write permission to the bucket.
1801 /// The directory is created if it doesn't exist.
1802 #[prost(string, tag="1")]
1803 pub output_uri_prefix: std::string::String,
1804}
1805/// A contiguous part of a text (string), assuming it has an UTF-8 NFC encoding.
1806#[derive(Clone, PartialEq, ::prost::Message)]
1807pub struct TextSegment {
1808 /// Output only. The content of the TextSegment.
1809 #[prost(string, tag="3")]
1810 pub content: std::string::String,
1811 /// Required. Zero-based character index of the first character of the text
1812 /// segment (counting characters from the beginning of the text).
1813 #[prost(int64, tag="1")]
1814 pub start_offset: i64,
1815 /// Required. Zero-based character index of the first character past the end of
1816 /// the text segment (counting character from the beginning of the text).
1817 /// The character at the end_offset is NOT included in the text segment.
1818 #[prost(int64, tag="2")]
1819 pub end_offset: i64,
1820}
1821/// A representation of an image.
1822/// Only images up to 30MB in size are supported.
1823#[derive(Clone, PartialEq, ::prost::Message)]
1824pub struct Image {
1825 /// Output only. HTTP URI to the thumbnail image.
1826 #[prost(string, tag="4")]
1827 pub thumbnail_uri: std::string::String,
1828 /// Input only. The data representing the image.
1829 /// For Predict calls [image_bytes][google.cloud.automl.v1.Image.image_bytes] must be set .
1830 #[prost(oneof="image::Data", tags="1")]
1831 pub data: ::std::option::Option<image::Data>,
1832}
1833pub mod image {
1834 /// Input only. The data representing the image.
1835 /// For Predict calls [image_bytes][google.cloud.automl.v1.Image.image_bytes] must be set .
1836 #[derive(Clone, PartialEq, ::prost::Oneof)]
1837 pub enum Data {
1838 /// Image content represented as a stream of bytes.
1839 /// Note: As with all `bytes` fields, protobuffers use a pure binary
1840 /// representation, whereas JSON representations use base64.
1841 #[prost(bytes, tag="1")]
1842 ImageBytes(std::vec::Vec<u8>),
1843 }
1844}
1845/// A representation of a text snippet.
1846#[derive(Clone, PartialEq, ::prost::Message)]
1847pub struct TextSnippet {
1848 /// Required. The content of the text snippet as a string. Up to 250000
1849 /// characters long.
1850 #[prost(string, tag="1")]
1851 pub content: std::string::String,
1852 /// Optional. The format of [content][google.cloud.automl.v1.TextSnippet.content]. Currently the only two allowed
1853 /// values are "text/html" and "text/plain". If left blank, the format is
1854 /// automatically determined from the type of the uploaded [content][google.cloud.automl.v1.TextSnippet.content].
1855 #[prost(string, tag="2")]
1856 pub mime_type: std::string::String,
1857 /// Output only. HTTP URI where you can download the content.
1858 #[prost(string, tag="4")]
1859 pub content_uri: std::string::String,
1860}
1861/// Message that describes dimension of a document.
1862#[derive(Clone, PartialEq, ::prost::Message)]
1863pub struct DocumentDimensions {
1864 /// Unit of the dimension.
1865 #[prost(enumeration="document_dimensions::DocumentDimensionUnit", tag="1")]
1866 pub unit: i32,
1867 /// Width value of the document, works together with the unit.
1868 #[prost(float, tag="2")]
1869 pub width: f32,
1870 /// Height value of the document, works together with the unit.
1871 #[prost(float, tag="3")]
1872 pub height: f32,
1873}
1874pub mod document_dimensions {
1875 /// Unit of the document dimension.
1876 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1877 #[repr(i32)]
1878 pub enum DocumentDimensionUnit {
1879 /// Should not be used.
1880 Unspecified = 0,
1881 /// Document dimension is measured in inches.
1882 Inch = 1,
1883 /// Document dimension is measured in centimeters.
1884 Centimeter = 2,
1885 /// Document dimension is measured in points. 72 points = 1 inch.
1886 Point = 3,
1887 }
1888}
1889/// A structured text document e.g. a PDF.
1890#[derive(Clone, PartialEq, ::prost::Message)]
1891pub struct Document {
1892 /// An input config specifying the content of the document.
1893 #[prost(message, optional, tag="1")]
1894 pub input_config: ::std::option::Option<DocumentInputConfig>,
1895 /// The plain text version of this document.
1896 #[prost(message, optional, tag="2")]
1897 pub document_text: ::std::option::Option<TextSnippet>,
1898 /// Describes the layout of the document.
1899 /// Sorted by [page_number][].
1900 #[prost(message, repeated, tag="3")]
1901 pub layout: ::std::vec::Vec<document::Layout>,
1902 /// The dimensions of the page in the document.
1903 #[prost(message, optional, tag="4")]
1904 pub document_dimensions: ::std::option::Option<DocumentDimensions>,
1905 /// Number of pages in the document.
1906 #[prost(int32, tag="5")]
1907 pub page_count: i32,
1908}
1909pub mod document {
1910 /// Describes the layout information of a [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the document.
1911 #[derive(Clone, PartialEq, ::prost::Message)]
1912 pub struct Layout {
1913 /// Text Segment that represents a segment in
1914 /// [document_text][google.cloud.automl.v1p1beta.Document.document_text].
1915 #[prost(message, optional, tag="1")]
1916 pub text_segment: ::std::option::Option<super::TextSegment>,
1917 /// Page number of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the original document, starts
1918 /// from 1.
1919 #[prost(int32, tag="2")]
1920 pub page_number: i32,
1921 /// The position of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the page.
1922 /// Contains exactly 4
1923 ///
1924 /// [normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices]
1925 /// and they are connected by edges in the order provided, which will
1926 /// represent a rectangle parallel to the frame. The
1927 /// [NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex] are
1928 /// relative to the page.
1929 /// Coordinates are based on top-left as point (0,0).
1930 #[prost(message, optional, tag="3")]
1931 pub bounding_poly: ::std::option::Option<super::BoundingPoly>,
1932 /// The type of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in document.
1933 #[prost(enumeration="layout::TextSegmentType", tag="4")]
1934 pub text_segment_type: i32,
1935 }
1936 pub mod layout {
1937 /// The type of TextSegment in the context of the original document.
1938 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1939 #[repr(i32)]
1940 pub enum TextSegmentType {
1941 /// Should not be used.
1942 Unspecified = 0,
1943 /// The text segment is a token. e.g. word.
1944 Token = 1,
1945 /// The text segment is a paragraph.
1946 Paragraph = 2,
1947 /// The text segment is a form field.
1948 FormField = 3,
1949 /// The text segment is the name part of a form field. It will be treated
1950 /// as child of another FORM_FIELD TextSegment if its span is subspan of
1951 /// another TextSegment with type FORM_FIELD.
1952 FormFieldName = 4,
1953 /// The text segment is the text content part of a form field. It will be
1954 /// treated as child of another FORM_FIELD TextSegment if its span is
1955 /// subspan of another TextSegment with type FORM_FIELD.
1956 FormFieldContents = 5,
1957 /// The text segment is a whole table, including headers, and all rows.
1958 Table = 6,
1959 /// The text segment is a table's headers. It will be treated as child of
1960 /// another TABLE TextSegment if its span is subspan of another TextSegment
1961 /// with type TABLE.
1962 TableHeader = 7,
1963 /// The text segment is a row in table. It will be treated as child of
1964 /// another TABLE TextSegment if its span is subspan of another TextSegment
1965 /// with type TABLE.
1966 TableRow = 8,
1967 /// The text segment is a cell in table. It will be treated as child of
1968 /// another TABLE_ROW TextSegment if its span is subspan of another
1969 /// TextSegment with type TABLE_ROW.
1970 TableCell = 9,
1971 }
1972 }
1973}
1974/// Example data used for training or prediction.
1975#[derive(Clone, PartialEq, ::prost::Message)]
1976pub struct ExamplePayload {
1977 /// Required. The example data.
1978 #[prost(oneof="example_payload::Payload", tags="1, 2, 4")]
1979 pub payload: ::std::option::Option<example_payload::Payload>,
1980}
1981pub mod example_payload {
1982 /// Required. The example data.
1983 #[derive(Clone, PartialEq, ::prost::Oneof)]
1984 pub enum Payload {
1985 /// Example image.
1986 #[prost(message, tag="1")]
1987 Image(super::Image),
1988 /// Example text.
1989 #[prost(message, tag="2")]
1990 TextSnippet(super::TextSnippet),
1991 /// Example document.
1992 #[prost(message, tag="4")]
1993 Document(super::Document),
1994 }
1995}
1996/// Dataset metadata that is specific to translation.
1997#[derive(Clone, PartialEq, ::prost::Message)]
1998pub struct TranslationDatasetMetadata {
1999 /// Required. The BCP-47 language code of the source language.
2000 #[prost(string, tag="1")]
2001 pub source_language_code: std::string::String,
2002 /// Required. The BCP-47 language code of the target language.
2003 #[prost(string, tag="2")]
2004 pub target_language_code: std::string::String,
2005}
2006/// Evaluation metrics for the dataset.
2007#[derive(Clone, PartialEq, ::prost::Message)]
2008pub struct TranslationEvaluationMetrics {
2009 /// Output only. BLEU score.
2010 #[prost(double, tag="1")]
2011 pub bleu_score: f64,
2012 /// Output only. BLEU score for base model.
2013 #[prost(double, tag="2")]
2014 pub base_bleu_score: f64,
2015}
2016/// Model metadata that is specific to translation.
2017#[derive(Clone, PartialEq, ::prost::Message)]
2018pub struct TranslationModelMetadata {
2019 /// The resource name of the model to use as a baseline to train the custom
2020 /// model. If unset, we use the default base model provided by Google
2021 /// Translate. Format:
2022 /// `projects/{project_id}/locations/{location_id}/models/{model_id}`
2023 #[prost(string, tag="1")]
2024 pub base_model: std::string::String,
2025 /// Output only. Inferred from the dataset.
2026 /// The source language (The BCP-47 language code) that is used for training.
2027 #[prost(string, tag="2")]
2028 pub source_language_code: std::string::String,
2029 /// Output only. The target language (The BCP-47 language code) that is used
2030 /// for training.
2031 #[prost(string, tag="3")]
2032 pub target_language_code: std::string::String,
2033}
2034/// Annotation details specific to translation.
2035#[derive(Clone, PartialEq, ::prost::Message)]
2036pub struct TranslationAnnotation {
2037 /// Output only . The translated content.
2038 #[prost(message, optional, tag="1")]
2039 pub translated_content: ::std::option::Option<TextSnippet>,
2040}
2041/// A workspace for solving a single, particular machine learning (ML) problem.
2042/// A workspace contains examples that may be annotated.
2043#[derive(Clone, PartialEq, ::prost::Message)]
2044pub struct Dataset {
2045 /// Output only. The resource name of the dataset.
2046 /// Form: `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`
2047 #[prost(string, tag="1")]
2048 pub name: std::string::String,
2049 /// Required. The name of the dataset to show in the interface. The name can be
2050 /// up to 32 characters long and can consist only of ASCII Latin letters A-Z
2051 /// and a-z, underscores
2052 /// (_), and ASCII digits 0-9.
2053 #[prost(string, tag="2")]
2054 pub display_name: std::string::String,
2055 /// User-provided description of the dataset. The description can be up to
2056 /// 25000 characters long.
2057 #[prost(string, tag="3")]
2058 pub description: std::string::String,
2059 /// Output only. The number of examples in the dataset.
2060 #[prost(int32, tag="21")]
2061 pub example_count: i32,
2062 /// Output only. Timestamp when this dataset was created.
2063 #[prost(message, optional, tag="14")]
2064 pub create_time: ::std::option::Option<::prost_types::Timestamp>,
2065 /// Used to perform consistent read-modify-write updates. If not set, a blind
2066 /// "overwrite" update happens.
2067 #[prost(string, tag="17")]
2068 pub etag: std::string::String,
2069 /// Optional. The labels with user-defined metadata to organize your dataset.
2070 ///
2071 /// Label keys and values can be no longer than 64 characters
2072 /// (Unicode codepoints), can only contain lowercase letters, numeric
2073 /// characters, underscores and dashes. International characters are allowed.
2074 /// Label values are optional. Label keys must start with a letter.
2075 ///
2076 /// See https://goo.gl/xmQnxf for more information on and examples of labels.
2077 #[prost(map="string, string", tag="39")]
2078 pub labels: ::std::collections::HashMap<std::string::String, std::string::String>,
2079 /// Required.
2080 /// The dataset metadata that is specific to the problem type.
2081 #[prost(oneof="dataset::DatasetMetadata", tags="23, 24, 25, 26, 28, 30")]
2082 pub dataset_metadata: ::std::option::Option<dataset::DatasetMetadata>,
2083}
2084pub mod dataset {
2085 /// Required.
2086 /// The dataset metadata that is specific to the problem type.
2087 #[derive(Clone, PartialEq, ::prost::Oneof)]
2088 pub enum DatasetMetadata {
2089 /// Metadata for a dataset used for translation.
2090 #[prost(message, tag="23")]
2091 TranslationDatasetMetadata(super::TranslationDatasetMetadata),
2092 /// Metadata for a dataset used for image classification.
2093 #[prost(message, tag="24")]
2094 ImageClassificationDatasetMetadata(super::ImageClassificationDatasetMetadata),
2095 /// Metadata for a dataset used for text classification.
2096 #[prost(message, tag="25")]
2097 TextClassificationDatasetMetadata(super::TextClassificationDatasetMetadata),
2098 /// Metadata for a dataset used for image object detection.
2099 #[prost(message, tag="26")]
2100 ImageObjectDetectionDatasetMetadata(super::ImageObjectDetectionDatasetMetadata),
2101 /// Metadata for a dataset used for text extraction.
2102 #[prost(message, tag="28")]
2103 TextExtractionDatasetMetadata(super::TextExtractionDatasetMetadata),
2104 /// Metadata for a dataset used for text sentiment.
2105 #[prost(message, tag="30")]
2106 TextSentimentDatasetMetadata(super::TextSentimentDatasetMetadata),
2107 }
2108}
2109/// Metadata used across all long running operations returned by AutoML API.
2110#[derive(Clone, PartialEq, ::prost::Message)]
2111pub struct OperationMetadata {
2112 /// Output only. Progress of operation. Range: [0, 100].
2113 /// Not used currently.
2114 #[prost(int32, tag="13")]
2115 pub progress_percent: i32,
2116 /// Output only. Partial failures encountered.
2117 /// E.g. single files that couldn't be read.
2118 /// This field should never exceed 20 entries.
2119 /// Status details field will contain standard GCP error details.
2120 #[prost(message, repeated, tag="2")]
2121 pub partial_failures: ::std::vec::Vec<super::super::super::rpc::Status>,
2122 /// Output only. Time when the operation was created.
2123 #[prost(message, optional, tag="3")]
2124 pub create_time: ::std::option::Option<::prost_types::Timestamp>,
2125 /// Output only. Time when the operation was updated for the last time.
2126 #[prost(message, optional, tag="4")]
2127 pub update_time: ::std::option::Option<::prost_types::Timestamp>,
2128 /// Ouptut only. Details of specific operation. Even if this field is empty,
2129 /// the presence allows to distinguish different types of operations.
2130 #[prost(oneof="operation_metadata::Details", tags="8, 24, 25, 10, 30, 15, 16, 21, 22")]
2131 pub details: ::std::option::Option<operation_metadata::Details>,
2132}
2133pub mod operation_metadata {
2134 /// Ouptut only. Details of specific operation. Even if this field is empty,
2135 /// the presence allows to distinguish different types of operations.
2136 #[derive(Clone, PartialEq, ::prost::Oneof)]
2137 pub enum Details {
2138 /// Details of a Delete operation.
2139 #[prost(message, tag="8")]
2140 DeleteDetails(super::DeleteOperationMetadata),
2141 /// Details of a DeployModel operation.
2142 #[prost(message, tag="24")]
2143 DeployModelDetails(super::DeployModelOperationMetadata),
2144 /// Details of an UndeployModel operation.
2145 #[prost(message, tag="25")]
2146 UndeployModelDetails(super::UndeployModelOperationMetadata),
2147 /// Details of CreateModel operation.
2148 #[prost(message, tag="10")]
2149 CreateModelDetails(super::CreateModelOperationMetadata),
2150 /// Details of CreateDataset operation.
2151 #[prost(message, tag="30")]
2152 CreateDatasetDetails(super::CreateDatasetOperationMetadata),
2153 /// Details of ImportData operation.
2154 #[prost(message, tag="15")]
2155 ImportDataDetails(super::ImportDataOperationMetadata),
2156 /// Details of BatchPredict operation.
2157 #[prost(message, tag="16")]
2158 BatchPredictDetails(super::BatchPredictOperationMetadata),
2159 /// Details of ExportData operation.
2160 #[prost(message, tag="21")]
2161 ExportDataDetails(super::ExportDataOperationMetadata),
2162 /// Details of ExportModel operation.
2163 #[prost(message, tag="22")]
2164 ExportModelDetails(super::ExportModelOperationMetadata),
2165 }
2166}
2167/// Details of operations that perform deletes of any entities.
2168#[derive(Clone, PartialEq, ::prost::Message)]
2169pub struct DeleteOperationMetadata {
2170}
2171/// Details of DeployModel operation.
2172#[derive(Clone, PartialEq, ::prost::Message)]
2173pub struct DeployModelOperationMetadata {
2174}
2175/// Details of UndeployModel operation.
2176#[derive(Clone, PartialEq, ::prost::Message)]
2177pub struct UndeployModelOperationMetadata {
2178}
2179/// Details of CreateDataset operation.
2180#[derive(Clone, PartialEq, ::prost::Message)]
2181pub struct CreateDatasetOperationMetadata {
2182}
2183/// Details of CreateModel operation.
2184#[derive(Clone, PartialEq, ::prost::Message)]
2185pub struct CreateModelOperationMetadata {
2186}
2187/// Details of ImportData operation.
2188#[derive(Clone, PartialEq, ::prost::Message)]
2189pub struct ImportDataOperationMetadata {
2190}
2191/// Details of ExportData operation.
2192#[derive(Clone, PartialEq, ::prost::Message)]
2193pub struct ExportDataOperationMetadata {
2194 /// Output only. Information further describing this export data's output.
2195 #[prost(message, optional, tag="1")]
2196 pub output_info: ::std::option::Option<export_data_operation_metadata::ExportDataOutputInfo>,
2197}
2198pub mod export_data_operation_metadata {
2199 /// Further describes this export data's output.
2200 /// Supplements
2201 /// [OutputConfig][google.cloud.automl.v1.OutputConfig].
2202 #[derive(Clone, PartialEq, ::prost::Message)]
2203 pub struct ExportDataOutputInfo {
2204 /// The output location to which the exported data is written.
2205 #[prost(oneof="export_data_output_info::OutputLocation", tags="1")]
2206 pub output_location: ::std::option::Option<export_data_output_info::OutputLocation>,
2207 }
2208 pub mod export_data_output_info {
2209 /// The output location to which the exported data is written.
2210 #[derive(Clone, PartialEq, ::prost::Oneof)]
2211 pub enum OutputLocation {
2212 /// The full path of the Google Cloud Storage directory created, into which
2213 /// the exported data is written.
2214 #[prost(string, tag="1")]
2215 GcsOutputDirectory(std::string::String),
2216 }
2217 }
2218}
2219/// Details of BatchPredict operation.
2220#[derive(Clone, PartialEq, ::prost::Message)]
2221pub struct BatchPredictOperationMetadata {
2222 /// Output only. The input config that was given upon starting this
2223 /// batch predict operation.
2224 #[prost(message, optional, tag="1")]
2225 pub input_config: ::std::option::Option<BatchPredictInputConfig>,
2226 /// Output only. Information further describing this batch predict's output.
2227 #[prost(message, optional, tag="2")]
2228 pub output_info: ::std::option::Option<batch_predict_operation_metadata::BatchPredictOutputInfo>,
2229}
2230pub mod batch_predict_operation_metadata {
2231 /// Further describes this batch predict's output.
2232 /// Supplements
2233 ///
2234 /// [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig].
2235 #[derive(Clone, PartialEq, ::prost::Message)]
2236 pub struct BatchPredictOutputInfo {
2237 /// The output location into which prediction output is written.
2238 #[prost(oneof="batch_predict_output_info::OutputLocation", tags="1")]
2239 pub output_location: ::std::option::Option<batch_predict_output_info::OutputLocation>,
2240 }
2241 pub mod batch_predict_output_info {
2242 /// The output location into which prediction output is written.
2243 #[derive(Clone, PartialEq, ::prost::Oneof)]
2244 pub enum OutputLocation {
2245 /// The full path of the Google Cloud Storage directory created, into which
2246 /// the prediction output is written.
2247 #[prost(string, tag="1")]
2248 GcsOutputDirectory(std::string::String),
2249 }
2250 }
2251}
2252/// Details of ExportModel operation.
2253#[derive(Clone, PartialEq, ::prost::Message)]
2254pub struct ExportModelOperationMetadata {
2255 /// Output only. Information further describing the output of this model
2256 /// export.
2257 #[prost(message, optional, tag="2")]
2258 pub output_info: ::std::option::Option<export_model_operation_metadata::ExportModelOutputInfo>,
2259}
2260pub mod export_model_operation_metadata {
2261 /// Further describes the output of model export.
2262 /// Supplements
2263 /// [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
2264 #[derive(Clone, PartialEq, ::prost::Message)]
2265 pub struct ExportModelOutputInfo {
2266 /// The full path of the Google Cloud Storage directory created, into which
2267 /// the model will be exported.
2268 #[prost(string, tag="1")]
2269 pub gcs_output_directory: std::string::String,
2270 }
2271}
2272/// Contains annotation details specific to text sentiment.
2273#[derive(Clone, PartialEq, ::prost::Message)]
2274pub struct TextSentimentAnnotation {
2275 /// Output only. The sentiment with the semantic, as given to the
2276 /// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] when populating the dataset from which the model used
2277 /// for the prediction had been trained.
2278 /// The sentiment values are between 0 and
2279 /// Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive),
2280 /// with higher value meaning more positive sentiment. They are completely
2281 /// relative, i.e. 0 means least positive sentiment and sentiment_max means
2282 /// the most positive from the sentiments present in the train data. Therefore
2283 /// e.g. if train data had only negative sentiment, then sentiment_max, would
2284 /// be still negative (although least negative).
2285 /// The sentiment shouldn't be confused with "score" or "magnitude"
2286 /// from the previous Natural Language Sentiment Analysis API.
2287 #[prost(int32, tag="1")]
2288 pub sentiment: i32,
2289}
2290/// Model evaluation metrics for text sentiment problems.
2291#[derive(Clone, PartialEq, ::prost::Message)]
2292pub struct TextSentimentEvaluationMetrics {
2293 /// Output only. Precision.
2294 #[prost(float, tag="1")]
2295 pub precision: f32,
2296 /// Output only. Recall.
2297 #[prost(float, tag="2")]
2298 pub recall: f32,
2299 /// Output only. The harmonic mean of recall and precision.
2300 #[prost(float, tag="3")]
2301 pub f1_score: f32,
2302 /// Output only. Mean absolute error. Only set for the overall model
2303 /// evaluation, not for evaluation of a single annotation spec.
2304 #[prost(float, tag="4")]
2305 pub mean_absolute_error: f32,
2306 /// Output only. Mean squared error. Only set for the overall model
2307 /// evaluation, not for evaluation of a single annotation spec.
2308 #[prost(float, tag="5")]
2309 pub mean_squared_error: f32,
2310 /// Output only. Linear weighted kappa. Only set for the overall model
2311 /// evaluation, not for evaluation of a single annotation spec.
2312 #[prost(float, tag="6")]
2313 pub linear_kappa: f32,
2314 /// Output only. Quadratic weighted kappa. Only set for the overall model
2315 /// evaluation, not for evaluation of a single annotation spec.
2316 #[prost(float, tag="7")]
2317 pub quadratic_kappa: f32,
2318 /// Output only. Confusion matrix of the evaluation.
2319 /// Only set for the overall model evaluation, not for evaluation of a single
2320 /// annotation spec.
2321 #[prost(message, optional, tag="8")]
2322 pub confusion_matrix: ::std::option::Option<classification_evaluation_metrics::ConfusionMatrix>,
2323}
2324/// Annotation details for image object detection.
2325#[derive(Clone, PartialEq, ::prost::Message)]
2326pub struct ImageObjectDetectionAnnotation {
2327 /// Output only. The rectangle representing the object location.
2328 #[prost(message, optional, tag="1")]
2329 pub bounding_box: ::std::option::Option<BoundingPoly>,
2330 /// Output only. The confidence that this annotation is positive for the parent example,
2331 /// value in [0, 1], higher means higher positivity confidence.
2332 #[prost(float, tag="2")]
2333 pub score: f32,
2334}
2335/// Bounding box matching model metrics for a single intersection-over-union
2336/// threshold and multiple label match confidence thresholds.
2337#[derive(Clone, PartialEq, ::prost::Message)]
2338pub struct BoundingBoxMetricsEntry {
2339 /// Output only. The intersection-over-union threshold value used to compute
2340 /// this metrics entry.
2341 #[prost(float, tag="1")]
2342 pub iou_threshold: f32,
2343 /// Output only. The mean average precision, most often close to au_prc.
2344 #[prost(float, tag="2")]
2345 pub mean_average_precision: f32,
2346 /// Output only. Metrics for each label-match confidence_threshold from
2347 /// 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is
2348 /// derived from them.
2349 #[prost(message, repeated, tag="3")]
2350 pub confidence_metrics_entries: ::std::vec::Vec<bounding_box_metrics_entry::ConfidenceMetricsEntry>,
2351}
2352pub mod bounding_box_metrics_entry {
2353 /// Metrics for a single confidence threshold.
2354 #[derive(Clone, PartialEq, ::prost::Message)]
2355 pub struct ConfidenceMetricsEntry {
2356 /// Output only. The confidence threshold value used to compute the metrics.
2357 #[prost(float, tag="1")]
2358 pub confidence_threshold: f32,
2359 /// Output only. Recall under the given confidence threshold.
2360 #[prost(float, tag="2")]
2361 pub recall: f32,
2362 /// Output only. Precision under the given confidence threshold.
2363 #[prost(float, tag="3")]
2364 pub precision: f32,
2365 /// Output only. The harmonic mean of recall and precision.
2366 #[prost(float, tag="4")]
2367 pub f1_score: f32,
2368 }
2369}
2370/// Model evaluation metrics for image object detection problems.
2371/// Evaluates prediction quality of labeled bounding boxes.
2372#[derive(Clone, PartialEq, ::prost::Message)]
2373pub struct ImageObjectDetectionEvaluationMetrics {
2374 /// Output only. The total number of bounding boxes (i.e. summed over all
2375 /// images) the ground truth used to create this evaluation had.
2376 #[prost(int32, tag="1")]
2377 pub evaluated_bounding_box_count: i32,
2378 /// Output only. The bounding boxes match metrics for each
2379 /// Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
2380 /// and each label confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99
2381 /// pair.
2382 #[prost(message, repeated, tag="2")]
2383 pub bounding_box_metrics_entries: ::std::vec::Vec<BoundingBoxMetricsEntry>,
2384 /// Output only. The single metric for bounding boxes evaluation:
2385 /// the mean_average_precision averaged over all bounding_box_metrics_entries.
2386 #[prost(float, tag="3")]
2387 pub bounding_box_mean_average_precision: f32,
2388}
2389/// Annotation for identifying spans of text.
2390#[derive(Clone, PartialEq, ::prost::Message)]
2391pub struct TextExtractionAnnotation {
2392 /// Output only. A confidence estimate between 0.0 and 1.0. A higher value
2393 /// means greater confidence in correctness of the annotation.
2394 #[prost(float, tag="1")]
2395 pub score: f32,
2396 /// Required. Text extraction annotations can either be a text segment or a
2397 /// text relation.
2398 #[prost(oneof="text_extraction_annotation::Annotation", tags="3")]
2399 pub annotation: ::std::option::Option<text_extraction_annotation::Annotation>,
2400}
2401pub mod text_extraction_annotation {
2402 /// Required. Text extraction annotations can either be a text segment or a
2403 /// text relation.
2404 #[derive(Clone, PartialEq, ::prost::Oneof)]
2405 pub enum Annotation {
2406 /// An entity annotation will set this, which is the part of the original
2407 /// text to which the annotation pertains.
2408 #[prost(message, tag="3")]
2409 TextSegment(super::TextSegment),
2410 }
2411}
2412/// Model evaluation metrics for text extraction problems.
2413#[derive(Clone, PartialEq, ::prost::Message)]
2414pub struct TextExtractionEvaluationMetrics {
2415 /// Output only. The Area under precision recall curve metric.
2416 #[prost(float, tag="1")]
2417 pub au_prc: f32,
2418 /// Output only. Metrics that have confidence thresholds.
2419 /// Precision-recall curve can be derived from it.
2420 #[prost(message, repeated, tag="2")]
2421 pub confidence_metrics_entries: ::std::vec::Vec<text_extraction_evaluation_metrics::ConfidenceMetricsEntry>,
2422}
2423pub mod text_extraction_evaluation_metrics {
2424 /// Metrics for a single confidence threshold.
2425 #[derive(Clone, PartialEq, ::prost::Message)]
2426 pub struct ConfidenceMetricsEntry {
2427 /// Output only. The confidence threshold value used to compute the metrics.
2428 /// Only annotations with score of at least this threshold are considered to
2429 /// be ones the model would return.
2430 #[prost(float, tag="1")]
2431 pub confidence_threshold: f32,
2432 /// Output only. Recall under the given confidence threshold.
2433 #[prost(float, tag="3")]
2434 pub recall: f32,
2435 /// Output only. Precision under the given confidence threshold.
2436 #[prost(float, tag="4")]
2437 pub precision: f32,
2438 /// Output only. The harmonic mean of recall and precision.
2439 #[prost(float, tag="5")]
2440 pub f1_score: f32,
2441 }
2442}
2443/// Evaluation results of a model.
2444#[derive(Clone, PartialEq, ::prost::Message)]
2445pub struct ModelEvaluation {
2446 /// Output only. Resource name of the model evaluation.
2447 /// Format:
2448 ///
2449 /// `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
2450 #[prost(string, tag="1")]
2451 pub name: std::string::String,
2452 /// Output only. The ID of the annotation spec that the model evaluation applies to. The
2453 /// The ID is empty for the overall model evaluation.
2454 /// For Tables annotation specs in the dataset do not exist and this ID is
2455 /// always not set, but for CLASSIFICATION
2456 ///
2457 /// [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
2458 /// the
2459 /// [display_name][google.cloud.automl.v1.ModelEvaluation.display_name]
2460 /// field is used.
2461 #[prost(string, tag="2")]
2462 pub annotation_spec_id: std::string::String,
2463 /// Output only. The value of
2464 /// [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
2465 /// at the moment when the model was trained. Because this field returns a
2466 /// value at model training time, for different models trained from the same
2467 /// dataset, the values may differ, since display names could had been changed
2468 /// between the two model's trainings. For Tables CLASSIFICATION
2469 ///
2470 /// [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
2471 /// distinct values of the target column at the moment of the model evaluation
2472 /// are populated here.
2473 /// The display_name is empty for the overall model evaluation.
2474 #[prost(string, tag="15")]
2475 pub display_name: std::string::String,
2476 /// Output only. Timestamp when this model evaluation was created.
2477 #[prost(message, optional, tag="5")]
2478 pub create_time: ::std::option::Option<::prost_types::Timestamp>,
2479 /// Output only. The number of examples used for model evaluation, i.e. for
2480 /// which ground truth from time of model creation is compared against the
2481 /// predicted annotations created by the model.
2482 /// For overall ModelEvaluation (i.e. with annotation_spec_id not set) this is
2483 /// the total number of all examples used for evaluation.
2484 /// Otherwise, this is the count of examples that according to the ground
2485 /// truth were annotated by the
2486 ///
2487 /// [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id].
2488 #[prost(int32, tag="6")]
2489 pub evaluated_example_count: i32,
2490 /// Output only. Problem type specific evaluation metrics.
2491 #[prost(oneof="model_evaluation::Metrics", tags="8, 9, 12, 11, 13")]
2492 pub metrics: ::std::option::Option<model_evaluation::Metrics>,
2493}
2494pub mod model_evaluation {
2495 /// Output only. Problem type specific evaluation metrics.
2496 #[derive(Clone, PartialEq, ::prost::Oneof)]
2497 pub enum Metrics {
2498 /// Model evaluation metrics for image, text, video and tables
2499 /// classification.
2500 /// Tables problem is considered a classification when the target column
2501 /// is CATEGORY DataType.
2502 #[prost(message, tag="8")]
2503 ClassificationEvaluationMetrics(super::ClassificationEvaluationMetrics),
2504 /// Model evaluation metrics for translation.
2505 #[prost(message, tag="9")]
2506 TranslationEvaluationMetrics(super::TranslationEvaluationMetrics),
2507 /// Model evaluation metrics for image object detection.
2508 #[prost(message, tag="12")]
2509 ImageObjectDetectionEvaluationMetrics(super::ImageObjectDetectionEvaluationMetrics),
2510 /// Evaluation metrics for text sentiment models.
2511 #[prost(message, tag="11")]
2512 TextSentimentEvaluationMetrics(super::TextSentimentEvaluationMetrics),
2513 /// Evaluation metrics for text extraction models.
2514 #[prost(message, tag="13")]
2515 TextExtractionEvaluationMetrics(super::TextExtractionEvaluationMetrics),
2516 }
2517}
2518/// Contains annotation information that is relevant to AutoML.
2519#[derive(Clone, PartialEq, ::prost::Message)]
2520pub struct AnnotationPayload {
2521 /// Output only . The resource ID of the annotation spec that
2522 /// this annotation pertains to. The annotation spec comes from either an
2523 /// ancestor dataset, or the dataset that was used to train the model in use.
2524 #[prost(string, tag="1")]
2525 pub annotation_spec_id: std::string::String,
2526 /// Output only. The value of
2527 /// [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
2528 /// when the model was trained. Because this field returns a value at model
2529 /// training time, for different models trained using the same dataset, the
2530 /// returned value could be different as model owner could update the
2531 /// `display_name` between any two model training.
2532 #[prost(string, tag="5")]
2533 pub display_name: std::string::String,
2534 /// Output only . Additional information about the annotation
2535 /// specific to the AutoML domain.
2536 #[prost(oneof="annotation_payload::Detail", tags="2, 3, 4, 6, 7")]
2537 pub detail: ::std::option::Option<annotation_payload::Detail>,
2538}
2539pub mod annotation_payload {
2540 /// Output only . Additional information about the annotation
2541 /// specific to the AutoML domain.
2542 #[derive(Clone, PartialEq, ::prost::Oneof)]
2543 pub enum Detail {
2544 /// Annotation details for translation.
2545 #[prost(message, tag="2")]
2546 Translation(super::TranslationAnnotation),
2547 /// Annotation details for content or image classification.
2548 #[prost(message, tag="3")]
2549 Classification(super::ClassificationAnnotation),
2550 /// Annotation details for image object detection.
2551 #[prost(message, tag="4")]
2552 ImageObjectDetection(super::ImageObjectDetectionAnnotation),
2553 /// Annotation details for text extraction.
2554 #[prost(message, tag="6")]
2555 TextExtraction(super::TextExtractionAnnotation),
2556 /// Annotation details for text sentiment.
2557 #[prost(message, tag="7")]
2558 TextSentiment(super::TextSentimentAnnotation),
2559 }
2560}
2561/// Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
2562#[derive(Clone, PartialEq, ::prost::Message)]
2563pub struct PredictRequest {
2564 /// Required. Name of the model requested to serve the prediction.
2565 #[prost(string, tag="1")]
2566 pub name: std::string::String,
2567 /// Required. Payload to perform a prediction on. The payload must match the
2568 /// problem type that the model was trained to solve.
2569 #[prost(message, optional, tag="2")]
2570 pub payload: ::std::option::Option<ExamplePayload>,
2571 /// Additional domain-specific parameters, any string must be up to 25000
2572 /// characters long.
2573 ///
2574 /// AutoML Vision Classification
2575 ///
2576 /// `score_threshold`
2577 /// : (float) A value from 0.0 to 1.0. When the model
2578 /// makes predictions for an image, it will only produce results that have
2579 /// at least this confidence score. The default is 0.5.
2580 ///
2581 /// AutoML Vision Object Detection
2582 ///
2583 /// `score_threshold`
2584 /// : (float) When Model detects objects on the image,
2585 /// it will only produce bounding boxes which have at least this
2586 /// confidence score. Value in 0 to 1 range, default is 0.5.
2587 ///
2588 /// `max_bounding_box_count`
2589 /// : (int64) The maximum number of bounding
2590 /// boxes returned. The default is 100. The
2591 /// number of returned bounding boxes might be limited by the server.
2592 ///
2593 /// AutoML Tables
2594 ///
2595 /// `feature_importance`
2596 /// : (boolean) Whether
2597 ///
2598 /// [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
2599 /// is populated in the returned list of
2600 /// [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
2601 /// objects. The default is false.
2602 #[prost(map="string, string", tag="3")]
2603 pub params: ::std::collections::HashMap<std::string::String, std::string::String>,
2604}
2605/// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
2606#[derive(Clone, PartialEq, ::prost::Message)]
2607pub struct PredictResponse {
2608 /// Prediction result.
2609 /// AutoML Translation and AutoML Natural Language Sentiment Analysis
2610 /// return precisely one payload.
2611 #[prost(message, repeated, tag="1")]
2612 pub payload: ::std::vec::Vec<AnnotationPayload>,
2613 /// The preprocessed example that AutoML actually makes prediction on.
2614 /// Empty if AutoML does not preprocess the input example.
2615 ///
2616 /// For AutoML Natural Language (Classification, Entity Extraction, and
2617 /// Sentiment Analysis), if the input is a document, the recognized text is
2618 /// returned in the
2619 /// [document_text][google.cloud.automl.v1.Document.document_text]
2620 /// property.
2621 #[prost(message, optional, tag="3")]
2622 pub preprocessed_input: ::std::option::Option<ExamplePayload>,
2623 /// Additional domain-specific prediction response metadata.
2624 ///
2625 /// AutoML Vision Object Detection
2626 ///
2627 /// `max_bounding_box_count`
2628 /// : (int64) The maximum number of bounding boxes to return per image.
2629 ///
2630 /// AutoML Natural Language Sentiment Analysis
2631 ///
2632 /// `sentiment_score`
2633 /// : (float, deprecated) A value between -1 and 1,
2634 /// -1 maps to least positive sentiment, while 1 maps to the most positive
2635 /// one and the higher the score, the more positive the sentiment in the
2636 /// document is. Yet these values are relative to the training data, so
2637 /// e.g. if all data was positive then -1 is also positive (though
2638 /// the least).
2639 /// `sentiment_score` is not the same as "score" and "magnitude"
2640 /// from Sentiment Analysis in the Natural Language API.
2641 #[prost(map="string, string", tag="2")]
2642 pub metadata: ::std::collections::HashMap<std::string::String, std::string::String>,
2643}
2644/// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
2645#[derive(Clone, PartialEq, ::prost::Message)]
2646pub struct BatchPredictRequest {
2647 /// Required. Name of the model requested to serve the batch prediction.
2648 #[prost(string, tag="1")]
2649 pub name: std::string::String,
2650 /// Required. The input configuration for batch prediction.
2651 #[prost(message, optional, tag="3")]
2652 pub input_config: ::std::option::Option<BatchPredictInputConfig>,
2653 /// Required. The Configuration specifying where output predictions should
2654 /// be written.
2655 #[prost(message, optional, tag="4")]
2656 pub output_config: ::std::option::Option<BatchPredictOutputConfig>,
2657 /// Additional domain-specific parameters for the predictions, any string must
2658 /// be up to 25000 characters long.
2659 ///
2660 /// AutoML Natural Language Classification
2661 ///
2662 /// `score_threshold`
2663 /// : (float) A value from 0.0 to 1.0. When the model
2664 /// makes predictions for a text snippet, it will only produce results
2665 /// that have at least this confidence score. The default is 0.5.
2666 ///
2667 ///
2668 /// AutoML Vision Classification
2669 ///
2670 /// `score_threshold`
2671 /// : (float) A value from 0.0 to 1.0. When the model
2672 /// makes predictions for an image, it will only produce results that
2673 /// have at least this confidence score. The default is 0.5.
2674 ///
2675 /// AutoML Vision Object Detection
2676 ///
2677 /// `score_threshold`
2678 /// : (float) When Model detects objects on the image,
2679 /// it will only produce bounding boxes which have at least this
2680 /// confidence score. Value in 0 to 1 range, default is 0.5.
2681 ///
2682 /// `max_bounding_box_count`
2683 /// : (int64) The maximum number of bounding
2684 /// boxes returned per image. The default is 100, the
2685 /// number of bounding boxes returned might be limited by the server.
2686 /// AutoML Video Intelligence Classification
2687 ///
2688 /// `score_threshold`
2689 /// : (float) A value from 0.0 to 1.0. When the model
2690 /// makes predictions for a video, it will only produce results that
2691 /// have at least this confidence score. The default is 0.5.
2692 ///
2693 /// `segment_classification`
2694 /// : (boolean) Set to true to request
2695 /// segment-level classification. AutoML Video Intelligence returns
2696 /// labels and their confidence scores for the entire segment of the
2697 /// video that user specified in the request configuration.
2698 /// The default is true.
2699 ///
2700 /// `shot_classification`
2701 /// : (boolean) Set to true to request shot-level
2702 /// classification. AutoML Video Intelligence determines the boundaries
2703 /// for each camera shot in the entire segment of the video that user
2704 /// specified in the request configuration. AutoML Video Intelligence
2705 /// then returns labels and their confidence scores for each detected
2706 /// shot, along with the start and end time of the shot.
2707 /// The default is false.
2708 ///
2709 /// WARNING: Model evaluation is not done for this classification type,
2710 /// the quality of it depends on training data, but there are no metrics
2711 /// provided to describe that quality.
2712 ///
2713 /// `1s_interval_classification`
2714 /// : (boolean) Set to true to request
2715 /// classification for a video at one-second intervals. AutoML Video
2716 /// Intelligence returns labels and their confidence scores for each
2717 /// second of the entire segment of the video that user specified in the
2718 /// request configuration. The default is false.
2719 ///
2720 /// WARNING: Model evaluation is not done for this classification
2721 /// type, the quality of it depends on training data, but there are no
2722 /// metrics provided to describe that quality.
2723 ///
2724 /// AutoML Video Intelligence Object Tracking
2725 ///
2726 /// `score_threshold`
2727 /// : (float) When Model detects objects on video frames,
2728 /// it will only produce bounding boxes which have at least this
2729 /// confidence score. Value in 0 to 1 range, default is 0.5.
2730 ///
2731 /// `max_bounding_box_count`
2732 /// : (int64) The maximum number of bounding
2733 /// boxes returned per image. The default is 100, the
2734 /// number of bounding boxes returned might be limited by the server.
2735 ///
2736 /// `min_bounding_box_size`
2737 /// : (float) Only bounding boxes with shortest edge
2738 /// at least that long as a relative value of video frame size are
2739 /// returned. Value in 0 to 1 range. Default is 0.
2740 ///
2741 #[prost(map="string, string", tag="5")]
2742 pub params: ::std::collections::HashMap<std::string::String, std::string::String>,
2743}
2744/// Result of the Batch Predict. This message is returned in
2745/// [response][google.longrunning.Operation.response] of the operation returned
2746/// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
2747#[derive(Clone, PartialEq, ::prost::Message)]
2748pub struct BatchPredictResult {
2749 /// Additional domain-specific prediction response metadata.
2750 ///
2751 /// AutoML Vision Object Detection
2752 ///
2753 /// `max_bounding_box_count`
2754 /// : (int64) The maximum number of bounding boxes returned per image.
2755 ///
2756 /// AutoML Video Intelligence Object Tracking
2757 ///
2758 /// `max_bounding_box_count`
2759 /// : (int64) The maximum number of bounding boxes returned per frame.
2760 #[prost(map="string, string", tag="1")]
2761 pub metadata: ::std::collections::HashMap<std::string::String, std::string::String>,
2762}
2763# [ doc = r" Generated client implementations." ] pub mod prediction_service_client { # ! [ allow ( unused_variables , dead_code , missing_docs ) ] use tonic :: codegen :: * ; # [ doc = " AutoML Prediction API." ] # [ doc = "" ] # [ doc = " On any input that is documented to expect a string parameter in" ] # [ doc = " snake_case or kebab-case, either of those cases is accepted." ] pub struct PredictionServiceClient < T > { inner : tonic :: client :: Grpc < T > , } impl < T > PredictionServiceClient < T > where T : tonic :: client :: GrpcService < tonic :: body :: BoxBody > , T :: ResponseBody : Body + HttpBody + Send + 'static , T :: Error : Into < StdError > , < T :: ResponseBody as HttpBody > :: Error : Into < StdError > + Send , { pub fn new ( inner : T ) -> Self { let inner = tonic :: client :: Grpc :: new ( inner ) ; Self { inner } } pub fn with_interceptor ( inner : T , interceptor : impl Into < tonic :: Interceptor > ) -> Self { let inner = tonic :: client :: Grpc :: with_interceptor ( inner , interceptor ) ; Self { inner } } # [ doc = " Perform an online prediction. The prediction result is directly" ] # [ doc = " returned in the response." ] # [ doc = " Available for following ML scenarios, and their expected request payloads:" ] # [ doc = "" ] # [ doc = " AutoML Vision Classification" ] # [ doc = "" ] # [ doc = " * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB." ] # [ doc = "" ] # [ doc = " AutoML Vision Object Detection" ] # [ doc = "" ] # [ doc = " * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB." ] # [ doc = "" ] # [ doc = " AutoML Natural Language Classification" ] # [ doc = "" ] # [ doc = " * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in" ] # [ doc = " .PDF, .TIF or .TIFF format with size upto 2MB." ] # [ doc = "" ] # [ doc = " AutoML Natural Language Entity Extraction" ] # [ doc = "" ] # [ doc = " * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document" ] # [ doc = " in .PDF, .TIF or .TIFF format with size upto 20MB." ] # [ doc = "" ] # [ doc = " AutoML Natural Language Sentiment Analysis" ] # [ doc = "" ] # [ doc = " * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in" ] # [ doc = " .PDF, .TIF or .TIFF format with size upto 2MB." ] # [ doc = "" ] # [ doc = " AutoML Translation" ] # [ doc = "" ] # [ doc = " * A TextSnippet up to 25,000 characters, UTF-8 encoded." ] # [ doc = "" ] # [ doc = " AutoML Tables" ] # [ doc = "" ] # [ doc = " * A row with column values matching" ] # [ doc = " the columns of the model, up to 5MB. Not available for FORECASTING" ] # [ doc = " `prediction_type`." ] pub async fn predict ( & mut self , request : impl tonic :: IntoRequest < super :: PredictRequest > , ) -> Result < tonic :: Response < super :: PredictResponse > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.PredictionService/Predict" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], batch" ] # [ doc = " prediction result won't be immediately available in the response. Instead," ] # [ doc = " a long running operation object is returned. User can poll the operation" ] # [ doc = " result via [GetOperation][google.longrunning.Operations.GetOperation]" ] # [ doc = " method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in" ] # [ doc = " the [response][google.longrunning.Operation.response] field." ] # [ doc = " Available for following ML scenarios:" ] # [ doc = "" ] # [ doc = " * AutoML Vision Classification" ] # [ doc = " * AutoML Vision Object Detection" ] # [ doc = " * AutoML Video Intelligence Classification" ] # [ doc = " * AutoML Video Intelligence Object Tracking * AutoML Natural Language Classification" ] # [ doc = " * AutoML Natural Language Entity Extraction" ] # [ doc = " * AutoML Natural Language Sentiment Analysis" ] # [ doc = " * AutoML Tables" ] pub async fn batch_predict ( & mut self , request : impl tonic :: IntoRequest < super :: BatchPredictRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.PredictionService/BatchPredict" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } } impl < T : Clone > Clone for PredictionServiceClient < T > { fn clone ( & self ) -> Self { Self { inner : self . inner . clone ( ) , } } } impl < T > std :: fmt :: Debug for PredictionServiceClient < T > { fn fmt ( & self , f : & mut std :: fmt :: Formatter < '_ > ) -> std :: fmt :: Result { write ! ( f , "PredictionServiceClient {{ ... }}" ) } } }/// API proto representing a trained machine learning model.
2764#[derive(Clone, PartialEq, ::prost::Message)]
2765pub struct Model {
2766 /// Output only. Resource name of the model.
2767 /// Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
2768 #[prost(string, tag="1")]
2769 pub name: std::string::String,
2770 /// Required. The name of the model to show in the interface. The name can be
2771 /// up to 32 characters long and can consist only of ASCII Latin letters A-Z
2772 /// and a-z, underscores
2773 /// (_), and ASCII digits 0-9. It must start with a letter.
2774 #[prost(string, tag="2")]
2775 pub display_name: std::string::String,
2776 /// Required. The resource ID of the dataset used to create the model. The dataset must
2777 /// come from the same ancestor project and location.
2778 #[prost(string, tag="3")]
2779 pub dataset_id: std::string::String,
2780 /// Output only. Timestamp when the model training finished and can be used for prediction.
2781 #[prost(message, optional, tag="7")]
2782 pub create_time: ::std::option::Option<::prost_types::Timestamp>,
2783 /// Output only. Timestamp when this model was last updated.
2784 #[prost(message, optional, tag="11")]
2785 pub update_time: ::std::option::Option<::prost_types::Timestamp>,
2786 /// Output only. Deployment state of the model. A model can only serve
2787 /// prediction requests after it gets deployed.
2788 #[prost(enumeration="model::DeploymentState", tag="8")]
2789 pub deployment_state: i32,
2790 /// Used to perform a consistent read-modify-write updates. If not set, a blind
2791 /// "overwrite" update happens.
2792 #[prost(string, tag="10")]
2793 pub etag: std::string::String,
2794 /// Optional. The labels with user-defined metadata to organize your model.
2795 ///
2796 /// Label keys and values can be no longer than 64 characters
2797 /// (Unicode codepoints), can only contain lowercase letters, numeric
2798 /// characters, underscores and dashes. International characters are allowed.
2799 /// Label values are optional. Label keys must start with a letter.
2800 ///
2801 /// See https://goo.gl/xmQnxf for more information on and examples of labels.
2802 #[prost(map="string, string", tag="34")]
2803 pub labels: ::std::collections::HashMap<std::string::String, std::string::String>,
2804 /// Required.
2805 /// The model metadata that is specific to the problem type.
2806 /// Must match the metadata type of the dataset used to train the model.
2807 #[prost(oneof="model::ModelMetadata", tags="15, 13, 14, 20, 19, 22")]
2808 pub model_metadata: ::std::option::Option<model::ModelMetadata>,
2809}
2810pub mod model {
2811 /// Deployment state of the model.
2812 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
2813 #[repr(i32)]
2814 pub enum DeploymentState {
2815 /// Should not be used, an un-set enum has this value by default.
2816 Unspecified = 0,
2817 /// Model is deployed.
2818 Deployed = 1,
2819 /// Model is not deployed.
2820 Undeployed = 2,
2821 }
2822 /// Required.
2823 /// The model metadata that is specific to the problem type.
2824 /// Must match the metadata type of the dataset used to train the model.
2825 #[derive(Clone, PartialEq, ::prost::Oneof)]
2826 pub enum ModelMetadata {
2827 /// Metadata for translation models.
2828 #[prost(message, tag="15")]
2829 TranslationModelMetadata(super::TranslationModelMetadata),
2830 /// Metadata for image classification models.
2831 #[prost(message, tag="13")]
2832 ImageClassificationModelMetadata(super::ImageClassificationModelMetadata),
2833 /// Metadata for text classification models.
2834 #[prost(message, tag="14")]
2835 TextClassificationModelMetadata(super::TextClassificationModelMetadata),
2836 /// Metadata for image object detection models.
2837 #[prost(message, tag="20")]
2838 ImageObjectDetectionModelMetadata(super::ImageObjectDetectionModelMetadata),
2839 /// Metadata for text extraction models.
2840 #[prost(message, tag="19")]
2841 TextExtractionModelMetadata(super::TextExtractionModelMetadata),
2842 /// Metadata for text sentiment models.
2843 #[prost(message, tag="22")]
2844 TextSentimentModelMetadata(super::TextSentimentModelMetadata),
2845 }
2846}
2847/// Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
2848#[derive(Clone, PartialEq, ::prost::Message)]
2849pub struct CreateDatasetRequest {
2850 /// Required. The resource name of the project to create the dataset for.
2851 #[prost(string, tag="1")]
2852 pub parent: std::string::String,
2853 /// Required. The dataset to create.
2854 #[prost(message, optional, tag="2")]
2855 pub dataset: ::std::option::Option<Dataset>,
2856}
2857/// Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
2858#[derive(Clone, PartialEq, ::prost::Message)]
2859pub struct GetDatasetRequest {
2860 /// Required. The resource name of the dataset to retrieve.
2861 #[prost(string, tag="1")]
2862 pub name: std::string::String,
2863}
2864/// Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
2865#[derive(Clone, PartialEq, ::prost::Message)]
2866pub struct ListDatasetsRequest {
2867 /// Required. The resource name of the project from which to list datasets.
2868 #[prost(string, tag="1")]
2869 pub parent: std::string::String,
2870 /// An expression for filtering the results of the request.
2871 ///
2872 /// * `dataset_metadata` - for existence of the case (e.g.
2873 /// image_classification_dataset_metadata:*). Some examples of using the filter are:
2874 ///
2875 /// * `translation_dataset_metadata:*` --> The dataset has
2876 /// translation_dataset_metadata.
2877 #[prost(string, tag="3")]
2878 pub filter: std::string::String,
2879 /// Requested page size. Server may return fewer results than requested.
2880 /// If unspecified, server will pick a default size.
2881 #[prost(int32, tag="4")]
2882 pub page_size: i32,
2883 /// A token identifying a page of results for the server to return
2884 /// Typically obtained via
2885 /// [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] of the previous
2886 /// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] call.
2887 #[prost(string, tag="6")]
2888 pub page_token: std::string::String,
2889}
2890/// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
2891#[derive(Clone, PartialEq, ::prost::Message)]
2892pub struct ListDatasetsResponse {
2893 /// The datasets read.
2894 #[prost(message, repeated, tag="1")]
2895 pub datasets: ::std::vec::Vec<Dataset>,
2896 /// A token to retrieve next page of results.
2897 /// Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] to obtain that page.
2898 #[prost(string, tag="2")]
2899 pub next_page_token: std::string::String,
2900}
2901/// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset]
2902#[derive(Clone, PartialEq, ::prost::Message)]
2903pub struct UpdateDatasetRequest {
2904 /// Required. The dataset which replaces the resource on the server.
2905 #[prost(message, optional, tag="1")]
2906 pub dataset: ::std::option::Option<Dataset>,
2907 /// Required. The update mask applies to the resource.
2908 #[prost(message, optional, tag="2")]
2909 pub update_mask: ::std::option::Option<::prost_types::FieldMask>,
2910}
2911/// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset].
2912#[derive(Clone, PartialEq, ::prost::Message)]
2913pub struct DeleteDatasetRequest {
2914 /// Required. The resource name of the dataset to delete.
2915 #[prost(string, tag="1")]
2916 pub name: std::string::String,
2917}
2918/// Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData].
2919#[derive(Clone, PartialEq, ::prost::Message)]
2920pub struct ImportDataRequest {
2921 /// Required. Dataset name. Dataset must already exist. All imported
2922 /// annotations and examples will be added.
2923 #[prost(string, tag="1")]
2924 pub name: std::string::String,
2925 /// Required. The desired input location and its domain specific semantics,
2926 /// if any.
2927 #[prost(message, optional, tag="3")]
2928 pub input_config: ::std::option::Option<InputConfig>,
2929}
2930/// Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData].
2931#[derive(Clone, PartialEq, ::prost::Message)]
2932pub struct ExportDataRequest {
2933 /// Required. The resource name of the dataset.
2934 #[prost(string, tag="1")]
2935 pub name: std::string::String,
2936 /// Required. The desired output location.
2937 #[prost(message, optional, tag="3")]
2938 pub output_config: ::std::option::Option<OutputConfig>,
2939}
2940/// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec].
2941#[derive(Clone, PartialEq, ::prost::Message)]
2942pub struct GetAnnotationSpecRequest {
2943 /// Required. The resource name of the annotation spec to retrieve.
2944 #[prost(string, tag="1")]
2945 pub name: std::string::String,
2946}
2947/// Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
2948#[derive(Clone, PartialEq, ::prost::Message)]
2949pub struct CreateModelRequest {
2950 /// Required. Resource name of the parent project where the model is being created.
2951 #[prost(string, tag="1")]
2952 pub parent: std::string::String,
2953 /// Required. The model to create.
2954 #[prost(message, optional, tag="4")]
2955 pub model: ::std::option::Option<Model>,
2956}
2957/// Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel].
2958#[derive(Clone, PartialEq, ::prost::Message)]
2959pub struct GetModelRequest {
2960 /// Required. Resource name of the model.
2961 #[prost(string, tag="1")]
2962 pub name: std::string::String,
2963}
2964/// Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
2965#[derive(Clone, PartialEq, ::prost::Message)]
2966pub struct ListModelsRequest {
2967 /// Required. Resource name of the project, from which to list the models.
2968 #[prost(string, tag="1")]
2969 pub parent: std::string::String,
2970 /// An expression for filtering the results of the request.
2971 ///
2972 /// * `model_metadata` - for existence of the case (e.g.
2973 /// video_classification_model_metadata:*).
2974 /// * `dataset_id` - for = or !=. Some examples of using the filter are:
2975 ///
2976 /// * `image_classification_model_metadata:*` --> The model has
2977 /// image_classification_model_metadata.
2978 /// * `dataset_id=5` --> The model was created from a dataset with ID 5.
2979 #[prost(string, tag="3")]
2980 pub filter: std::string::String,
2981 /// Requested page size.
2982 #[prost(int32, tag="4")]
2983 pub page_size: i32,
2984 /// A token identifying a page of results for the server to return
2985 /// Typically obtained via
2986 /// [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] of the previous
2987 /// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call.
2988 #[prost(string, tag="6")]
2989 pub page_token: std::string::String,
2990}
2991/// Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
2992#[derive(Clone, PartialEq, ::prost::Message)]
2993pub struct ListModelsResponse {
2994 /// List of models in the requested page.
2995 #[prost(message, repeated, tag="1")]
2996 pub model: ::std::vec::Vec<Model>,
2997 /// A token to retrieve next page of results.
2998 /// Pass to [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] to obtain that page.
2999 #[prost(string, tag="2")]
3000 pub next_page_token: std::string::String,
3001}
3002/// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
3003#[derive(Clone, PartialEq, ::prost::Message)]
3004pub struct DeleteModelRequest {
3005 /// Required. Resource name of the model being deleted.
3006 #[prost(string, tag="1")]
3007 pub name: std::string::String,
3008}
3009/// Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel]
3010#[derive(Clone, PartialEq, ::prost::Message)]
3011pub struct UpdateModelRequest {
3012 /// Required. The model which replaces the resource on the server.
3013 #[prost(message, optional, tag="1")]
3014 pub model: ::std::option::Option<Model>,
3015 /// Required. The update mask applies to the resource.
3016 #[prost(message, optional, tag="2")]
3017 pub update_mask: ::std::option::Option<::prost_types::FieldMask>,
3018}
3019/// Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
3020#[derive(Clone, PartialEq, ::prost::Message)]
3021pub struct DeployModelRequest {
3022 /// Required. Resource name of the model to deploy.
3023 #[prost(string, tag="1")]
3024 pub name: std::string::String,
3025 /// The per-domain specific deployment parameters.
3026 #[prost(oneof="deploy_model_request::ModelDeploymentMetadata", tags="2, 4")]
3027 pub model_deployment_metadata: ::std::option::Option<deploy_model_request::ModelDeploymentMetadata>,
3028}
3029pub mod deploy_model_request {
3030 /// The per-domain specific deployment parameters.
3031 #[derive(Clone, PartialEq, ::prost::Oneof)]
3032 pub enum ModelDeploymentMetadata {
3033 /// Model deployment metadata specific to Image Object Detection.
3034 #[prost(message, tag="2")]
3035 ImageObjectDetectionModelDeploymentMetadata(super::ImageObjectDetectionModelDeploymentMetadata),
3036 /// Model deployment metadata specific to Image Classification.
3037 #[prost(message, tag="4")]
3038 ImageClassificationModelDeploymentMetadata(super::ImageClassificationModelDeploymentMetadata),
3039 }
3040}
3041/// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
3042#[derive(Clone, PartialEq, ::prost::Message)]
3043pub struct UndeployModelRequest {
3044 /// Required. Resource name of the model to undeploy.
3045 #[prost(string, tag="1")]
3046 pub name: std::string::String,
3047}
3048/// Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel].
3049/// Models need to be enabled for exporting, otherwise an error code will be
3050/// returned.
3051#[derive(Clone, PartialEq, ::prost::Message)]
3052pub struct ExportModelRequest {
3053 /// Required. The resource name of the model to export.
3054 #[prost(string, tag="1")]
3055 pub name: std::string::String,
3056 /// Required. The desired output location and configuration.
3057 #[prost(message, optional, tag="3")]
3058 pub output_config: ::std::option::Option<ModelExportOutputConfig>,
3059}
3060/// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
3061#[derive(Clone, PartialEq, ::prost::Message)]
3062pub struct GetModelEvaluationRequest {
3063 /// Required. Resource name for the model evaluation.
3064 #[prost(string, tag="1")]
3065 pub name: std::string::String,
3066}
3067/// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
3068#[derive(Clone, PartialEq, ::prost::Message)]
3069pub struct ListModelEvaluationsRequest {
3070 /// Required. Resource name of the model to list the model evaluations for.
3071 /// If modelId is set as "-", this will list model evaluations from across all
3072 /// models of the parent location.
3073 #[prost(string, tag="1")]
3074 pub parent: std::string::String,
3075 /// Required. An expression for filtering the results of the request.
3076 ///
3077 /// * `annotation_spec_id` - for =, != or existence. See example below for
3078 /// the last.
3079 ///
3080 /// Some examples of using the filter are:
3081 ///
3082 /// * `annotation_spec_id!=4` --> The model evaluation was done for
3083 /// annotation spec with ID different than 4.
3084 /// * `NOT annotation_spec_id:*` --> The model evaluation was done for
3085 /// aggregate of all annotation specs.
3086 #[prost(string, tag="3")]
3087 pub filter: std::string::String,
3088 /// Requested page size.
3089 #[prost(int32, tag="4")]
3090 pub page_size: i32,
3091 /// A token identifying a page of results for the server to return.
3092 /// Typically obtained via
3093 /// [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] of the previous
3094 /// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call.
3095 #[prost(string, tag="6")]
3096 pub page_token: std::string::String,
3097}
3098/// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
3099#[derive(Clone, PartialEq, ::prost::Message)]
3100pub struct ListModelEvaluationsResponse {
3101 /// List of model evaluations in the requested page.
3102 #[prost(message, repeated, tag="1")]
3103 pub model_evaluation: ::std::vec::Vec<ModelEvaluation>,
3104 /// A token to retrieve next page of results.
3105 /// Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] field of a new
3106 /// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] request to obtain that page.
3107 #[prost(string, tag="2")]
3108 pub next_page_token: std::string::String,
3109}
3110# [ doc = r" Generated client implementations." ] pub mod auto_ml_client { # ! [ allow ( unused_variables , dead_code , missing_docs ) ] use tonic :: codegen :: * ; # [ doc = " AutoML Server API." ] # [ doc = "" ] # [ doc = " The resource names are assigned by the server." ] # [ doc = " The server never reuses names that it has created after the resources with" ] # [ doc = " those names are deleted." ] # [ doc = "" ] # [ doc = " An ID of a resource is the last element of the item's resource name. For" ] # [ doc = " `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then" ] # [ doc = " the id for the item is `{dataset_id}`." ] # [ doc = "" ] # [ doc = " Currently the only supported `location_id` is \"us-central1\"." ] # [ doc = "" ] # [ doc = " On any input that is documented to expect a string parameter in" ] # [ doc = " snake_case or kebab-case, either of those cases is accepted." ] pub struct AutoMlClient < T > { inner : tonic :: client :: Grpc < T > , } impl < T > AutoMlClient < T > where T : tonic :: client :: GrpcService < tonic :: body :: BoxBody > , T :: ResponseBody : Body + HttpBody + Send + 'static , T :: Error : Into < StdError > , < T :: ResponseBody as HttpBody > :: Error : Into < StdError > + Send , { pub fn new ( inner : T ) -> Self { let inner = tonic :: client :: Grpc :: new ( inner ) ; Self { inner } } pub fn with_interceptor ( inner : T , interceptor : impl Into < tonic :: Interceptor > ) -> Self { let inner = tonic :: client :: Grpc :: with_interceptor ( inner , interceptor ) ; Self { inner } } # [ doc = " Creates a dataset." ] pub async fn create_dataset ( & mut self , request : impl tonic :: IntoRequest < super :: CreateDatasetRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/CreateDataset" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Gets a dataset." ] pub async fn get_dataset ( & mut self , request : impl tonic :: IntoRequest < super :: GetDatasetRequest > , ) -> Result < tonic :: Response < super :: Dataset > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/GetDataset" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Lists datasets in a project." ] pub async fn list_datasets ( & mut self , request : impl tonic :: IntoRequest < super :: ListDatasetsRequest > , ) -> Result < tonic :: Response < super :: ListDatasetsResponse > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/ListDatasets" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Updates a dataset." ] pub async fn update_dataset ( & mut self , request : impl tonic :: IntoRequest < super :: UpdateDatasetRequest > , ) -> Result < tonic :: Response < super :: Dataset > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/UpdateDataset" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Deletes a dataset and all of its contents." ] # [ doc = " Returns empty response in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes," ] # [ doc = " and `delete_details` in the" ] # [ doc = " [metadata][google.longrunning.Operation.metadata] field." ] pub async fn delete_dataset ( & mut self , request : impl tonic :: IntoRequest < super :: DeleteDatasetRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/DeleteDataset" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Imports data into a dataset." ] # [ doc = " For Tables this method can only be called on an empty Dataset." ] # [ doc = "" ] # [ doc = " For Tables:" ] # [ doc = " * A" ] # [ doc = " [schema_inference_version][google.cloud.automl.v1.InputConfig.params]" ] # [ doc = " parameter must be explicitly set." ] # [ doc = " Returns an empty response in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes." ] pub async fn import_data ( & mut self , request : impl tonic :: IntoRequest < super :: ImportDataRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/ImportData" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Exports dataset's data to the provided output location." ] # [ doc = " Returns an empty response in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes." ] pub async fn export_data ( & mut self , request : impl tonic :: IntoRequest < super :: ExportDataRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/ExportData" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Gets an annotation spec." ] pub async fn get_annotation_spec ( & mut self , request : impl tonic :: IntoRequest < super :: GetAnnotationSpecRequest > , ) -> Result < tonic :: Response < super :: AnnotationSpec > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Creates a model." ] # [ doc = " Returns a Model in the [response][google.longrunning.Operation.response]" ] # [ doc = " field when it completes." ] # [ doc = " When you create a model, several model evaluations are created for it:" ] # [ doc = " a global evaluation, and one evaluation for each annotation spec." ] pub async fn create_model ( & mut self , request : impl tonic :: IntoRequest < super :: CreateModelRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/CreateModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Gets a model." ] pub async fn get_model ( & mut self , request : impl tonic :: IntoRequest < super :: GetModelRequest > , ) -> Result < tonic :: Response < super :: Model > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/GetModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Lists models." ] pub async fn list_models ( & mut self , request : impl tonic :: IntoRequest < super :: ListModelsRequest > , ) -> Result < tonic :: Response < super :: ListModelsResponse > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/ListModels" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Deletes a model." ] # [ doc = " Returns `google.protobuf.Empty` in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes," ] # [ doc = " and `delete_details` in the" ] # [ doc = " [metadata][google.longrunning.Operation.metadata] field." ] pub async fn delete_model ( & mut self , request : impl tonic :: IntoRequest < super :: DeleteModelRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/DeleteModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Updates a model." ] pub async fn update_model ( & mut self , request : impl tonic :: IntoRequest < super :: UpdateModelRequest > , ) -> Result < tonic :: Response < super :: Model > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/UpdateModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Deploys a model. If a model is already deployed, deploying it with the" ] # [ doc = " same parameters has no effect. Deploying with different parametrs" ] # [ doc = " (as e.g. changing" ] # [ doc = "" ] # [ doc = " [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])" ] # [ doc = " will reset the deployment state without pausing the model's availability." ] # [ doc = "" ] # [ doc = " Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage" ] # [ doc = " deployment automatically." ] # [ doc = "" ] # [ doc = " Returns an empty response in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes." ] pub async fn deploy_model ( & mut self , request : impl tonic :: IntoRequest < super :: DeployModelRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/DeployModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Undeploys a model. If the model is not deployed this method has no effect." ] # [ doc = "" ] # [ doc = " Only applicable for Text Classification, Image Object Detection and Tables;" ] # [ doc = " all other domains manage deployment automatically." ] # [ doc = "" ] # [ doc = " Returns an empty response in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes." ] pub async fn undeploy_model ( & mut self , request : impl tonic :: IntoRequest < super :: UndeployModelRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/UndeployModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Exports a trained, \"export-able\", model to a user specified Google Cloud" ] # [ doc = " Storage location. A model is considered export-able if and only if it has" ] # [ doc = " an export format defined for it in" ] # [ doc = " [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]." ] # [ doc = "" ] # [ doc = " Returns an empty response in the" ] # [ doc = " [response][google.longrunning.Operation.response] field when it completes." ] pub async fn export_model ( & mut self , request : impl tonic :: IntoRequest < super :: ExportModelRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/ExportModel" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Gets a model evaluation." ] pub async fn get_model_evaluation ( & mut self , request : impl tonic :: IntoRequest < super :: GetModelEvaluationRequest > , ) -> Result < tonic :: Response < super :: ModelEvaluation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/GetModelEvaluation" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Lists model evaluations." ] pub async fn list_model_evaluations ( & mut self , request : impl tonic :: IntoRequest < super :: ListModelEvaluationsRequest > , ) -> Result < tonic :: Response < super :: ListModelEvaluationsResponse > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.automl.v1.AutoMl/ListModelEvaluations" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } } impl < T : Clone > Clone for AutoMlClient < T > { fn clone ( & self ) -> Self { Self { inner : self . inner . clone ( ) , } } } impl < T > std :: fmt :: Debug for AutoMlClient < T > { fn fmt ( & self , f : & mut std :: fmt :: Formatter < '_ > ) -> std :: fmt :: Result { write ! ( f , "AutoMlClient {{ ... }}" ) } } }use serde :: { Serialize , Deserialize } ;