//! Generated protobuf bindings for `google.cloud.vision.v1p2beta1`.
//!
//! File: gcp_client/google/cloud/vision/v1p2beta1.rs (prost-generated; do not edit by hand).
/// A vertex represents a 2D point in the image.
/// NOTE: the vertex coordinates are in the same scale as the original image.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Vertex {
    /// X coordinate.
    #[prost(int32, tag="1")]
    pub x: i32,
    /// Y coordinate.
    #[prost(int32, tag="2")]
    pub y: i32,
}
/// A vertex represents a 2D point in the image.
/// NOTE: the normalized vertex coordinates are relative to the original image
/// and range from 0 to 1.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NormalizedVertex {
    /// X coordinate.
    #[prost(float, tag="1")]
    pub x: f32,
    /// Y coordinate.
    #[prost(float, tag="2")]
    pub y: f32,
}
/// A bounding polygon for the detected image annotation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BoundingPoly {
    /// The bounding polygon vertices.
    #[prost(message, repeated, tag="1")]
    pub vertices: ::std::vec::Vec<Vertex>,
    /// The bounding polygon normalized vertices.
    #[prost(message, repeated, tag="2")]
    pub normalized_vertices: ::std::vec::Vec<NormalizedVertex>,
}
/// A 3D position in the image, used primarily for Face detection landmarks.
/// A valid Position must have both x and y coordinates.
/// The position coordinates are in the same scale as the original image.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Position {
    /// X coordinate.
    #[prost(float, tag="1")]
    pub x: f32,
    /// Y coordinate.
    #[prost(float, tag="2")]
    pub y: f32,
    /// Z coordinate (or depth).
    #[prost(float, tag="3")]
    pub z: f32,
}
/// TextAnnotation contains a structured representation of OCR extracted text.
/// The hierarchy of an OCR extracted text structure is like this:
///     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
/// Each structural component, starting from Page, may further have their own
/// properties. Properties describe detected languages, breaks etc.. Please refer
/// to the
/// [TextAnnotation.TextProperty][google.cloud.vision.v1p2beta1.TextAnnotation.TextProperty]
/// message definition below for more detail.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextAnnotation {
    /// List of pages detected by OCR.
    #[prost(message, repeated, tag="1")]
    pub pages: ::std::vec::Vec<Page>,
    /// UTF-8 text detected on the pages.
    #[prost(string, tag="2")]
    pub text: std::string::String,
}
/// Nested message and enum types in `TextAnnotation`.
pub mod text_annotation {
    /// Detected language for a structural component.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct DetectedLanguage {
        /// The BCP-47 language code, such as "en-US" or "sr-Latn". For more
        /// information, see
        /// http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
        #[prost(string, tag="1")]
        pub language_code: std::string::String,
        /// Confidence of detected language. Range [0, 1].
        #[prost(float, tag="2")]
        pub confidence: f32,
    }
    /// Detected start or end of a structural component.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct DetectedBreak {
        /// Detected break type.
        ///
        /// Stored as `i32` per prost's enumeration encoding; decode with
        /// `detected_break::BreakType::from_i32`. `r#type` is a raw identifier
        /// because `type` is a Rust keyword.
        #[prost(enumeration="detected_break::BreakType", tag="1")]
        pub r#type: i32,
        /// True if break prepends the element.
        #[prost(bool, tag="2")]
        pub is_prefix: bool,
    }
    /// Nested message and enum types in `DetectedBreak`.
    pub mod detected_break {
        /// Enum to denote the type of break found. New line, space etc.
        #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
        #[repr(i32)]
        pub enum BreakType {
            /// Unknown break label type.
            Unknown = 0,
            /// Regular space.
            Space = 1,
            /// Sure space (very wide).
            SureSpace = 2,
            /// Line-wrapping break.
            EolSureSpace = 3,
            /// End-line hyphen that is not present in text; does not co-occur with
            /// `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
            Hyphen = 4,
            /// Line break that ends a paragraph.
            LineBreak = 5,
        }
    }
    /// Additional information detected on the structural component.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct TextProperty {
        /// A list of detected languages together with confidence.
        #[prost(message, repeated, tag="1")]
        pub detected_languages: ::std::vec::Vec<DetectedLanguage>,
        /// Detected start or end of a text segment.
        #[prost(message, optional, tag="2")]
        pub detected_break: ::std::option::Option<DetectedBreak>,
    }
}
/// Detected page from OCR.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Page {
    /// Additional information detected on the page.
    #[prost(message, optional, tag="1")]
    pub property: ::std::option::Option<text_annotation::TextProperty>,
    /// Page width. For PDFs the unit is points. For images (including
    /// TIFFs) the unit is pixels.
    #[prost(int32, tag="2")]
    pub width: i32,
    /// Page height. For PDFs the unit is points. For images (including
    /// TIFFs) the unit is pixels.
    #[prost(int32, tag="3")]
    pub height: i32,
    /// List of blocks of text, images etc on this page.
    #[prost(message, repeated, tag="4")]
    pub blocks: ::std::vec::Vec<Block>,
    /// Confidence of the OCR results on the page. Range [0, 1].
    #[prost(float, tag="5")]
    pub confidence: f32,
}
/// Logical element on the page.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Block {
    /// Additional information detected for the block.
    #[prost(message, optional, tag="1")]
    pub property: ::std::option::Option<text_annotation::TextProperty>,
    /// The bounding box for the block.
    /// The vertices are in the order of top-left, top-right, bottom-right,
    /// bottom-left. When a rotation of the bounding box is detected the rotation
    /// is represented as around the top-left corner as defined when the text is
    /// read in the 'natural' orientation.
    /// For example:
    ///
    /// * when the text is horizontal it might look like:
    ///
    ///         0----1
    ///         |    |
    ///         3----2
    ///
    /// * when it's rotated 180 degrees around the top-left corner it becomes:
    ///
    ///         2----3
    ///         |    |
    ///         1----0
    ///
    ///   and the vertice order will still be (0, 1, 2, 3).
    #[prost(message, optional, tag="2")]
    pub bounding_box: ::std::option::Option<BoundingPoly>,
    /// List of paragraphs in this block (if this blocks is of type text).
    #[prost(message, repeated, tag="3")]
    pub paragraphs: ::std::vec::Vec<Paragraph>,
    /// Detected block type (text, image etc) for this block.
    ///
    /// Stored as `i32`; decode with `block::BlockType::from_i32`.
    #[prost(enumeration="block::BlockType", tag="4")]
    pub block_type: i32,
    /// Confidence of the OCR results on the block. Range [0, 1].
    #[prost(float, tag="5")]
    pub confidence: f32,
}
/// Nested message and enum types in `Block`.
pub mod block {
    /// Type of a block (text, image etc) as identified by OCR.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum BlockType {
        /// Unknown block type.
        Unknown = 0,
        /// Regular text block.
        Text = 1,
        /// Table block.
        Table = 2,
        /// Image block.
        Picture = 3,
        /// Horizontal/vertical line box.
        Ruler = 4,
        /// Barcode block.
        Barcode = 5,
    }
}
/// Structural unit of text representing a number of words in certain order.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Paragraph {
    /// Additional information detected for the paragraph.
    #[prost(message, optional, tag="1")]
    pub property: ::std::option::Option<text_annotation::TextProperty>,
    /// The bounding box for the paragraph.
    /// The vertices are in the order of top-left, top-right, bottom-right,
    /// bottom-left. When a rotation of the bounding box is detected the rotation
    /// is represented as around the top-left corner as defined when the text is
    /// read in the 'natural' orientation.
    /// For example:
    ///   * when the text is horizontal it might look like:
    ///      0----1
    ///      |    |
    ///      3----2
    ///   * when it's rotated 180 degrees around the top-left corner it becomes:
    ///      2----3
    ///      |    |
    ///      1----0
    ///   and the vertice order will still be (0, 1, 2, 3).
    #[prost(message, optional, tag="2")]
    pub bounding_box: ::std::option::Option<BoundingPoly>,
    /// List of words in this paragraph.
    #[prost(message, repeated, tag="3")]
    pub words: ::std::vec::Vec<Word>,
    /// Confidence of the OCR results for the paragraph. Range [0, 1].
    #[prost(float, tag="4")]
    pub confidence: f32,
}
/// A word representation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Word {
    /// Additional information detected for the word.
    #[prost(message, optional, tag="1")]
    pub property: ::std::option::Option<text_annotation::TextProperty>,
    /// The bounding box for the word.
    /// The vertices are in the order of top-left, top-right, bottom-right,
    /// bottom-left. When a rotation of the bounding box is detected the rotation
    /// is represented as around the top-left corner as defined when the text is
    /// read in the 'natural' orientation.
    /// For example:
    ///   * when the text is horizontal it might look like:
    ///      0----1
    ///      |    |
    ///      3----2
    ///   * when it's rotated 180 degrees around the top-left corner it becomes:
    ///      2----3
    ///      |    |
    ///      1----0
    ///   and the vertice order will still be (0, 1, 2, 3).
    #[prost(message, optional, tag="2")]
    pub bounding_box: ::std::option::Option<BoundingPoly>,
    /// List of symbols in the word.
    /// The order of the symbols follows the natural reading order.
    #[prost(message, repeated, tag="3")]
    pub symbols: ::std::vec::Vec<Symbol>,
    /// Confidence of the OCR results for the word. Range [0, 1].
    #[prost(float, tag="4")]
    pub confidence: f32,
}
/// A single symbol representation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Symbol {
    /// Additional information detected for the symbol.
    #[prost(message, optional, tag="1")]
    pub property: ::std::option::Option<text_annotation::TextProperty>,
    /// The bounding box for the symbol.
    /// The vertices are in the order of top-left, top-right, bottom-right,
    /// bottom-left. When a rotation of the bounding box is detected the rotation
    /// is represented as around the top-left corner as defined when the text is
    /// read in the 'natural' orientation.
    /// For example:
    ///   * when the text is horizontal it might look like:
    ///      0----1
    ///      |    |
    ///      3----2
    ///   * when it's rotated 180 degrees around the top-left corner it becomes:
    ///      2----3
    ///      |    |
    ///      1----0
    ///   and the vertice order will still be (0, 1, 2, 3).
    #[prost(message, optional, tag="2")]
    pub bounding_box: ::std::option::Option<BoundingPoly>,
    /// The actual UTF-8 representation of the symbol.
    #[prost(string, tag="3")]
    pub text: std::string::String,
    /// Confidence of the OCR results for the symbol. Range [0, 1].
    #[prost(float, tag="4")]
    pub confidence: f32,
}
/// Relevant information for the image from the Internet.
///
/// NOTE: tags 5 and 7 are intentionally absent; gaps in proto field numbers
/// are preserved from the upstream schema for wire compatibility.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WebDetection {
    /// Deduced entities from similar images on the Internet.
    #[prost(message, repeated, tag="1")]
    pub web_entities: ::std::vec::Vec<web_detection::WebEntity>,
    /// Fully matching images from the Internet.
    /// Can include resized copies of the query image.
    #[prost(message, repeated, tag="2")]
    pub full_matching_images: ::std::vec::Vec<web_detection::WebImage>,
    /// Partial matching images from the Internet.
    /// Those images are similar enough to share some key-point features. For
    /// example an original image will likely have partial matching for its crops.
    #[prost(message, repeated, tag="3")]
    pub partial_matching_images: ::std::vec::Vec<web_detection::WebImage>,
    /// Web pages containing the matching images from the Internet.
    #[prost(message, repeated, tag="4")]
    pub pages_with_matching_images: ::std::vec::Vec<web_detection::WebPage>,
    /// The visually similar image results.
    #[prost(message, repeated, tag="6")]
    pub visually_similar_images: ::std::vec::Vec<web_detection::WebImage>,
    /// Best guess text labels for the request image.
    #[prost(message, repeated, tag="8")]
    pub best_guess_labels: ::std::vec::Vec<web_detection::WebLabel>,
}
/// Nested message and enum types in `WebDetection`.
pub mod web_detection {
    /// Entity deduced from similar images on the Internet.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct WebEntity {
        /// Opaque entity ID.
        #[prost(string, tag="1")]
        pub entity_id: std::string::String,
        /// Overall relevancy score for the entity.
        /// Not normalized and not comparable across different image queries.
        #[prost(float, tag="2")]
        pub score: f32,
        /// Canonical description of the entity, in English.
        #[prost(string, tag="3")]
        pub description: std::string::String,
    }
    /// Metadata for online images.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct WebImage {
        /// The result image URL.
        #[prost(string, tag="1")]
        pub url: std::string::String,
        /// (Deprecated) Overall relevancy score for the image.
        #[prost(float, tag="2")]
        pub score: f32,
    }
    /// Metadata for web pages.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct WebPage {
        /// The result web page URL.
        #[prost(string, tag="1")]
        pub url: std::string::String,
        /// (Deprecated) Overall relevancy score for the web page.
        #[prost(float, tag="2")]
        pub score: f32,
        /// Title for the web page, may contain HTML markups.
        #[prost(string, tag="3")]
        pub page_title: std::string::String,
        /// Fully matching images on the page.
        /// Can include resized copies of the query image.
        #[prost(message, repeated, tag="4")]
        pub full_matching_images: ::std::vec::Vec<WebImage>,
        /// Partial matching images on the page.
        /// Those images are similar enough to share some key-point features. For
        /// example an original image will likely have partial matching for its
        /// crops.
        #[prost(message, repeated, tag="5")]
        pub partial_matching_images: ::std::vec::Vec<WebImage>,
    }
    /// Label to provide extra metadata for the web detection.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct WebLabel {
        /// Label for extra metadata.
        #[prost(string, tag="1")]
        pub label: std::string::String,
        /// The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
        /// For more information, see
        /// http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
        #[prost(string, tag="2")]
        pub language_code: std::string::String,
    }
}
/// The type of Google Cloud Vision API detection to perform, and the maximum
/// number of results to return for that type. Multiple `Feature` objects can
/// be specified in the `features` list.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Feature {
    /// The feature type.
    ///
    /// Stored as `i32`; decode with `feature::Type::from_i32`. `r#type` is a
    /// raw identifier because `type` is a Rust keyword.
    #[prost(enumeration="feature::Type", tag="1")]
    pub r#type: i32,
    /// Maximum number of results of this type. Does not apply to
    /// `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
    #[prost(int32, tag="2")]
    pub max_results: i32,
    /// Model to use for the feature.
    /// Supported values: "builtin/stable" (the default if unset) and
    /// "builtin/latest".
    #[prost(string, tag="3")]
    pub model: std::string::String,
}
/// Nested message and enum types in `Feature`.
pub mod feature {
    /// Type of Google Cloud Vision API feature to be extracted.
    ///
    /// NOTE: discriminants follow the upstream proto numbering, which is not
    /// contiguous in declaration order (e.g. `DocumentTextDetection = 11`).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum Type {
        /// Unspecified feature type.
        Unspecified = 0,
        /// Run face detection.
        FaceDetection = 1,
        /// Run landmark detection.
        LandmarkDetection = 2,
        /// Run logo detection.
        LogoDetection = 3,
        /// Run label detection.
        LabelDetection = 4,
        /// Run text detection / optical character recognition (OCR). Text detection
        /// is optimized for areas of text within a larger image; if the image is
        /// a document, use `DOCUMENT_TEXT_DETECTION` instead.
        TextDetection = 5,
        /// Run dense text document OCR. Takes precedence when both
        /// `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
        DocumentTextDetection = 11,
        /// Run Safe Search to detect potentially unsafe
        /// or undesirable content.
        SafeSearchDetection = 6,
        /// Compute a set of image properties, such as the
        /// image's dominant colors.
        ImageProperties = 7,
        /// Run crop hints.
        CropHints = 9,
        /// Run web detection.
        WebDetection = 10,
    }
}
/// External image source (Google Cloud Storage or web URL image location).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImageSource {
    /// **Use `image_uri` instead.**
    ///
    /// The Google Cloud Storage  URI of the form
    /// `gs://bucket_name/object_name`. Object versioning is not supported. See
    /// [Google Cloud Storage Request
    /// URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
    #[prost(string, tag="1")]
    pub gcs_image_uri: std::string::String,
    /// The URI of the source image. Can be either:
    ///
    /// 1. A Google Cloud Storage URI of the form
    ///    `gs://bucket_name/object_name`. Object versioning is not supported. See
    ///    [Google Cloud Storage Request
    ///    URIs](https://cloud.google.com/storage/docs/reference-uris) for more
    ///    info.
    ///
    /// 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
    ///    HTTP/HTTPS URLs, Google cannot guarantee that the request will be
    ///    completed. Your request may fail if the specified host denies the
    ///    request (e.g. due to request throttling or DOS prevention), or if Google
    ///    throttles requests to the site for abuse prevention. You should not
    ///    depend on externally-hosted images for production applications.
    ///
    /// When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
    /// precedence.
    #[prost(string, tag="2")]
    pub image_uri: std::string::String,
}
/// Client image to perform Google Cloud Vision API tasks over.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Image {
    /// Image content, represented as a stream of bytes.
    /// Note: As with all `bytes` fields, protobuffers use a pure binary
    /// representation, whereas JSON representations use base64.
    #[prost(bytes, tag="1")]
    pub content: std::vec::Vec<u8>,
    /// Google Cloud Storage image location, or publicly-accessible image
    /// URL. If both `content` and `source` are provided for an image, `content`
    /// takes precedence and is used to perform the image annotation request.
    #[prost(message, optional, tag="2")]
    pub source: ::std::option::Option<ImageSource>,
}
/// A face annotation object contains the results of face detection.
///
/// All `*_likelihood` fields below are `Likelihood` enum values stored as
/// `i32` per prost's enumeration encoding.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FaceAnnotation {
    /// The bounding polygon around the face. The coordinates of the bounding box
    /// are in the original image's scale, as returned in `ImageParams`.
    /// The bounding box is computed to "frame" the face in accordance with human
    /// expectations. It is based on the landmarker results.
    /// Note that one or more x and/or y coordinates may not be generated in the
    /// `BoundingPoly` (the polygon will be unbounded) if only a partial face
    /// appears in the image to be annotated.
    #[prost(message, optional, tag="1")]
    pub bounding_poly: ::std::option::Option<BoundingPoly>,
    /// The `fd_bounding_poly` bounding polygon is tighter than the
    /// `boundingPoly`, and encloses only the skin part of the face. Typically, it
    /// is used to eliminate the face from any image analysis that detects the
    /// "amount of skin" visible in an image. It is not based on the
    /// landmarker results, only on the initial face detection, hence
    /// the <code>fd</code> (face detection) prefix.
    #[prost(message, optional, tag="2")]
    pub fd_bounding_poly: ::std::option::Option<BoundingPoly>,
    /// Detected face landmarks.
    #[prost(message, repeated, tag="3")]
    pub landmarks: ::std::vec::Vec<face_annotation::Landmark>,
    /// Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
    /// of the face relative to the image vertical about the axis perpendicular to
    /// the face. Range [-180,180].
    #[prost(float, tag="4")]
    pub roll_angle: f32,
    /// Yaw angle, which indicates the leftward/rightward angle that the face is
    /// pointing relative to the vertical plane perpendicular to the image. Range
    /// [-180,180].
    #[prost(float, tag="5")]
    pub pan_angle: f32,
    /// Pitch angle, which indicates the upwards/downwards angle that the face is
    /// pointing relative to the image's horizontal plane. Range [-180,180].
    #[prost(float, tag="6")]
    pub tilt_angle: f32,
    /// Detection confidence. Range [0, 1].
    #[prost(float, tag="7")]
    pub detection_confidence: f32,
    /// Face landmarking confidence. Range [0, 1].
    #[prost(float, tag="8")]
    pub landmarking_confidence: f32,
    /// Joy likelihood.
    #[prost(enumeration="Likelihood", tag="9")]
    pub joy_likelihood: i32,
    /// Sorrow likelihood.
    #[prost(enumeration="Likelihood", tag="10")]
    pub sorrow_likelihood: i32,
    /// Anger likelihood.
    #[prost(enumeration="Likelihood", tag="11")]
    pub anger_likelihood: i32,
    /// Surprise likelihood.
    #[prost(enumeration="Likelihood", tag="12")]
    pub surprise_likelihood: i32,
    /// Under-exposed likelihood.
    #[prost(enumeration="Likelihood", tag="13")]
    pub under_exposed_likelihood: i32,
    /// Blurred likelihood.
    #[prost(enumeration="Likelihood", tag="14")]
    pub blurred_likelihood: i32,
    /// Headwear likelihood.
    #[prost(enumeration="Likelihood", tag="15")]
    pub headwear_likelihood: i32,
}
/// Nested message and enum types in `FaceAnnotation`.
pub mod face_annotation {
    /// A face-specific landmark (for example, a face feature).
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Landmark {
        /// Face landmark type.
        ///
        /// NOTE: upstream proto numbers these fields 3 and 4; tags 1 and 2 are
        /// intentionally absent from this message.
        #[prost(enumeration="landmark::Type", tag="3")]
        pub r#type: i32,
        /// Face landmark position.
        #[prost(message, optional, tag="4")]
        pub position: ::std::option::Option<super::Position>,
    }
    /// Nested message and enum types in `Landmark`.
    pub mod landmark {
        /// Face landmark (feature) type.
        /// Left and right are defined from the vantage of the viewer of the image
        /// without considering mirror projections typical of photos. So, `LEFT_EYE`,
        /// typically, is the person's right eye.
        #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
        #[repr(i32)]
        pub enum Type {
            /// Unknown face landmark detected. Should not be filled.
            UnknownLandmark = 0,
            /// Left eye.
            LeftEye = 1,
            /// Right eye.
            RightEye = 2,
            /// Left of left eyebrow.
            LeftOfLeftEyebrow = 3,
            /// Right of left eyebrow.
            RightOfLeftEyebrow = 4,
            /// Left of right eyebrow.
            LeftOfRightEyebrow = 5,
            /// Right of right eyebrow.
            RightOfRightEyebrow = 6,
            /// Midpoint between eyes.
            MidpointBetweenEyes = 7,
            /// Nose tip.
            NoseTip = 8,
            /// Upper lip.
            UpperLip = 9,
            /// Lower lip.
            LowerLip = 10,
            /// Mouth left.
            MouthLeft = 11,
            /// Mouth right.
            MouthRight = 12,
            /// Mouth center.
            MouthCenter = 13,
            /// Nose, bottom right.
            NoseBottomRight = 14,
            /// Nose, bottom left.
            NoseBottomLeft = 15,
            /// Nose, bottom center.
            NoseBottomCenter = 16,
            /// Left eye, top boundary.
            LeftEyeTopBoundary = 17,
            /// Left eye, right corner.
            LeftEyeRightCorner = 18,
            /// Left eye, bottom boundary.
            LeftEyeBottomBoundary = 19,
            /// Left eye, left corner.
            LeftEyeLeftCorner = 20,
            /// Right eye, top boundary.
            RightEyeTopBoundary = 21,
            /// Right eye, right corner.
            RightEyeRightCorner = 22,
            /// Right eye, bottom boundary.
            RightEyeBottomBoundary = 23,
            /// Right eye, left corner.
            RightEyeLeftCorner = 24,
            /// Left eyebrow, upper midpoint.
            LeftEyebrowUpperMidpoint = 25,
            /// Right eyebrow, upper midpoint.
            RightEyebrowUpperMidpoint = 26,
            /// Left ear tragion.
            LeftEarTragion = 27,
            /// Right ear tragion.
            RightEarTragion = 28,
            /// Left eye pupil.
            LeftEyePupil = 29,
            /// Right eye pupil.
            RightEyePupil = 30,
            /// Forehead glabella.
            ForeheadGlabella = 31,
            /// Chin gnathion.
            ChinGnathion = 32,
            /// Chin left gonion.
            ChinLeftGonion = 33,
            /// Chin right gonion.
            ChinRightGonion = 34,
        }
    }
}
/// Detected entity location information.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LocationInfo {
    /// lat/long location coordinates.
    // `r#type` path segment: the sibling `google.type` module is named after
    // the Rust keyword `type`, hence the raw-identifier path.
    #[prost(message, optional, tag="1")]
    pub lat_lng: ::std::option::Option<super::super::super::r#type::LatLng>,
}
/// A `Property` consists of a user-supplied name/value pair.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Property {
    /// Name of the property.
    #[prost(string, tag="1")]
    pub name: std::string::String,
    /// Value of the property.
    #[prost(string, tag="2")]
    pub value: std::string::String,
    /// Value of numeric properties.
    #[prost(uint64, tag="3")]
    pub uint64_value: u64,
}
/// Set of detected entity features.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EntityAnnotation {
    /// Opaque entity ID. Some IDs may be available in
    /// [Google Knowledge Graph Search
    /// API](https://developers.google.com/knowledge-graph/).
    #[prost(string, tag="1")]
    pub mid: std::string::String,
    /// The language code for the locale in which the entity textual
    /// `description` is expressed.
    #[prost(string, tag="2")]
    pub locale: std::string::String,
    /// Entity textual description, expressed in its `locale` language.
    #[prost(string, tag="3")]
    pub description: std::string::String,
    /// Overall score of the result. Range [0, 1].
    #[prost(float, tag="4")]
    pub score: f32,
    /// **Deprecated. Use `score` instead.**
    /// The accuracy of the entity detection in an image.
    /// For example, for an image in which the "Eiffel Tower" entity is detected,
    /// this field represents the confidence that there is a tower in the query
    /// image. Range [0, 1].
    #[prost(float, tag="5")]
    pub confidence: f32,
    /// The relevancy of the ICA (Image Content Annotation) label to the
    /// image. For example, the relevancy of "tower" is likely higher to an image
    /// containing the detected "Eiffel Tower" than to an image containing a
    /// detected distant towering building, even though the confidence that
    /// there is a tower in each image may be the same. Range [0, 1].
    #[prost(float, tag="6")]
    pub topicality: f32,
    /// Image region to which this entity belongs. Not produced
    /// for `LABEL_DETECTION` features.
    #[prost(message, optional, tag="7")]
    pub bounding_poly: ::std::option::Option<BoundingPoly>,
    /// The location information for the detected entity. Multiple
    /// `LocationInfo` elements can be present because one location may
    /// indicate the location of the scene in the image, and another location
    /// may indicate the location of the place where the image was taken.
    /// Location information is usually present for landmarks.
    #[prost(message, repeated, tag="8")]
    pub locations: ::std::vec::Vec<LocationInfo>,
    /// Some entities may have optional user-supplied `Property` (name/value)
    /// fields, such a score or string that qualifies the entity.
    #[prost(message, repeated, tag="9")]
    pub properties: ::std::vec::Vec<Property>,
}
/// Set of features pertaining to the image, computed by computer vision
/// methods over safe-search verticals (for example, adult, spoof, medical,
/// violence).
///
/// Each field is a `Likelihood` enum value stored as `i32` per prost's
/// enumeration encoding. NOTE: `racy` uses tag 9 — tags 5-8 are reserved
/// upstream and intentionally absent here.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SafeSearchAnnotation {
    /// Represents the adult content likelihood for the image. Adult content may
    /// contain elements such as nudity, pornographic images or cartoons, or
    /// sexual activities.
    #[prost(enumeration="Likelihood", tag="1")]
    pub adult: i32,
    /// Spoof likelihood. The likelihood that an modification
    /// was made to the image's canonical version to make it appear
    /// funny or offensive.
    #[prost(enumeration="Likelihood", tag="2")]
    pub spoof: i32,
    /// Likelihood that this is a medical image.
    #[prost(enumeration="Likelihood", tag="3")]
    pub medical: i32,
    /// Likelihood that this image contains violent content.
    #[prost(enumeration="Likelihood", tag="4")]
    pub violence: i32,
    /// Likelihood that the request image contains racy content. Racy content may
    /// include (but is not limited to) skimpy or sheer clothing, strategically
    /// covered nudity, lewd or provocative poses, or close-ups of sensitive
    /// body areas.
    #[prost(enumeration="Likelihood", tag="9")]
    pub racy: i32,
}
/// Rectangle determined by min and max `LatLng` pairs.
///
/// `LatLng` comes from the sibling `r#type` module (the shared
/// `google.type` protos).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LatLongRect {
    /// Min lat/long pair.
    #[prost(message, optional, tag="1")]
    pub min_lat_lng: ::std::option::Option<super::super::super::r#type::LatLng>,
    /// Max lat/long pair.
    #[prost(message, optional, tag="2")]
    pub max_lat_lng: ::std::option::Option<super::super::super::r#type::LatLng>,
}
/// Color information consists of RGB channels, score, and the fraction of
/// the image that the color occupies in the image.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ColorInfo {
    /// RGB components of the color (a `google.type.Color` message from the
    /// sibling `r#type` module).
    #[prost(message, optional, tag="1")]
    pub color: ::std::option::Option<super::super::super::r#type::Color>,
    /// Image-specific score for this color. Value in range [0, 1].
    #[prost(float, tag="2")]
    pub score: f32,
    /// The fraction of pixels the color occupies in the image.
    /// Value in range [0, 1].
    #[prost(float, tag="3")]
    pub pixel_fraction: f32,
}
/// Set of dominant colors and their corresponding scores.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DominantColorsAnnotation {
    /// RGB color values with their score and pixel fraction.
    #[prost(message, repeated, tag="1")]
    pub colors: ::std::vec::Vec<ColorInfo>,
}
/// Stores image properties, such as dominant colors.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImageProperties {
    /// If present, dominant colors completed successfully.
    /// `None` means dominant-color extraction did not run (or did not succeed).
    #[prost(message, optional, tag="1")]
    pub dominant_colors: ::std::option::Option<DominantColorsAnnotation>,
}
/// Single crop hint that is used to generate a new crop when serving an image.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CropHint {
    /// The bounding polygon for the crop region. The coordinates of the bounding
    /// box are in the original image's scale, as returned in `ImageParams`.
    #[prost(message, optional, tag="1")]
    pub bounding_poly: ::std::option::Option<BoundingPoly>,
    /// Confidence of this being a salient region.  Range [0, 1].
    #[prost(float, tag="2")]
    pub confidence: f32,
    /// Fraction of importance of this salient region with respect to the original
    /// image. Range [0, 1].
    #[prost(float, tag="3")]
    pub importance_fraction: f32,
}
/// Set of crop hints that are used to generate new crops when serving images.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CropHintsAnnotation {
    /// Crop hint results, one [`CropHint`] per requested aspect ratio.
    #[prost(message, repeated, tag="1")]
    pub crop_hints: ::std::vec::Vec<CropHint>,
}
/// Parameters for crop hints annotation request.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CropHintsParams {
    /// Aspect ratios in floats, representing the ratio of the width to the height
    /// of the image. For example, if the desired aspect ratio is 4/3, the
    /// corresponding float value should be 1.33333.  If not specified, the
    /// best possible crop is returned. The number of provided aspect ratios is
    /// limited to a maximum of 16; any aspect ratios provided after the 16th are
    /// ignored.
    #[prost(float, repeated, tag="1")]
    pub aspect_ratios: ::std::vec::Vec<f32>,
}
/// Parameters for web detection request.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WebDetectionParams {
    /// Whether to include results derived from the geo information in the image.
    ///
    /// Tag 2 (not 1) mirrors the field number in the source proto.
    #[prost(bool, tag="2")]
    pub include_geo_results: bool,
}
/// Image context and/or feature-specific parameters.
///
/// Proto field numbers 3 and 5 are not used by this message; only the tags
/// below are defined.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImageContext {
    /// Not used.
    #[prost(message, optional, tag="1")]
    pub lat_long_rect: ::std::option::Option<LatLongRect>,
    /// List of languages to use for TEXT_DETECTION. In most cases, an empty value
    /// yields the best results since it enables automatic language detection. For
    /// languages based on the Latin alphabet, setting `language_hints` is not
    /// needed. In rare cases, when the language of the text in the image is known,
    /// setting a hint will help get better results (although it will be a
    /// significant hindrance if the hint is wrong). Text detection returns an
    /// error if one or more of the specified languages is not one of the
    /// [supported languages](https://cloud.google.com/vision/docs/languages).
    #[prost(string, repeated, tag="2")]
    pub language_hints: ::std::vec::Vec<std::string::String>,
    /// Parameters for crop hints annotation request.
    #[prost(message, optional, tag="4")]
    pub crop_hints_params: ::std::option::Option<CropHintsParams>,
    /// Parameters for web detection.
    #[prost(message, optional, tag="6")]
    pub web_detection_params: ::std::option::Option<WebDetectionParams>,
}
/// Request for performing Google Cloud Vision API tasks over a user-provided
/// image, with user-requested features.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateImageRequest {
    /// The image to be processed.
    #[prost(message, optional, tag="1")]
    pub image: ::std::option::Option<Image>,
    /// Requested features (which detections to run on the image).
    #[prost(message, repeated, tag="2")]
    pub features: ::std::vec::Vec<Feature>,
    /// Additional context that may accompany the image.
    #[prost(message, optional, tag="3")]
    pub image_context: ::std::option::Option<ImageContext>,
}
/// If an image was produced from a file (e.g. a PDF), this message gives
/// information about the source of that image.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImageAnnotationContext {
    /// The URI of the file used to produce the image.
    #[prost(string, tag="1")]
    pub uri: std::string::String,
    /// If the file was a PDF or TIFF, this field gives the page number within
    /// the file used to produce the image.
    #[prost(int32, tag="2")]
    pub page_number: i32,
}
/// Response to an image annotation request.
///
/// Each field is populated only when the corresponding feature was requested
/// and completed. Tag numbers are non-sequential because they mirror the
/// field numbers of the source proto.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateImageResponse {
    /// If present, face detection has completed successfully.
    #[prost(message, repeated, tag="1")]
    pub face_annotations: ::std::vec::Vec<FaceAnnotation>,
    /// If present, landmark detection has completed successfully.
    #[prost(message, repeated, tag="2")]
    pub landmark_annotations: ::std::vec::Vec<EntityAnnotation>,
    /// If present, logo detection has completed successfully.
    #[prost(message, repeated, tag="3")]
    pub logo_annotations: ::std::vec::Vec<EntityAnnotation>,
    /// If present, label detection has completed successfully.
    #[prost(message, repeated, tag="4")]
    pub label_annotations: ::std::vec::Vec<EntityAnnotation>,
    /// If present, text (OCR) detection has completed successfully.
    #[prost(message, repeated, tag="5")]
    pub text_annotations: ::std::vec::Vec<EntityAnnotation>,
    /// If present, text (OCR) detection or document (OCR) text detection has
    /// completed successfully.
    /// This annotation provides the structural hierarchy for the OCR detected
    /// text.
    #[prost(message, optional, tag="12")]
    pub full_text_annotation: ::std::option::Option<TextAnnotation>,
    /// If present, safe-search annotation has completed successfully.
    #[prost(message, optional, tag="6")]
    pub safe_search_annotation: ::std::option::Option<SafeSearchAnnotation>,
    /// If present, image properties were extracted successfully.
    #[prost(message, optional, tag="8")]
    pub image_properties_annotation: ::std::option::Option<ImageProperties>,
    /// If present, crop hints have completed successfully.
    #[prost(message, optional, tag="11")]
    pub crop_hints_annotation: ::std::option::Option<CropHintsAnnotation>,
    /// If present, web detection has completed successfully.
    #[prost(message, optional, tag="13")]
    pub web_detection: ::std::option::Option<WebDetection>,
    /// If set, represents the error message for the operation.
    /// Note that filled-in image annotations are guaranteed to be
    /// correct, even when `error` is set.
    #[prost(message, optional, tag="9")]
    pub error: ::std::option::Option<super::super::super::rpc::Status>,
    /// If present, contextual information is needed to understand where this image
    /// comes from.
    #[prost(message, optional, tag="21")]
    pub context: ::std::option::Option<ImageAnnotationContext>,
}
/// Response to a single file annotation request. A file may contain one or more
/// images, which individually have their own responses.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateFileResponse {
    /// Information about the file for which this response is generated.
    #[prost(message, optional, tag="1")]
    pub input_config: ::std::option::Option<InputConfig>,
    /// Individual responses to images found within the file, one per image.
    #[prost(message, repeated, tag="2")]
    pub responses: ::std::vec::Vec<AnnotateImageResponse>,
}
/// Multiple image annotation requests are batched into a single service call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchAnnotateImagesRequest {
    /// Required. Individual image annotation requests for this batch.
    #[prost(message, repeated, tag="1")]
    pub requests: ::std::vec::Vec<AnnotateImageRequest>,
}
/// Response to a batch image annotation request.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchAnnotateImagesResponse {
    /// Individual responses to image annotation requests within the batch,
    /// in the same order as the requests.
    #[prost(message, repeated, tag="1")]
    pub responses: ::std::vec::Vec<AnnotateImageResponse>,
}
/// An offline file annotation request.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AsyncAnnotateFileRequest {
    /// Required. Information about the input file.
    #[prost(message, optional, tag="1")]
    pub input_config: ::std::option::Option<InputConfig>,
    /// Required. Requested features.
    #[prost(message, repeated, tag="2")]
    pub features: ::std::vec::Vec<Feature>,
    /// Additional context that may accompany the image(s) in the file.
    #[prost(message, optional, tag="3")]
    pub image_context: ::std::option::Option<ImageContext>,
    /// Required. The desired output location and metadata (e.g. format).
    #[prost(message, optional, tag="4")]
    pub output_config: ::std::option::Option<OutputConfig>,
}
/// The response for a single offline file annotation request.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AsyncAnnotateFileResponse {
    /// The output location and metadata from AsyncAnnotateFileRequest.
    #[prost(message, optional, tag="1")]
    pub output_config: ::std::option::Option<OutputConfig>,
}
/// Multiple async file annotation requests are batched into a single service
/// call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AsyncBatchAnnotateFilesRequest {
    /// Required. Individual async file annotation requests for this batch.
    #[prost(message, repeated, tag="1")]
    pub requests: ::std::vec::Vec<AsyncAnnotateFileRequest>,
}
/// Response to an async batch file annotation request.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AsyncBatchAnnotateFilesResponse {
    /// The list of file annotation responses, one for each request in
    /// AsyncBatchAnnotateFilesRequest.
    #[prost(message, repeated, tag="1")]
    pub responses: ::std::vec::Vec<AsyncAnnotateFileResponse>,
}
/// The desired input location and metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InputConfig {
    /// The Google Cloud Storage location to read the input from.
    #[prost(message, optional, tag="1")]
    pub gcs_source: ::std::option::Option<GcsSource>,
    /// The type of the file. Currently only "application/pdf" and "image/tiff"
    /// are supported. Wildcards are not supported.
    #[prost(string, tag="2")]
    pub mime_type: std::string::String,
}
/// The desired output location and metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct OutputConfig {
    /// The Google Cloud Storage location to write the output(s) to.
    #[prost(message, optional, tag="1")]
    pub gcs_destination: ::std::option::Option<GcsDestination>,
    /// The max number of response protos to put into each output JSON file on GCS.
    /// The valid range is [1, 100]. If not specified, the default value is 20.
    ///
    /// For example, for one pdf file with 100 pages, 100 response protos will
    /// be generated. If `batch_size` = 20, then 5 json files each
    /// containing 20 response protos will be written under the prefix
    /// `gcs_destination`.`uri`.
    ///
    /// Currently, batch_size only applies to GcsDestination, with potential future
    /// support for other output configurations.
    ///
    /// Note: prost decodes a missing field as 0 here; the documented default
    /// of 20 is applied server-side, not by this struct.
    #[prost(int32, tag="2")]
    pub batch_size: i32,
}
/// The Google Cloud Storage location where the input will be read from.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GcsSource {
    /// Google Cloud Storage URI for the input file. This must only be a GCS
    /// object. Wildcards are not currently supported.
    #[prost(string, tag="1")]
    pub uri: std::string::String,
}
/// The Google Cloud Storage location where the output will be written to.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GcsDestination {
    /// Google Cloud Storage URI where the results will be stored. Results will
    /// be in JSON format and preceded by its corresponding input URI. This field
    /// can either represent a single file, or a prefix for multiple outputs.
    /// Prefixes must end in a `/`.
    ///
    /// Examples:
    ///
    /// *    File: gs://bucket-name/filename.json
    /// *    Prefix: gs://bucket-name/prefix/here/
    /// *    File: gs://bucket-name/prefix/here
    ///
    /// If multiple outputs, each response is still AnnotateFileResponse, each of
    /// which contains some subset of the full list of AnnotateImageResponse.
    /// Multiple outputs can happen if, for example, the output JSON is too large
    /// and overflows into multiple sharded files.
    #[prost(string, tag="1")]
    pub uri: std::string::String,
}
/// Contains metadata for the BatchAnnotateImages operation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct OperationMetadata {
    /// Current state of the batch operation, stored as the raw `i32` value of
    /// [`operation_metadata::State`].
    #[prost(enumeration="operation_metadata::State", tag="1")]
    pub state: i32,
    /// The time when the batch request was received.
    /// Tags 5/6 mirror the field numbers in the source proto.
    #[prost(message, optional, tag="5")]
    pub create_time: ::std::option::Option<::prost_types::Timestamp>,
    /// The time when the operation result was last updated.
    #[prost(message, optional, tag="6")]
    pub update_time: ::std::option::Option<::prost_types::Timestamp>,
}
/// Nested types for [`OperationMetadata`].
pub mod operation_metadata {
    /// Batch operation states.
    ///
    /// Discriminants match the proto enum numbers and must not be changed.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum State {
        /// Invalid.
        Unspecified = 0,
        /// Request is received.
        Created = 1,
        /// Request is actively being processed.
        Running = 2,
        /// The batch processing is done.
        Done = 3,
        /// The batch processing was cancelled.
        Cancelled = 4,
    }
}
/// A bucketized representation of likelihood, which is intended to give clients
/// highly stable results across model upgrades.
///
/// Discriminants match the proto enum numbers and must not be changed.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Likelihood {
    /// Unknown likelihood.
    Unknown = 0,
    /// It is very unlikely that the image belongs to the specified vertical.
    VeryUnlikely = 1,
    /// It is unlikely that the image belongs to the specified vertical.
    Unlikely = 2,
    /// It is possible that the image belongs to the specified vertical.
    Possible = 3,
    /// It is likely that the image belongs to the specified vertical.
    Likely = 4,
    /// It is very likely that the image belongs to the specified vertical.
    VeryLikely = 5,
}
1071# [ doc = r" Generated client implementations." ] pub mod image_annotator_client { # ! [ allow ( unused_variables , dead_code , missing_docs ) ] use tonic :: codegen :: * ; # [ doc = " Service that performs Google Cloud Vision API detection tasks over client" ] # [ doc = " images, such as face, landmark, logo, label, and text detection. The" ] # [ doc = " ImageAnnotator service returns detected entities from the images." ] pub struct ImageAnnotatorClient < T > { inner : tonic :: client :: Grpc < T > , } impl < T > ImageAnnotatorClient < T > where T : tonic :: client :: GrpcService < tonic :: body :: BoxBody > , T :: ResponseBody : Body + HttpBody + Send + 'static , T :: Error : Into < StdError > , < T :: ResponseBody as HttpBody > :: Error : Into < StdError > + Send , { pub fn new ( inner : T ) -> Self { let inner = tonic :: client :: Grpc :: new ( inner ) ; Self { inner } } pub fn with_interceptor ( inner : T , interceptor : impl Into < tonic :: Interceptor > ) -> Self { let inner = tonic :: client :: Grpc :: with_interceptor ( inner , interceptor ) ; Self { inner } } # [ doc = " Run image detection and annotation for a batch of images." ] pub async fn batch_annotate_images ( & mut self , request : impl tonic :: IntoRequest < super :: BatchAnnotateImagesRequest > , ) -> Result < tonic :: Response < super :: BatchAnnotateImagesResponse > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.vision.v1p2beta1.ImageAnnotator/BatchAnnotateImages" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " Run async image detection and annotation for a list of generic files (e.g." ] # [ doc = " PDF) which may contain multiple pages and multiple images per page." 
] # [ doc = " Progress and results can be retrieved through the" ] # [ doc = " `google.longrunning.Operations` interface." ] # [ doc = " `Operation.metadata` contains `OperationMetadata` (metadata)." ] # [ doc = " `Operation.response` contains `AsyncBatchAnnotateFilesResponse` (results)." ] pub async fn async_batch_annotate_files ( & mut self , request : impl tonic :: IntoRequest < super :: AsyncBatchAnnotateFilesRequest > , ) -> Result < tonic :: Response < super :: super :: super :: super :: longrunning :: Operation > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/google.cloud.vision.v1p2beta1.ImageAnnotator/AsyncBatchAnnotateFiles" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } } impl < T : Clone > Clone for ImageAnnotatorClient < T > { fn clone ( & self ) -> Self { Self { inner : self . inner . clone ( ) , } } } impl < T > std :: fmt :: Debug for ImageAnnotatorClient < T > { fn fmt ( & self , f : & mut std :: fmt :: Formatter < '_ > ) -> std :: fmt :: Result { write ! ( f , "ImageAnnotatorClient {{ ... }}" ) } } }use serde :: { Serialize , Deserialize } ;