aws_sdk_rekognition/operation/index_faces/
_index_faces_input.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Input for the `IndexFaces` operation: names the target collection, supplies the image to
/// analyze, and carries the optional indexing controls (attributes to return, face cap, and
/// quality filtering) described on the individual fields below.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct IndexFacesInput {
    /// <p>The ID of an existing collection to which you want to add the faces that are detected in the input images.</p>
    pub collection_id: ::std::option::Option<::std::string::String>,
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub image: ::std::option::Option<crate::types::Image>,
    /// <p>The ID you want to assign to all the faces detected in the image.</p>
    pub external_image_id: ::std::option::Option<::std::string::String>,
    /// <p>An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using <code>\["DEFAULT", "FACE_OCCLUDED"\]</code> or just <code>\["FACE_OCCLUDED"\]</code>. You can request for all facial attributes by using <code>\["ALL"\]</code>. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).</p>
    pub detection_attributes: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>,
    /// <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100 detected faces in an image, even if you specify a larger value for <code>MaxFaces</code>.</p>
    /// <p>If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>, the faces with the lowest quality are filtered out first. If there are still more faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of <code>MaxFaces</code>). Information about the unindexed faces is available in the <code>UnindexedFaces</code> array.</p>
    /// <p>The faces that are returned by <code>IndexFaces</code> are sorted by the largest face bounding box size to the smallest size, in descending order.</p>
    /// <p><code>MaxFaces</code> can be used with a collection associated with any version of the face model.</p>
    pub max_faces: ::std::option::Option<i32>,
    /// <p>A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that don’t meet the chosen quality bar. The default value is <code>AUTO</code>. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no filtering is performed.</p>
    /// <p>To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.</p>
    pub quality_filter: ::std::option::Option<crate::types::QualityFilter>,
}
25impl IndexFacesInput {
26    /// <p>The ID of an existing collection to which you want to add the faces that are detected in the input images.</p>
27    pub fn collection_id(&self) -> ::std::option::Option<&str> {
28        self.collection_id.as_deref()
29    }
30    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.</p>
31    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
32    pub fn image(&self) -> ::std::option::Option<&crate::types::Image> {
33        self.image.as_ref()
34    }
35    /// <p>The ID you want to assign to all the faces detected in the image.</p>
36    pub fn external_image_id(&self) -> ::std::option::Option<&str> {
37        self.external_image_id.as_deref()
38    }
39    /// <p>An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using <code>\["DEFAULT", "FACE_OCCLUDED"\]</code> or just <code>\["FACE_OCCLUDED"\]</code>. You can request for all facial attributes by using <code>\["ALL"\]</code>. Requesting more attributes may increase response time.</p>
40    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).</p>
41    ///
42    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.detection_attributes.is_none()`.
43    pub fn detection_attributes(&self) -> &[crate::types::Attribute] {
44        self.detection_attributes.as_deref().unwrap_or_default()
45    }
46    /// <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100 detected faces in an image, even if you specify a larger value for <code>MaxFaces</code>.</p>
47    /// <p>If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>, the faces with the lowest quality are filtered out first. If there are still more faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of <code>MaxFaces</code>). Information about the unindexed faces is available in the <code>UnindexedFaces</code> array.</p>
48    /// <p>The faces that are returned by <code>IndexFaces</code> are sorted by the largest face bounding box size to the smallest size, in descending order.</p>
49    /// <p><code>MaxFaces</code> can be used with a collection associated with any version of the face model.</p>
50    pub fn max_faces(&self) -> ::std::option::Option<i32> {
51        self.max_faces
52    }
53    /// <p>A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that don’t meet the chosen quality bar. The default value is <code>AUTO</code>. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no filtering is performed.</p>
54    /// <p>To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.</p>
55    pub fn quality_filter(&self) -> ::std::option::Option<&crate::types::QualityFilter> {
56        self.quality_filter.as_ref()
57    }
58}
59impl IndexFacesInput {
60    /// Creates a new builder-style object to manufacture [`IndexFacesInput`](crate::operation::index_faces::IndexFacesInput).
61    pub fn builder() -> crate::operation::index_faces::builders::IndexFacesInputBuilder {
62        crate::operation::index_faces::builders::IndexFacesInputBuilder::default()
63    }
64}
65
/// A builder for [`IndexFacesInput`](crate::operation::index_faces::IndexFacesInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct IndexFacesInputBuilder {
    // Each field mirrors the field of the same name on `IndexFacesInput`;
    // `None` means the caller has not set it yet (`Default` starts all fields as `None`).
    pub(crate) collection_id: ::std::option::Option<::std::string::String>,
    pub(crate) image: ::std::option::Option<crate::types::Image>,
    pub(crate) external_image_id: ::std::option::Option<::std::string::String>,
    pub(crate) detection_attributes: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>,
    pub(crate) max_faces: ::std::option::Option<i32>,
    pub(crate) quality_filter: ::std::option::Option<crate::types::QualityFilter>,
}
77impl IndexFacesInputBuilder {
78    /// <p>The ID of an existing collection to which you want to add the faces that are detected in the input images.</p>
79    /// This field is required.
80    pub fn collection_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
81        self.collection_id = ::std::option::Option::Some(input.into());
82        self
83    }
84    /// <p>The ID of an existing collection to which you want to add the faces that are detected in the input images.</p>
85    pub fn set_collection_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
86        self.collection_id = input;
87        self
88    }
89    /// <p>The ID of an existing collection to which you want to add the faces that are detected in the input images.</p>
90    pub fn get_collection_id(&self) -> &::std::option::Option<::std::string::String> {
91        &self.collection_id
92    }
93    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.</p>
94    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
95    /// This field is required.
96    pub fn image(mut self, input: crate::types::Image) -> Self {
97        self.image = ::std::option::Option::Some(input);
98        self
99    }
100    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.</p>
101    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
102    pub fn set_image(mut self, input: ::std::option::Option<crate::types::Image>) -> Self {
103        self.image = input;
104        self
105    }
106    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes isn't supported.</p>
107    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
108    pub fn get_image(&self) -> &::std::option::Option<crate::types::Image> {
109        &self.image
110    }
111    /// <p>The ID you want to assign to all the faces detected in the image.</p>
112    pub fn external_image_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
113        self.external_image_id = ::std::option::Option::Some(input.into());
114        self
115    }
116    /// <p>The ID you want to assign to all the faces detected in the image.</p>
117    pub fn set_external_image_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
118        self.external_image_id = input;
119        self
120    }
121    /// <p>The ID you want to assign to all the faces detected in the image.</p>
122    pub fn get_external_image_id(&self) -> &::std::option::Option<::std::string::String> {
123        &self.external_image_id
124    }
125    /// Appends an item to `detection_attributes`.
126    ///
127    /// To override the contents of this collection use [`set_detection_attributes`](Self::set_detection_attributes).
128    ///
129    /// <p>An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using <code>\["DEFAULT", "FACE_OCCLUDED"\]</code> or just <code>\["FACE_OCCLUDED"\]</code>. You can request for all facial attributes by using <code>\["ALL"\]</code>. Requesting more attributes may increase response time.</p>
130    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).</p>
131    pub fn detection_attributes(mut self, input: crate::types::Attribute) -> Self {
132        let mut v = self.detection_attributes.unwrap_or_default();
133        v.push(input);
134        self.detection_attributes = ::std::option::Option::Some(v);
135        self
136    }
137    /// <p>An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using <code>\["DEFAULT", "FACE_OCCLUDED"\]</code> or just <code>\["FACE_OCCLUDED"\]</code>. You can request for all facial attributes by using <code>\["ALL"\]</code>. Requesting more attributes may increase response time.</p>
138    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).</p>
139    pub fn set_detection_attributes(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>) -> Self {
140        self.detection_attributes = input;
141        self
142    }
143    /// <p>An array of facial attributes you want to be returned. A <code>DEFAULT</code> subset of facial attributes - <code>BoundingBox</code>, <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and <code>Landmarks</code> - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using <code>\["DEFAULT", "FACE_OCCLUDED"\]</code> or just <code>\["FACE_OCCLUDED"\]</code>. You can request for all facial attributes by using <code>\["ALL"\]</code>. Requesting more attributes may increase response time.</p>
144    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND operator to determine which attributes to return (in this case, all attributes).</p>
145    pub fn get_detection_attributes(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Attribute>> {
146        &self.detection_attributes
147    }
148    /// <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100 detected faces in an image, even if you specify a larger value for <code>MaxFaces</code>.</p>
149    /// <p>If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>, the faces with the lowest quality are filtered out first. If there are still more faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of <code>MaxFaces</code>). Information about the unindexed faces is available in the <code>UnindexedFaces</code> array.</p>
150    /// <p>The faces that are returned by <code>IndexFaces</code> are sorted by the largest face bounding box size to the smallest size, in descending order.</p>
151    /// <p><code>MaxFaces</code> can be used with a collection associated with any version of the face model.</p>
152    pub fn max_faces(mut self, input: i32) -> Self {
153        self.max_faces = ::std::option::Option::Some(input);
154        self
155    }
156    /// <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100 detected faces in an image, even if you specify a larger value for <code>MaxFaces</code>.</p>
157    /// <p>If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>, the faces with the lowest quality are filtered out first. If there are still more faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of <code>MaxFaces</code>). Information about the unindexed faces is available in the <code>UnindexedFaces</code> array.</p>
158    /// <p>The faces that are returned by <code>IndexFaces</code> are sorted by the largest face bounding box size to the smallest size, in descending order.</p>
159    /// <p><code>MaxFaces</code> can be used with a collection associated with any version of the face model.</p>
160    pub fn set_max_faces(mut self, input: ::std::option::Option<i32>) -> Self {
161        self.max_faces = input;
162        self
163    }
164    /// <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100 detected faces in an image, even if you specify a larger value for <code>MaxFaces</code>.</p>
165    /// <p>If <code>IndexFaces</code> detects more faces than the value of <code>MaxFaces</code>, the faces with the lowest quality are filtered out first. If there are still more faces than the value of <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered out (up to the number that's needed to satisfy the value of <code>MaxFaces</code>). Information about the unindexed faces is available in the <code>UnindexedFaces</code> array.</p>
166    /// <p>The faces that are returned by <code>IndexFaces</code> are sorted by the largest face bounding box size to the smallest size, in descending order.</p>
167    /// <p><code>MaxFaces</code> can be used with a collection associated with any version of the face model.</p>
168    pub fn get_max_faces(&self) -> &::std::option::Option<i32> {
169        &self.max_faces
170    }
171    /// <p>A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that don’t meet the chosen quality bar. The default value is <code>AUTO</code>. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no filtering is performed.</p>
172    /// <p>To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.</p>
173    pub fn quality_filter(mut self, input: crate::types::QualityFilter) -> Self {
174        self.quality_filter = ::std::option::Option::Some(input);
175        self
176    }
177    /// <p>A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that don’t meet the chosen quality bar. The default value is <code>AUTO</code>. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no filtering is performed.</p>
178    /// <p>To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.</p>
179    pub fn set_quality_filter(mut self, input: ::std::option::Option<crate::types::QualityFilter>) -> Self {
180        self.quality_filter = input;
181        self
182    }
183    /// <p>A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that don’t meet the chosen quality bar. The default value is <code>AUTO</code>. The quality bar is based on a variety of common use cases. Low-quality detections can occur for a number of reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, or a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no filtering is performed.</p>
184    /// <p>To use quality filtering, the collection you are using must be associated with version 3 of the face model or higher.</p>
185    pub fn get_quality_filter(&self) -> &::std::option::Option<crate::types::QualityFilter> {
186        &self.quality_filter
187    }
188    /// Consumes the builder and constructs a [`IndexFacesInput`](crate::operation::index_faces::IndexFacesInput).
189    pub fn build(self) -> ::std::result::Result<crate::operation::index_faces::IndexFacesInput, ::aws_smithy_types::error::operation::BuildError> {
190        ::std::result::Result::Ok(crate::operation::index_faces::IndexFacesInput {
191            collection_id: self.collection_id,
192            image: self.image,
193            external_image_id: self.external_image_id,
194            detection_attributes: self.detection_attributes,
195            max_faces: self.max_faces,
196            quality_filter: self.quality_filter,
197        })
198    }
199}