// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct IndexFacesOutput {
    /// <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide. </p>
    pub face_records: ::std::option::Option<::std::vec::Vec<crate::types::FaceRecord>>,
    /// <p>If your collection is associated with a face detection model that's later than version 3.0, the value of <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
    /// <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
    /// <ul>
    /// <li> <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</p> </li>
    /// <li> <p>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.</p> </li>
    /// </ul>
    /// <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling <code>DescribeCollection</code>. </p>
    pub orientation_correction: ::std::option::Option<crate::types::OrientationCorrection>,
    /// <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
    pub face_model_version: ::std::option::Option<::std::string::String>,
    /// <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the <code>QualityFilter</code> request parameter.</p>
    pub unindexed_faces: ::std::option::Option<::std::vec::Vec<crate::types::UnindexedFace>>,
    _request_id: Option<String>,
}
impl IndexFacesOutput {
    /// <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide. </p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.face_records.is_none()`.
    pub fn face_records(&self) -> &[crate::types::FaceRecord] {
        self.face_records.as_deref().unwrap_or_default()
    }
    /// <p>If your collection is associated with a face detection model that's later than version 3.0, the value of <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
    /// <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
    /// <ul>
    /// <li> <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</p> </li>
    /// <li> <p>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.</p> </li>
    /// </ul>
    /// <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling <code>DescribeCollection</code>. </p>
    pub fn orientation_correction(&self) -> ::std::option::Option<&crate::types::OrientationCorrection> {
        self.orientation_correction.as_ref()
    }
    /// <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
    pub fn face_model_version(&self) -> ::std::option::Option<&str> {
        self.face_model_version.as_deref()
    }
    /// <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the <code>QualityFilter</code> request parameter.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.unindexed_faces.is_none()`.
    pub fn unindexed_faces(&self) -> &[crate::types::UnindexedFace] {
        self.unindexed_faces.as_deref().unwrap_or_default()
    }
}
impl ::aws_types::request_id::RequestId for IndexFacesOutput {
    fn request_id(&self) -> Option<&str> {
        self._request_id.as_deref()
    }
}
impl IndexFacesOutput {
    /// Creates a new builder-style object to manufacture [`IndexFacesOutput`](crate::operation::index_faces::IndexFacesOutput).
    pub fn builder() -> crate::operation::index_faces::builders::IndexFacesOutputBuilder {
        crate::operation::index_faces::builders::IndexFacesOutputBuilder::default()
    }
}
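
// A minimal sketch (not part of the generated code): exercises the slice accessors
// defined above. The accessor docs note that unset collection fields read back as an
// empty slice, while checking `.face_records.is_none()` on the struct field is how a
// caller distinguishes "not sent" from "sent but empty". Everything used here is
// defined in this file.
#[cfg(test)]
mod accessor_default_tests {
    #[test]
    fn unset_collections_read_as_empty_slices() {
        // Nothing set: accessors fall back to empty slices via `unwrap_or_default()`.
        let unset = crate::operation::index_faces::IndexFacesOutput::builder().build();
        assert!(unset.face_records().is_empty());
        assert!(unset.unindexed_faces().is_empty());
        assert!(unset.face_records.is_none());

        // Explicitly set to an empty Vec: the accessor looks the same, but the
        // underlying field is `Some`, which is how the two cases are told apart.
        let empty = crate::operation::index_faces::IndexFacesOutput::builder()
            .set_face_records(::std::option::Option::Some(::std::vec::Vec::new()))
            .build();
        assert!(empty.face_records().is_empty());
        assert!(empty.face_records.is_some());
    }
}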

/// A builder for [`IndexFacesOutput`](crate::operation::index_faces::IndexFacesOutput).
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
pub struct IndexFacesOutputBuilder {
    pub(crate) face_records: ::std::option::Option<::std::vec::Vec<crate::types::FaceRecord>>,
    pub(crate) orientation_correction: ::std::option::Option<crate::types::OrientationCorrection>,
    pub(crate) face_model_version: ::std::option::Option<::std::string::String>,
    pub(crate) unindexed_faces: ::std::option::Option<::std::vec::Vec<crate::types::UnindexedFace>>,
    _request_id: Option<String>,
}
impl IndexFacesOutputBuilder {
    /// Appends an item to `face_records`.
    ///
    /// To override the contents of this collection use [`set_face_records`](Self::set_face_records).
    ///
    /// <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide. </p>
    pub fn face_records(mut self, input: crate::types::FaceRecord) -> Self {
        let mut v = self.face_records.unwrap_or_default();
        v.push(input);
        self.face_records = ::std::option::Option::Some(v);
        self
    }
    /// <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide. </p>
    pub fn set_face_records(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::FaceRecord>>) -> Self {
        self.face_records = input;
        self
    }
    /// <p>An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide. </p>
    pub fn get_face_records(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::FaceRecord>> {
        &self.face_records
    }
    /// <p>If your collection is associated with a face detection model that's later than version 3.0, the value of <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
    /// <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
    /// <ul>
    /// <li> <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</p> </li>
    /// <li> <p>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.</p> </li>
    /// </ul>
    /// <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling <code>DescribeCollection</code>. </p>
    pub fn orientation_correction(mut self, input: crate::types::OrientationCorrection) -> Self {
        self.orientation_correction = ::std::option::Option::Some(input);
        self
    }
    /// <p>If your collection is associated with a face detection model that's later than version 3.0, the value of <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
    /// <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
    /// <ul>
    /// <li> <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</p> </li>
    /// <li> <p>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.</p> </li>
    /// </ul>
    /// <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling <code>DescribeCollection</code>. </p>
    pub fn set_orientation_correction(mut self, input: ::std::option::Option<crate::types::OrientationCorrection>) -> Self {
        self.orientation_correction = input;
        self
    }
    /// <p>If your collection is associated with a face detection model that's later than version 3.0, the value of <code>OrientationCorrection</code> is always null and no orientation information is returned.</p>
    /// <p>If your collection is associated with a face detection model that's version 3.0 or earlier, the following applies:</p>
    /// <ul>
    /// <li> <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction - the bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. The value of <code>OrientationCorrection</code> is null.</p> </li>
    /// <li> <p>If the image doesn't contain orientation information in its Exif metadata, Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction for images. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.</p> </li>
    /// </ul>
    /// <p>Bounding box information is returned in the <code>FaceRecords</code> array. You can get the version of the face detection model by calling <code>DescribeCollection</code>. </p>
    pub fn get_orientation_correction(&self) -> &::std::option::Option<crate::types::OrientationCorrection> {
        &self.orientation_correction
    }
    /// <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
    pub fn face_model_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.face_model_version = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
    pub fn set_face_model_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.face_model_version = input;
        self
    }
    /// <p>The version number of the face detection model that's associated with the input collection (<code>CollectionId</code>).</p>
    pub fn get_face_model_version(&self) -> &::std::option::Option<::std::string::String> {
        &self.face_model_version
    }
    /// Appends an item to `unindexed_faces`.
    ///
    /// To override the contents of this collection use [`set_unindexed_faces`](Self::set_unindexed_faces).
    ///
    /// <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the <code>QualityFilter</code> request parameter.</p>
    pub fn unindexed_faces(mut self, input: crate::types::UnindexedFace) -> Self {
        let mut v = self.unindexed_faces.unwrap_or_default();
        v.push(input);
        self.unindexed_faces = ::std::option::Option::Some(v);
        self
    }
    /// <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the <code>QualityFilter</code> request parameter.</p>
    pub fn set_unindexed_faces(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::UnindexedFace>>) -> Self {
        self.unindexed_faces = input;
        self
    }
    /// <p>An array of faces that were detected in the image but weren't indexed. They weren't indexed because the quality filter identified them as low quality, or the <code>MaxFaces</code> request parameter filtered them out. To use the quality filter, you specify the <code>QualityFilter</code> request parameter.</p>
    pub fn get_unindexed_faces(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::UnindexedFace>> {
        &self.unindexed_faces
    }
    pub(crate) fn _request_id(mut self, request_id: impl Into<String>) -> Self {
        self._request_id = Some(request_id.into());
        self
    }

    pub(crate) fn _set_request_id(&mut self, request_id: Option<String>) -> &mut Self {
        self._request_id = request_id;
        self
    }
    /// Consumes the builder and constructs an [`IndexFacesOutput`](crate::operation::index_faces::IndexFacesOutput).
    pub fn build(self) -> crate::operation::index_faces::IndexFacesOutput {
        crate::operation::index_faces::IndexFacesOutput {
            face_records: self.face_records,
            orientation_correction: self.orientation_correction,
            face_model_version: self.face_model_version,
            unindexed_faces: self.unindexed_faces,
            _request_id: self._request_id,
        }
    }
}
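
// A minimal sketch (not part of the generated code): shows the append-style
// `face_records(...)` builder method versus the wholesale `set_face_records(...)`
// override documented above. Constructing an empty `crate::types::FaceRecord` through
// its own builder is an assumption here (the type is treated as having no required
// members, so its `build()` is assumed infallible).
#[cfg(test)]
mod builder_append_tests {
    #[test]
    fn append_accumulates_and_set_overrides() {
        // Each call to `face_records(...)` appends one item to the collection.
        let appended = crate::operation::index_faces::IndexFacesOutput::builder()
            .face_records(crate::types::FaceRecord::builder().build())
            .face_records(crate::types::FaceRecord::builder().build())
            .face_model_version("5.0")
            .build();
        assert_eq!(appended.face_records().len(), 2);
        assert_eq!(appended.face_model_version(), ::std::option::Option::Some("5.0"));

        // `set_face_records(None)` replaces whatever was appended earlier.
        let cleared = crate::operation::index_faces::IndexFacesOutput::builder()
            .face_records(crate::types::FaceRecord::builder().build())
            .set_face_records(::std::option::Option::None)
            .build();
        assert!(cleared.face_records().is_empty());
    }
}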