// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct DetectFacesInput {
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub image: ::std::option::Option<crate::types::Image>,
    /// <p>An array of facial attributes you want returned. The DEFAULT subset of facial attributes (BoundingBox, Confidence, Pose, Quality, and Landmarks) is always returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    pub attributes: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>,
}
impl DetectFacesInput {
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub fn image(&self) -> ::std::option::Option<&crate::types::Image> {
        self.image.as_ref()
    }
    /// <p>An array of facial attributes you want returned. The DEFAULT subset of facial attributes (BoundingBox, Confidence, Pose, Quality, and Landmarks) is always returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.attributes.is_none()`.
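    ///
    /// A minimal sketch (not from the service model) of how that fallback behaves, assuming an input built without `attributes`:
    ///
    /// ```ignore
    /// let input = crate::operation::detect_faces::DetectFacesInput::builder()
    ///     .image(image) // `image` assumed to be a crate::types::Image built elsewhere
    ///     .build()?;
    /// assert!(input.attributes().is_empty()); // the accessor falls back to an empty slice
    /// assert!(input.attributes.is_none());    // the underlying field was never set
    /// ```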
    pub fn attributes(&self) -> &[crate::types::Attribute] {
        self.attributes.as_deref().unwrap_or_default()
    }
}
impl DetectFacesInput {
    /// Creates a new builder-style object to manufacture [`DetectFacesInput`](crate::operation::detect_faces::DetectFacesInput).
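    ///
    /// A minimal usage sketch, not part of the generated documentation: requesting all facial attributes for an image stored in S3. It assumes `crate::types::Attribute` has an `All` variant and that `Image` and `S3Object` follow the usual smithy-rs builder pattern; the bucket and object key are hypothetical.
    ///
    /// ```ignore
    /// let image = crate::types::Image::builder()
    ///     .s3_object(
    ///         crate::types::S3Object::builder()
    ///             .bucket("amzn-s3-demo-bucket") // hypothetical bucket
    ///             .name("faces/portrait.jpg")    // hypothetical object key
    ///             .build(),
    ///     )
    ///     .build();
    /// let input = crate::operation::detect_faces::DetectFacesInput::builder()
    ///     .image(image)
    ///     .attributes(crate::types::Attribute::All) // request every facial attribute
    ///     .build()?;
    /// ```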
    pub fn builder() -> crate::operation::detect_faces::builders::DetectFacesInputBuilder {
        crate::operation::detect_faces::builders::DetectFacesInputBuilder::default()
    }
}

/// A builder for [`DetectFacesInput`](crate::operation::detect_faces::DetectFacesInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct DetectFacesInputBuilder {
    pub(crate) image: ::std::option::Option<crate::types::Image>,
    pub(crate) attributes: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>,
}
impl DetectFacesInputBuilder {
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    /// This field is required.
    pub fn image(mut self, input: crate::types::Image) -> Self {
        self.image = ::std::option::Option::Some(input);
        self
    }
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub fn set_image(mut self, input: ::std::option::Option<crate::types::Image>) -> Self {
        self.image = input;
        self
    }
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub fn get_image(&self) -> &::std::option::Option<crate::types::Image> {
        &self.image
    }
    /// Appends an item to `attributes`.
    ///
    /// To override the contents of this collection use [`set_attributes`](Self::set_attributes).
    ///
    /// <p>An array of facial attributes you want returned. The DEFAULT subset of facial attributes (BoundingBox, Confidence, Pose, Quality, and Landmarks) is always returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
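    ///
    /// A minimal sketch (not generated documentation) of appending two attributes one at a time, assuming `Default` and `FaceOccluded` are variants of `crate::types::Attribute`:
    ///
    /// ```ignore
    /// let builder = crate::operation::detect_faces::DetectFacesInput::builder()
    ///     .attributes(crate::types::Attribute::Default)       // ["DEFAULT"]
    ///     .attributes(crate::types::Attribute::FaceOccluded); // ["DEFAULT", "FACE_OCCLUDED"]
    /// ```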
    pub fn attributes(mut self, input: crate::types::Attribute) -> Self {
        let mut v = self.attributes.unwrap_or_default();
        v.push(input);
        self.attributes = ::std::option::Option::Some(v);
        self
    }
    /// <p>An array of facial attributes you want returned. The DEFAULT subset of facial attributes (BoundingBox, Confidence, Pose, Quality, and Landmarks) is always returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    pub fn set_attributes(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>) -> Self {
        self.attributes = input;
        self
    }
    /// <p>An array of facial attributes you want returned. The DEFAULT subset of facial attributes (BoundingBox, Confidence, Pose, Quality, and Landmarks) is always returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both, <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical AND to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    pub fn get_attributes(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Attribute>> {
        &self.attributes
    }
    /// Consumes the builder and constructs a [`DetectFacesInput`](crate::operation::detect_faces::DetectFacesInput).
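    ///
    /// A minimal sketch, assuming a caller that can propagate `BuildError` with `?`:
    ///
    /// ```ignore
    /// let input = crate::operation::detect_faces::DetectFacesInput::builder()
    ///     .image(image) // `image` assumed built elsewhere; required by the service
    ///     .build()?;    // Ok(DetectFacesInput) or Err(BuildError)
    /// ```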
    pub fn build(self) -> ::std::result::Result<crate::operation::detect_faces::DetectFacesInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::detect_faces::DetectFacesInput {
            image: self.image,
            attributes: self.attributes,
        })
    }
}