aws_sdk_rekognition/operation/detect_faces/_detect_faces_input.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct DetectFacesInput {
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub image: ::std::option::Option<crate::types::Image>,
    /// <p>An array of facial attributes you want returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    pub attributes: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>,
}
impl DetectFacesInput {
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub fn image(&self) -> ::std::option::Option<&crate::types::Image> {
        self.image.as_ref()
    }
    /// <p>An array of facial attributes you want returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.attributes.is_none()`.
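    ///
    /// A minimal sketch (not part of the generated docs) of telling "never sent" apart from "sent but empty", using the public `attributes` field alongside this accessor:
    ///
    /// ```no_run
    /// # fn example(input: &aws_sdk_rekognition::operation::detect_faces::DetectFacesInput) {
    /// // Empty slice whether the field was unset or explicitly set to an empty Vec.
    /// let attrs = input.attributes();
    /// // The raw Option distinguishes the two cases.
    /// let was_sent = input.attributes.is_some();
    /// # let _ = (attrs, was_sent);
    /// # }
    /// ```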
    pub fn attributes(&self) -> &[crate::types::Attribute] {
        self.attributes.as_deref().unwrap_or_default()
    }
}
impl DetectFacesInput {
    /// Creates a new builder-style object to manufacture [`DetectFacesInput`](crate::operation::detect_faces::DetectFacesInput).
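    ///
    /// A minimal sketch (not part of the generated docs) of driving the builder end to end; `S3Object` and the `Attribute` variant names are assumed from this crate's `types` module, and the bucket and key are placeholders:
    ///
    /// ```no_run
    /// use aws_sdk_rekognition::operation::detect_faces::DetectFacesInput;
    /// use aws_sdk_rekognition::types::{Attribute, Image, S3Object};
    ///
    /// let input = DetectFacesInput::builder()
    ///     .image(
    ///         Image::builder()
    ///             .s3_object(S3Object::builder().bucket("my-bucket").name("photo.jpg").build())
    ///             .build(),
    ///     )
    ///     .attributes(Attribute::Default)
    ///     .build()
    ///     .expect("valid DetectFacesInput");
    /// # let _ = input;
    /// ```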
    pub fn builder() -> crate::operation::detect_faces::builders::DetectFacesInputBuilder {
        crate::operation::detect_faces::builders::DetectFacesInputBuilder::default()
    }
}

/// A builder for [`DetectFacesInput`](crate::operation::detect_faces::DetectFacesInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct DetectFacesInputBuilder {
    pub(crate) image: ::std::option::Option<crate::types::Image>,
    pub(crate) attributes: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>,
}
impl DetectFacesInputBuilder {
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    /// This field is required.
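    ///
    /// A minimal sketch of building the `Image` value from raw bytes; `Blob` is assumed from `aws_smithy_types`, and the file path is a placeholder:
    ///
    /// ```no_run
    /// use aws_sdk_rekognition::types::Image;
    /// use aws_smithy_types::Blob;
    ///
    /// let jpeg = std::fs::read("photo.jpg").expect("readable image file");
    /// // Raw bytes; per the docs above, SDK callers generally need not base64-encode them.
    /// let image = Image::builder().bytes(Blob::new(jpeg)).build();
    /// # let _ = image;
    /// ```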
    pub fn image(mut self, input: crate::types::Image) -> Self {
        self.image = ::std::option::Option::Some(input);
        self
    }
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub fn set_image(mut self, input: ::std::option::Option<crate::types::Image>) -> Self {
        self.image = input;
        self
    }
    /// <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.</p>
    /// <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p>
    pub fn get_image(&self) -> &::std::option::Option<crate::types::Image> {
        &self.image
    }
    /// Appends an item to `attributes`.
    ///
    /// To override the contents of this collection use [`set_attributes`](Self::set_attributes).
    ///
    /// <p>An array of facial attributes you want returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
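    ///
    /// A minimal sketch of chaining this appender to request the default set plus occlusion data (variant names assumed from this crate's `Attribute` enum):
    ///
    /// ```no_run
    /// use aws_sdk_rekognition::operation::detect_faces::DetectFacesInput;
    /// use aws_sdk_rekognition::types::Attribute;
    ///
    /// let builder = DetectFacesInput::builder()
    ///     .attributes(Attribute::Default)       // appends "DEFAULT"
    ///     .attributes(Attribute::FaceOccluded); // appends "FACE_OCCLUDED"
    /// # let _ = builder;
    /// ```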
    pub fn attributes(mut self, input: crate::types::Attribute) -> Self {
        let mut v = self.attributes.unwrap_or_default();
        v.push(input);
        self.attributes = ::std::option::Option::Some(v);
        self
    }
    /// <p>An array of facial attributes you want returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    pub fn set_attributes(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Attribute>>) -> Self {
        self.attributes = input;
        self
    }
    /// <p>An array of facial attributes you want returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request specific facial attributes (in addition to the default list) by using \["DEFAULT", "FACE_OCCLUDED"\] or just \["FACE_OCCLUDED"\]. You can request all facial attributes by using \["ALL"\]. Requesting more attributes may increase response time.</p>
    /// <p>If you provide both <code>\["ALL", "DEFAULT"\]</code>, the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes).</p>
    /// <p>Note that while the FaceOccluded and EyeDirection attributes are supported when using <code>DetectFaces</code>, they aren't supported when analyzing videos with <code>StartFaceDetection</code> and <code>GetFaceDetection</code>.</p>
    pub fn get_attributes(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Attribute>> {
        &self.attributes
    }
    /// Consumes the builder and constructs a [`DetectFacesInput`](crate::operation::detect_faces::DetectFacesInput).
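    ///
    /// A minimal sketch of finishing a builder; `build` returns a `Result`, so unwrap or propagate the `BuildError`:
    ///
    /// ```no_run
    /// use aws_sdk_rekognition::operation::detect_faces::DetectFacesInput;
    ///
    /// // `image` is required by the service, so set it before sending the request.
    /// let input = DetectFacesInput::builder()
    ///     .build()
    ///     .expect("DetectFacesInput should build");
    /// # let _ = input;
    /// ```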
    pub fn build(self) -> ::std::result::Result<crate::operation::detect_faces::DetectFacesInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::detect_faces::DetectFacesInput {
            image: self.image,
            attributes: self.attributes,
        })
    }
}