aws_sdk_rekognition/operation/start_face_detection/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_face_detection::_start_face_detection_output::StartFaceDetectionOutputBuilder;

pub use crate::operation::start_face_detection::_start_face_detection_input::StartFaceDetectionInputBuilder;

impl crate::operation::start_face_detection::builders::StartFaceDetectionInputBuilder {
    /// Sends a request with this input using the given client.
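    ///
    /// A minimal usage sketch (illustrative only; the bucket and object names are
    /// placeholders and an already-configured `client` is assumed):
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_rekognition::operation::start_face_detection::builders::StartFaceDetectionInputBuilder;
    /// use aws_sdk_rekognition::types::{S3Object, Video};
    ///
    /// // Build the input independently of a client, then send it with one.
    /// let output = StartFaceDetectionInputBuilder::default()
    ///     .video(
    ///         Video::builder()
    ///             .s3_object(S3Object::builder().bucket("my-bucket").name("video.mp4").build())
    ///             .build(),
    ///     )
    ///     .send_with(client)
    ///     .await?;
    /// println!("started job: {:?}", output.job_id());
    /// # Ok(())
    /// # }
    /// ```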
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::start_face_detection::StartFaceDetectionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_face_detection::StartFaceDetectionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.start_face_detection();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartFaceDetection`.
///
/// <p>Starts asynchronous detection of faces in a stored video.</p>
/// <p>Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use <code>Video</code> to specify the bucket name and the filename of the video. <code>StartFaceDetection</code> returns a job identifier (<code>JobId</code>) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in <code>NotificationChannel</code>. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is <code>SUCCEEDED</code>. If so, call <code>GetFaceDetection</code> and pass the job identifier (<code>JobId</code>) from the initial call to <code>StartFaceDetection</code>.</p>
/// <p>For more information, see Detecting faces in a stored video in the Amazon Rekognition Developer Guide.</p>
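///
/// A sketch of the end-to-end flow (illustrative only: the bucket, object key,
/// SNS topic ARN, and IAM role ARN below are placeholders, and waiting for the
/// SNS notification is elided):
/// ```no_run
/// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_rekognition::types::{FaceAttributes, NotificationChannel, S3Object, Video};
///
/// let start = client
///     .start_face_detection()
///     .video(
///         Video::builder()
///             .s3_object(S3Object::builder().bucket("my-bucket").name("video.mp4").build())
///             .build(),
///     )
///     .notification_channel(
///         NotificationChannel::builder()
///             .sns_topic_arn("arn:aws:sns:us-east-1:111122223333:AmazonRekognitionTopic")
///             .role_arn("arn:aws:iam::111122223333:role/RekognitionServiceRole")
///             .build()?,
///     )
///     .face_attributes(FaceAttributes::All)
///     .send()
///     .await?;
///
/// // After the SUCCEEDED status arrives on the SNS topic, fetch the results.
/// let results = client
///     .get_face_detection()
///     .job_id(start.job_id().unwrap_or_default())
///     .send()
///     .await?;
/// # let _ = results;
/// # Ok(())
/// # }
/// ```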
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct StartFaceDetectionFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::start_face_detection::builders::StartFaceDetectionInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_face_detection::StartFaceDetectionOutput,
        crate::operation::start_face_detection::StartFaceDetectionError,
    > for StartFaceDetectionFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_face_detection::StartFaceDetectionOutput,
            crate::operation::start_face_detection::StartFaceDetectionError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartFaceDetectionFluentBuilder {
    /// Creates a new `StartFaceDetectionFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartFaceDetection input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::start_face_detection::builders::StartFaceDetectionInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
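    ///
    /// A sketch of raising the retry budget at client construction (region and
    /// credential setup are elided; `5` is an arbitrary choice):
    /// ```no_run
    /// # async fn example() {
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
    ///     .retry_config(RetryConfig::standard().with_max_attempts(5))
    ///     .load()
    ///     .await;
    /// let client = aws_sdk_rekognition::Client::new(&shared_config);
    /// # let _ = client;
    /// # }
    /// ```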
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::start_face_detection::StartFaceDetectionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_face_detection::StartFaceDetectionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::start_face_detection::StartFaceDetection::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::start_face_detection::StartFaceDetection::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
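    ///
    /// A sketch of overriding configuration for a single call (the one-attempt
    /// retry setting is an arbitrary example):
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) {
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// let result = client
    ///     .start_face_detection()
    ///     .customize()
    ///     .config_override(
    ///         aws_sdk_rekognition::config::Config::builder()
    ///             .retry_config(RetryConfig::standard().with_max_attempts(1)),
    ///     )
    ///     .send()
    ///     .await;
    /// # let _ = result;
    /// # }
    /// ```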
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::start_face_detection::StartFaceDetectionOutput,
        crate::operation::start_face_detection::StartFaceDetectionError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The video in which you want to detect faces. The video must be stored in an Amazon S3 bucket.</p>
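    ///
    /// A sketch of constructing the `Video` value (bucket and object names are
    /// placeholders):
    /// ```no_run
    /// use aws_sdk_rekognition::types::{S3Object, Video};
    ///
    /// let video = Video::builder()
    ///     .s3_object(S3Object::builder().bucket("my-bucket").name("video.mp4").build())
    ///     .build();
    /// ```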
    pub fn video(mut self, input: crate::types::Video) -> Self {
        self.inner = self.inner.video(input);
        self
    }
    /// <p>The video in which you want to detect faces. The video must be stored in an Amazon S3 bucket.</p>
    pub fn set_video(mut self, input: ::std::option::Option<crate::types::Video>) -> Self {
        self.inner = self.inner.set_video(input);
        self
    }
    /// <p>The video in which you want to detect faces. The video must be stored in an Amazon S3 bucket.</p>
    pub fn get_video(&self) -> &::std::option::Option<crate::types::Video> {
        self.inner.get_video()
    }
    /// <p>Idempotent token used to identify the start request. If you use the same token with multiple <code>StartFaceDetection</code> requests, the same <code>JobId</code> is returned. Use <code>ClientRequestToken</code> to prevent the same job from being accidentally started more than once.</p>
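    ///
    /// A sketch of an idempotent start (the token value is a placeholder; sending
    /// the same token again returns the original `JobId` rather than starting a
    /// second job):
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client, video: aws_sdk_rekognition::types::Video) {
    /// let result = client
    ///     .start_face_detection()
    ///     .video(video)
    ///     .client_request_token("my-idempotency-token")
    ///     .send()
    ///     .await;
    /// # let _ = result;
    /// # }
    /// ```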
    pub fn client_request_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.client_request_token(input.into());
        self
    }
    /// <p>Idempotent token used to identify the start request. If you use the same token with multiple <code>StartFaceDetection</code> requests, the same <code>JobId</code> is returned. Use <code>ClientRequestToken</code> to prevent the same job from being accidentally started more than once.</p>
    pub fn set_client_request_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_client_request_token(input);
        self
    }
    /// <p>Idempotent token used to identify the start request. If you use the same token with multiple <code>StartFaceDetection</code> requests, the same <code>JobId</code> is returned. Use <code>ClientRequestToken</code> to prevent the same job from being accidentally started more than once.</p>
    pub fn get_client_request_token(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_client_request_token()
    }
    /// <p>The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
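    ///
    /// A sketch of building the channel (the ARNs are placeholders; both fields
    /// are modeled as required, so `build()` is assumed to return a `Result`):
    /// ```no_run
    /// # fn example() -> Result<(), aws_smithy_types::error::operation::BuildError> {
    /// use aws_sdk_rekognition::types::NotificationChannel;
    ///
    /// let channel = NotificationChannel::builder()
    ///     .sns_topic_arn("arn:aws:sns:us-east-1:111122223333:AmazonRekognitionTopic")
    ///     .role_arn("arn:aws:iam::111122223333:role/RekognitionServiceRole")
    ///     .build()?;
    /// # let _ = channel;
    /// # Ok(())
    /// # }
    /// ```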
    pub fn notification_channel(mut self, input: crate::types::NotificationChannel) -> Self {
        self.inner = self.inner.notification_channel(input);
        self
    }
    /// <p>The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
    pub fn set_notification_channel(mut self, input: ::std::option::Option<crate::types::NotificationChannel>) -> Self {
        self.inner = self.inner.set_notification_channel(input);
        self
    }
    /// <p>The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
    pub fn get_notification_channel(&self) -> &::std::option::Option<crate::types::NotificationChannel> {
        self.inner.get_notification_channel()
    }
    /// <p>The face attributes you want returned.</p>
    /// <p><code>DEFAULT</code> - The following subset of facial attributes is returned: BoundingBox, Confidence, Pose, Quality, and Landmarks.</p>
    /// <p><code>ALL</code> - All facial attributes are returned.</p>
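    ///
    /// A sketch of requesting the full attribute set:
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client, video: aws_sdk_rekognition::types::Video) {
    /// use aws_sdk_rekognition::types::FaceAttributes;
    ///
    /// let result = client
    ///     .start_face_detection()
    ///     .video(video)
    ///     .face_attributes(FaceAttributes::All)
    ///     .send()
    ///     .await;
    /// # let _ = result;
    /// # }
    /// ```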
    pub fn face_attributes(mut self, input: crate::types::FaceAttributes) -> Self {
        self.inner = self.inner.face_attributes(input);
        self
    }
    /// <p>The face attributes you want returned.</p>
    /// <p><code>DEFAULT</code> - The following subset of facial attributes is returned: BoundingBox, Confidence, Pose, Quality, and Landmarks.</p>
    /// <p><code>ALL</code> - All facial attributes are returned.</p>
    pub fn set_face_attributes(mut self, input: ::std::option::Option<crate::types::FaceAttributes>) -> Self {
        self.inner = self.inner.set_face_attributes(input);
        self
    }
    /// <p>The face attributes you want returned.</p>
    /// <p><code>DEFAULT</code> - The following subset of facial attributes is returned: BoundingBox, Confidence, Pose, Quality, and Landmarks.</p>
    /// <p><code>ALL</code> - All facial attributes are returned.</p>
    pub fn get_face_attributes(&self) -> &::std::option::Option<crate::types::FaceAttributes> {
        self.inner.get_face_attributes()
    }
    /// <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
    pub fn job_tag(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.job_tag(input.into());
        self
    }
    /// <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
    pub fn set_job_tag(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_job_tag(input);
        self
    }
    /// <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
    pub fn get_job_tag(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_job_tag()
    }
}