aws_sdk_rekognition/operation/get_segment_detection/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::get_segment_detection::_get_segment_detection_output::GetSegmentDetectionOutputBuilder;

pub use crate::operation::get_segment_detection::_get_segment_detection_input::GetSegmentDetectionInputBuilder;

impl crate::operation::get_segment_detection::builders::GetSegmentDetectionInputBuilder {
    /// Sends a request with this input using the given client.
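    ///
    /// A minimal sketch of how this might be used from application code, assuming a configured
    /// `aws_sdk_rekognition::Client` and a job ID previously returned by `StartSegmentDetection`
    /// (the job ID shown here is a placeholder):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_rekognition::operation::get_segment_detection::builders::GetSegmentDetectionInputBuilder;
    ///
    /// // Build the input separately, then send it with an existing client.
    /// let output = GetSegmentDetectionInputBuilder::default()
    ///     .job_id("placeholder-job-id")
    ///     .max_results(100)
    ///     .send_with(client)
    ///     .await?;
    /// println!("job status: {:?}", output.job_status());
    /// # Ok(())
    /// # }
    /// ```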
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::get_segment_detection::GetSegmentDetectionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::get_segment_detection::GetSegmentDetectionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.get_segment_detection();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `GetSegmentDetection`.
///
/// <p>Gets the segment detection results of an Amazon Rekognition Video analysis started by <code>StartSegmentDetection</code>.</p>
/// <p>Segment detection with Amazon Rekognition Video is an asynchronous operation. You start segment detection by calling <code>StartSegmentDetection</code>, which returns a job identifier (<code>JobId</code>). When the segment detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to <code>StartSegmentDetection</code>. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is <code>SUCCEEDED</code>. If so, call <code>GetSegmentDetection</code> and pass the job identifier (<code>JobId</code>) from the initial call of <code>StartSegmentDetection</code>.</p>
/// <p><code>GetSegmentDetection</code> returns detected segments in an array (<code>Segments</code>) of <code>SegmentDetection</code> objects. <code>Segments</code> is sorted by the segment types specified in the <code>SegmentTypes</code> input parameter of <code>StartSegmentDetection</code>. Each element of the array includes the detected segment, the percentage confidence in the accuracy of the detected segment, the type of the segment, and the frame in which the segment was detected.</p>
/// <p>Use <code>SelectedSegmentTypes</code> to find out the type of segment detection requested in the call to <code>StartSegmentDetection</code>.</p>
/// <p>Use the <code>MaxResults</code> parameter to limit the number of segment detections returned. If there are more results than specified in <code>MaxResults</code>, the value of <code>NextToken</code> in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call <code>GetSegmentDetection</code> and populate the <code>NextToken</code> request parameter with the token value returned from the previous call to <code>GetSegmentDetection</code>.</p>
/// <p>For more information, see Detecting video segments in stored video in the Amazon Rekognition Developer Guide.</p>
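///
/// A minimal usage sketch, assuming a configured client and a <code>JobId</code> obtained from an
/// earlier <code>StartSegmentDetection</code> call (the job ID below is a placeholder):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let response = client
///     .get_segment_detection()
///     .job_id("placeholder-job-id")
///     .max_results(100)
///     .send()
///     .await?;
///
/// // Each detected segment carries its type, confidence, and timing information.
/// for segment in response.segments() {
///     println!("{:?}", segment);
/// }
/// # Ok(())
/// # }
/// ```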
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct GetSegmentDetectionFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::get_segment_detection::builders::GetSegmentDetectionInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::get_segment_detection::GetSegmentDetectionOutput,
        crate::operation::get_segment_detection::GetSegmentDetectionError,
    > for GetSegmentDetectionFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::get_segment_detection::GetSegmentDetectionOutput,
            crate::operation::get_segment_detection::GetSegmentDetectionError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl GetSegmentDetectionFluentBuilder {
    /// Creates a new `GetSegmentDetectionFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the GetSegmentDetection input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::get_segment_detection::builders::GetSegmentDetectionInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
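    ///
    /// A rough sketch of handling the result, assuming a configured client (the job ID is a
    /// placeholder and the error handling shown is only one of several possible approaches):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) {
    /// match client.get_segment_detection().job_id("placeholder-job-id").send().await {
    ///     Ok(output) => {
    ///         // Inspect the detected segments on success.
    ///         println!("segments returned: {}", output.segments().len());
    ///     }
    ///     Err(err) => {
    ///         // `SdkError` carries the service error plus transport-level details.
    ///         eprintln!("GetSegmentDetection failed: {err:?}");
    ///     }
    /// }
    /// # }
    /// ```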
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::get_segment_detection::GetSegmentDetectionOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::get_segment_detection::GetSegmentDetectionError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::get_segment_detection::GetSegmentDetection::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::get_segment_detection::GetSegmentDetection::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
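    ///
    /// A brief sketch of a per-request configuration override, assuming a configured client
    /// (the region used here is only an example):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_rekognition::config::Region;
    ///
    /// let output = client
    ///     .get_segment_detection()
    ///     .job_id("placeholder-job-id")
    ///     .customize()
    ///     // Override part of the client configuration for this request only.
    ///     .config_override(aws_sdk_rekognition::config::Config::builder().region(Region::new("us-west-2")))
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```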
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::get_segment_detection::GetSegmentDetectionOutput,
        crate::operation::get_segment_detection::GetSegmentDetectionError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// Create a paginator for this request
    ///
    /// Paginators are used by calling [`send().await`](crate::operation::get_segment_detection::paginator::GetSegmentDetectionPaginator::send) which returns a [`PaginationStream`](aws_smithy_async::future::pagination_stream::PaginationStream).
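    ///
    /// A short sketch of paginated consumption, assuming a configured client (the job ID below
    /// is a placeholder):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let mut pages = client
    ///     .get_segment_detection()
    ///     .job_id("placeholder-job-id")
    ///     .max_results(100)
    ///     .into_paginator()
    ///     .send();
    ///
    /// // Each item yielded by the stream is a full `GetSegmentDetectionOutput` page.
    /// while let Some(page) = pages.next().await {
    ///     let page = page?;
    ///     println!("segments in this page: {}", page.segments().len());
    /// }
    /// # Ok(())
    /// # }
    /// ```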
    pub fn into_paginator(self) -> crate::operation::get_segment_detection::paginator::GetSegmentDetectionPaginator {
        crate::operation::get_segment_detection::paginator::GetSegmentDetectionPaginator::new(self.handle, self.inner)
    }
    /// <p>Job identifier for the segment detection operation for which you want results returned. You get the job identifier from an initial call to <code>StartSegmentDetection</code>.</p>
    pub fn job_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.job_id(input.into());
        self
    }
    /// <p>Job identifier for the segment detection operation for which you want results returned. You get the job identifier from an initial call to <code>StartSegmentDetection</code>.</p>
    pub fn set_job_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_job_id(input);
        self
    }
    /// <p>Job identifier for the segment detection operation for which you want results returned. You get the job identifier from an initial call to <code>StartSegmentDetection</code>.</p>
    pub fn get_job_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_job_id()
    }
    /// <p>Maximum number of results to return per paginated call. The largest value you can specify is 1000.</p>
    pub fn max_results(mut self, input: i32) -> Self {
        self.inner = self.inner.max_results(input);
        self
    }
    /// <p>Maximum number of results to return per paginated call. The largest value you can specify is 1000.</p>
    pub fn set_max_results(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_max_results(input);
        self
    }
    /// <p>Maximum number of results to return per paginated call. The largest value you can specify is 1000.</p>
    pub fn get_max_results(&self) -> &::std::option::Option<i32> {
        self.inner.get_max_results()
    }
    /// <p>If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of segments.</p>
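    ///
    /// A hand-rolled pagination loop (a rough sketch; `into_paginator()` is normally simpler),
    /// assuming a configured client and a placeholder job ID:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let mut next_token: Option<String> = None;
    /// loop {
    ///     let resp = client
    ///         .get_segment_detection()
    ///         .job_id("placeholder-job-id")
    ///         .set_next_token(next_token.clone())
    ///         .send()
    ///         .await?;
    ///     println!("segments in this page: {}", resp.segments().len());
    ///     // Stop once the service no longer returns a continuation token.
    ///     next_token = resp.next_token().map(str::to_string);
    ///     if next_token.is_none() {
    ///         break;
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```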
    pub fn next_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.next_token(input.into());
        self
    }
    /// <p>If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of segments.</p>
    pub fn set_next_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_next_token(input);
        self
    }
    /// <p>If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of segments.</p>
    pub fn get_next_token(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_next_token()
    }
}