// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
impl super::Client {
    /// Constructs a fluent builder for the [`GetTextDetection`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`job_id(impl Into<String>)`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::job_id) / [`set_job_id(Option<String>)`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::set_job_id):<br>required: **true**<br><p>Job identifier for the text detection operation for which you want results returned. You get the job identifier from an initial call to <code>StartTextDetection</code>.</p><br>
    ///   - [`max_results(i32)`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::max_results) / [`set_max_results(Option<i32>)`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::set_max_results):<br>required: **false**<br><p>Maximum number of results to return per paginated call. The largest value you can specify is 1000.</p><br>
    ///   - [`next_token(impl Into<String>)`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::next_token) / [`set_next_token(Option<String>)`](crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::set_next_token):<br>required: **false**<br><p>If the previous response was incomplete (because there is more text to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of text.</p><br>
    /// - On success, responds with [`GetTextDetectionOutput`](crate::operation::get_text_detection::GetTextDetectionOutput) with field(s):
    ///   - [`job_status(Option<VideoJobStatus>)`](crate::operation::get_text_detection::GetTextDetectionOutput::job_status): <p>Current status of the text detection job.</p>
    ///   - [`status_message(Option<String>)`](crate::operation::get_text_detection::GetTextDetectionOutput::status_message): <p>If the job fails, <code>StatusMessage</code> provides a descriptive error message.</p>
    ///   - [`video_metadata(Option<VideoMetadata>)`](crate::operation::get_text_detection::GetTextDetectionOutput::video_metadata): <p>Information about a video that Amazon Rekognition analyzed. <code>VideoMetadata</code> is returned in every page of paginated responses from an Amazon Rekognition Video operation.</p>
    ///   - [`text_detections(Option<Vec::<TextDetectionResult>>)`](crate::operation::get_text_detection::GetTextDetectionOutput::text_detections): <p>An array of text detected in the video. Each element contains the detected text, the time in milliseconds from the start of the video that the text was detected, and where it was detected on the screen.</p>
    ///   - [`next_token(Option<String>)`](crate::operation::get_text_detection::GetTextDetectionOutput::next_token): <p>If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of text.</p>
    ///   - [`text_model_version(Option<String>)`](crate::operation::get_text_detection::GetTextDetectionOutput::text_model_version): <p>Version number of the text detection model that was used to detect text.</p>
    ///   - [`job_id(Option<String>)`](crate::operation::get_text_detection::GetTextDetectionOutput::job_id): <p>Job identifier for the text detection operation for which you want to obtain results. The job identifier is returned by an initial call to StartTextDetection.</p>
    ///   - [`video(Option<Video>)`](crate::operation::get_text_detection::GetTextDetectionOutput::video): <p>Video file stored in an Amazon S3 bucket. Amazon Rekognition Video start operations such as <code>StartLabelDetection</code> use <code>Video</code> to specify a video for analysis. The supported file formats are .mp4, .mov, and .avi.</p>
    ///   - [`job_tag(Option<String>)`](crate::operation::get_text_detection::GetTextDetectionOutput::job_tag): <p>A job identifier specified in the call to StartTextDetection and returned in the job completion notification sent to your Amazon Simple Notification Service topic.</p>
    /// - On failure, responds with [`SdkError<GetTextDetectionError>`](crate::operation::get_text_detection::GetTextDetectionError)
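    ///
    /// # Example
    ///
    /// A minimal sketch of draining every page of results. It assumes an
    /// already-configured `aws_sdk_rekognition::Client` and uses a placeholder
    /// job ID; in real code, pass the ID returned by `StartTextDetection`.
    /// Depending on your SDK version, calling `next()` on the pagination
    /// stream may require `tokio_stream::StreamExt` to be in scope.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), aws_sdk_rekognition::Error> {
    /// // Build the request once, then convert it into a paginator so the SDK
    /// // threads `next_token` through successive calls automatically.
    /// let mut pages = client
    ///     .get_text_detection()
    ///     .job_id("example-job-id") // placeholder; use the ID from StartTextDetection
    ///     .max_results(1000)
    ///     .into_paginator()
    ///     .send();
    /// // Each item is one `GetTextDetectionOutput` page (or an `SdkError`).
    /// while let Some(page) = pages.next().await {
    ///     let page = page?;
    ///     for detection in page.text_detections.unwrap_or_default() {
    ///         println!("{detection:?}");
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```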
    pub fn get_text_detection(&self) -> crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder {
        crate::operation::get_text_detection::builders::GetTextDetectionFluentBuilder::new(self.handle.clone())
    }
}