// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
impl super::Client {
/// Constructs a fluent builder for the [`GetPersonTracking`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder) operation.
/// This operation supports pagination; see [`into_paginator()`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::into_paginator). A hand-written usage sketch appears in the example below.
///
/// - The fluent builder is configurable:
/// - [`job_id(impl Into<String>)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::job_id) / [`set_job_id(Option<String>)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::set_job_id):<br>required: **true**<br><p>The identifier for a job that tracks persons in a video. You get the <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p><br>
/// - [`max_results(i32)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::max_results) / [`set_max_results(Option<i32>)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::set_max_results):<br>required: **false**<br><p>Maximum number of results to return per paginated call. The largest value you can specify is 1000. If you specify a value greater than 1000, a maximum of 1000 results is returned. The default value is 1000.</p><br>
/// - [`next_token(impl Into<String>)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::next_token) / [`set_next_token(Option<String>)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::set_next_token):<br>required: **false**<br><p>If the previous response was incomplete (because there are more persons to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of persons. </p><br>
/// - [`sort_by(PersonTrackingSortBy)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::sort_by) / [`set_sort_by(Option<PersonTrackingSortBy>)`](crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::set_sort_by):<br>required: **false**<br><p>Sort to use for elements in the <code>Persons</code> array. Use <code>TIMESTAMP</code> to sort array elements by the time persons are detected. Use <code>INDEX</code> to sort by the tracked persons. If you sort by <code>INDEX</code>, the array elements for each person are sorted by detection confidence. The default sort is by <code>TIMESTAMP</code>.</p><br>
/// - On success, responds with [`GetPersonTrackingOutput`](crate::operation::get_person_tracking::GetPersonTrackingOutput) with field(s):
/// - [`job_status(Option<VideoJobStatus>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::job_status): <p>The current status of the person tracking job.</p>
/// - [`status_message(Option<String>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::status_message): <p>If the job fails, <code>StatusMessage</code> provides a descriptive error message.</p>
///   - [`video_metadata(Option<VideoMetadata>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::video_metadata): <p>Information about a video that Amazon Rekognition Video analyzed. <code>VideoMetadata</code> is returned in every page of paginated responses from an Amazon Rekognition Video operation.</p>
/// - [`next_token(Option<String>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::next_token): <p>If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of persons. </p>
/// - [`persons(Option<Vec::<PersonDetection>>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::persons): <p>An array of the persons detected in the video and the time(s) their path was tracked throughout the video. An array element will exist for each time a person's path is tracked. </p>
///   - [`job_id(Option<String>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::job_id): <p>Job identifier for the person tracking operation for which you want to obtain results. The job identifier is returned by an initial call to StartPersonTracking.</p>
///   - [`video(Option<Video>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::video): <p>Video file stored in an Amazon S3 bucket. Amazon Rekognition Video start operations such as <code>StartLabelDetection</code> use <code>Video</code> to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.</p>
///   - [`job_tag(Option<String>)`](crate::operation::get_person_tracking::GetPersonTrackingOutput::job_tag): <p>A job identifier specified in the call to StartPersonTracking and returned in the job completion notification sent to your Amazon Simple Notification Service topic.</p>
/// - On failure, responds with [`SdkError<GetPersonTrackingError>`](crate::operation::get_person_tracking::GetPersonTrackingError)
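    /// # Example
    ///
    /// A minimal, hand-written usage sketch (not produced by the code generator). It assumes an
    /// already-constructed `aws_sdk_rekognition::Client` and a hypothetical `JobId` returned by an
    /// earlier `StartPersonTracking` call; output field access follows the shapes listed above.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_rekognition::Client) -> Result<(), aws_sdk_rekognition::Error> {
    /// use aws_sdk_rekognition::types::PersonTrackingSortBy;
    ///
    /// // Page through all tracked persons, sorted by the time they were detected.
    /// let mut pages = client
    ///     .get_person_tracking()
    ///     .job_id("hypothetical-job-id") // assumed JobId from StartPersonTracking
    ///     .sort_by(PersonTrackingSortBy::Timestamp)
    ///     .into_paginator()
    ///     .send();
    /// while let Some(page) = pages.next().await {
    ///     let page = page?;
    ///     for person in page.persons.unwrap_or_default() {
    ///         // Each `PersonDetection` carries the detection timestamp and person details.
    ///         println!("{person:?}");
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```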
pub fn get_person_tracking(&self) -> crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder {
crate::operation::get_person_tracking::builders::GetPersonTrackingFluentBuilder::new(self.handle.clone())
}
}