// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
impl super::Client {
/// Constructs a fluent builder for the [`RecognizeCelebrities`](crate::operation::recognize_celebrities::builders::RecognizeCelebritiesFluentBuilder) operation.
///
/// - The fluent builder is configurable:
/// - [`image(Image)`](crate::operation::recognize_celebrities::builders::RecognizeCelebritiesFluentBuilder::image) / [`set_image(Option<Image>)`](crate::operation::recognize_celebrities::builders::RecognizeCelebritiesFluentBuilder::set_image):<br>required: **true**<br><p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see Images in the Amazon Rekognition developer guide.</p><br>
/// - On success, responds with [`RecognizeCelebritiesOutput`](crate::operation::recognize_celebrities::RecognizeCelebritiesOutput) with field(s):
/// - [`celebrity_faces(Option<Vec::<Celebrity>>)`](crate::operation::recognize_celebrities::RecognizeCelebritiesOutput::celebrity_faces): <p>Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64 celebrities in an image. Each celebrity object includes the following attributes: <code>Face</code>, <code>Confidence</code>, <code>Emotions</code>, <code>Landmarks</code>, <code>Pose</code>, <code>Quality</code>, <code>Smile</code>, <code>Id</code>, <code>KnownGender</code>, <code>MatchConfidence</code>, <code>Name</code>, <code>Urls</code>.</p>
/// - [`unrecognized_faces(Option<Vec::<ComparedFace>>)`](crate::operation::recognize_celebrities::RecognizeCelebritiesOutput::unrecognized_faces): <p>Details about each unrecognized face in the image.</p>
/// - [`orientation_correction(Option<OrientationCorrection>)`](crate::operation::recognize_celebrities::RecognizeCelebritiesOutput::orientation_correction): <note> <p>Support for estimating image orientation using the OrientationCorrection field has ceased as of August 2021. Any returned values for this field included in an API response will always be NULL.</p> </note> <p>The orientation of the input image (counterclockwise direction). If your application displays the image, you can use this value to correct the orientation. The bounding box coordinates returned in <code>CelebrityFaces</code> and <code>UnrecognizedFaces</code> represent face locations before the image orientation is corrected. </p> <note> <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. If so, and the Exif metadata for the input image populates the orientation field, the value of <code>OrientationCorrection</code> is null. The <code>CelebrityFaces</code> and <code>UnrecognizedFaces</code> bounding box coordinates represent face locations after Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata. </p> </note>
/// - On failure, responds with [`SdkError<RecognizeCelebritiesError>`](crate::operation::recognize_celebrities::RecognizeCelebritiesError)
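///
/// A minimal usage sketch, assuming this crate is `aws_sdk_rekognition`, that a `Client` has already been configured, and that the input image lives in S3 (the bucket and object key below are placeholders):
/// ```no_run
/// # use aws_sdk_rekognition::types::{Image, S3Object};
/// # async fn run(client: &aws_sdk_rekognition::Client) -> Result<(), aws_sdk_rekognition::Error> {
/// // Reference an S3 object instead of passing raw image bytes.
/// let image = Image::builder()
///     .s3_object(
///         S3Object::builder()
///             .bucket("my-input-bucket") // placeholder bucket
///             .name("photos/group.jpg")  // placeholder object key
///             .build(),
///     )
///     .build();
///
/// let output = client.recognize_celebrities().image(image).send().await?;
///
/// // `celebrity_faces` is documented above as Option<Vec<Celebrity>>; treat a
/// // missing value as an empty list.
/// for celebrity in output.celebrity_faces().unwrap_or_default() {
///     println!(
///         "{} (match confidence: {:?})",
///         celebrity.name().unwrap_or("unknown"),
///         celebrity.match_confidence()
///     );
/// }
/// # Ok(())
/// # }
/// ```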
pub fn recognize_celebrities(&self) -> crate::operation::recognize_celebrities::builders::RecognizeCelebritiesFluentBuilder {
crate::operation::recognize_celebrities::builders::RecognizeCelebritiesFluentBuilder::new(self.handle.clone())
}
}