// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
impl super::Client {
    /// Constructs a fluent builder for the [`StartContentModeration`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder) operation.
    ///
    /// The returned builder exposes the operation's inputs:
    ///   - [`video(Video)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::video) / [`set_video(Option<Video>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::set_video): **required** — <p>The video in which you want to detect inappropriate, unwanted, or offensive content. The video must be stored in an Amazon S3 bucket.</p>
    ///   - [`min_confidence(f32)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::min_confidence) / [`set_min_confidence(Option<f32>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::set_min_confidence): optional — <p>Minimum confidence (0 lowest, 100 highest) Amazon Rekognition must have to return a moderated content label. If unset, <code>GetContentModeration</code> returns labels with confidence of at least 50 percent.</p>
    ///   - [`client_request_token(impl Into<String>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::client_request_token) / [`set_client_request_token(Option<String>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::set_client_request_token): optional — <p>Idempotency token: reusing it across <code>StartContentModeration</code> requests yields the same <code>JobId</code>, preventing a job from being accidentally started more than once.</p>
    ///   - [`notification_channel(NotificationChannel)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::notification_channel) / [`set_notification_channel(Option<NotificationChannel>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::set_notification_channel): optional — <p>Amazon SNS topic ARN to which Amazon Rekognition Video publishes the completion status. The topic name must begin with <i>AmazonRekognition</i> when using the AmazonRekognitionServiceRole permissions policy.</p>
    ///   - [`job_tag(impl Into<String>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::job_tag) / [`set_job_tag(Option<String>)`](crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::set_job_tag): optional — <p>Caller-chosen identifier echoed in the completion notification, e.g. to group related jobs.</p>
    ///
    /// On success the operation responds with [`StartContentModerationOutput`](crate::operation::start_content_moderation::StartContentModerationOutput), whose
    /// [`job_id(Option<String>)`](crate::operation::start_content_moderation::StartContentModerationOutput::job_id) identifies the job for a later <code>GetContentModeration</code> call.
    /// On failure it responds with [`SdkError<StartContentModerationError>`](crate::operation::start_content_moderation::StartContentModerationError).
    pub fn start_content_moderation(&self) -> crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder {
        // Each builder owns its own clone of the shared client handle.
        let handle = self.handle.clone();
        crate::operation::start_content_moderation::builders::StartContentModerationFluentBuilder::new(handle)
    }
}