aws-sdk-lookoutequipment 0.24.0

AWS SDK for Amazon Lookout for Equipment
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle {
    pub(crate) client: aws_smithy_client::Client<
        aws_smithy_client::erase::DynConnector,
        aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
    >,
    pub(crate) conf: crate::Config,
}

/// Client for Amazon Lookout for Equipment
///
/// Client for invoking operations on Amazon Lookout for Equipment. Each operation on Amazon Lookout for Equipment is a method on
/// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service.
///
/// # Examples
/// **Constructing a client and invoking an operation**
/// ```rust,no_run
/// # async fn docs() {
///     // create a shared configuration. This can be used & shared between multiple service clients.
///     let shared_config = aws_config::load_from_env().await;
///     let client = aws_sdk_lookoutequipment::Client::new(&shared_config);
///     // invoke an operation
///     /* let rsp = client
///         .<operation_name>()
///         .<param>("some value")
///         .send().await; */
/// # }
/// ```
/// **Constructing a client with custom configuration**
/// ```rust,no_run
/// use aws_config::retry::RetryConfig;
/// # async fn docs() {
/// let shared_config = aws_config::load_from_env().await;
/// let config = aws_sdk_lookoutequipment::config::Builder::from(&shared_config)
///   .retry_config(RetryConfig::disabled())
///   .build();
/// let client = aws_sdk_lookoutequipment::Client::from_conf(config);
/// # }
/// ```
#[derive(std::fmt::Debug)]
pub struct Client {
    handle: std::sync::Arc<Handle>,
}

impl std::clone::Clone for Client {
    fn clone(&self) -> Self {
        Self {
            handle: self.handle.clone(),
        }
    }
}

#[doc(inline)]
pub use aws_smithy_client::Builder;

impl
    From<
        aws_smithy_client::Client<
            aws_smithy_client::erase::DynConnector,
            aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
        >,
    > for Client
{
    fn from(
        client: aws_smithy_client::Client<
            aws_smithy_client::erase::DynConnector,
            aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
        >,
    ) -> Self {
        Self::with_config(client, crate::Config::builder().build())
    }
}

impl Client {
    /// Creates a client with the given service configuration.
    pub fn with_config(
        client: aws_smithy_client::Client<
            aws_smithy_client::erase::DynConnector,
            aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
        >,
        conf: crate::Config,
    ) -> Self {
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }

    /// Returns the client's configuration.
    pub fn conf(&self) -> &crate::Config {
        &self.handle.conf
    }
}
impl Client {
    /// Constructs a fluent builder for the [`CreateDataset`](crate::client::fluent_builders::CreateDataset) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::CreateDataset::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::CreateDataset::set_dataset_name): <p>The name of the dataset being created. </p>
    ///   - [`dataset_schema(DatasetSchema)`](crate::client::fluent_builders::CreateDataset::dataset_schema) / [`set_dataset_schema(Option<DatasetSchema>)`](crate::client::fluent_builders::CreateDataset::set_dataset_schema): <p>A JSON description of the data that is in each time series dataset, including names, column names, and data types. </p>
    ///   - [`server_side_kms_key_id(impl Into<String>)`](crate::client::fluent_builders::CreateDataset::server_side_kms_key_id) / [`set_server_side_kms_key_id(Option<String>)`](crate::client::fluent_builders::CreateDataset::set_server_side_kms_key_id): <p>Provides the identifier of the KMS key used to encrypt dataset data by Amazon Lookout for Equipment. </p>
    ///   - [`client_token(impl Into<String>)`](crate::client::fluent_builders::CreateDataset::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::CreateDataset::set_client_token): <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
    ///   - [`tags(Vec<Tag>)`](crate::client::fluent_builders::CreateDataset::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::CreateDataset::set_tags): <p>Any tags associated with the ingested data described in the dataset. </p>
    /// - On success, responds with [`CreateDatasetOutput`](crate::output::CreateDatasetOutput) with field(s):
    ///   - [`dataset_name(Option<String>)`](crate::output::CreateDatasetOutput::dataset_name): <p>The name of the dataset being created. </p>
    ///   - [`dataset_arn(Option<String>)`](crate::output::CreateDatasetOutput::dataset_arn): <p> The Amazon Resource Name (ARN) of the dataset being created. </p>
    ///   - [`status(Option<DatasetStatus>)`](crate::output::CreateDatasetOutput::status): <p>Indicates the status of the <code>CreateDataset</code> operation. </p>
    /// - On failure, responds with [`SdkError<CreateDatasetError>`](crate::error::CreateDatasetError)
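    ///
    /// # Example
    ///
    /// A minimal sketch of calling this operation. The dataset name and client token are
    /// placeholder values, and the schema is omitted for brevity; a real request would
    /// normally also set `dataset_schema`.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .create_dataset()
    ///     .dataset_name("my-sensor-dataset")        // placeholder dataset name
    ///     .client_token("unique-request-token-123") // placeholder idempotency token
    ///     .send()
    ///     .await?;
    /// println!("created {:?} with status {:?}", resp.dataset_arn(), resp.status());
    /// # Ok(())
    /// # }
    /// ```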
    pub fn create_dataset(&self) -> fluent_builders::CreateDataset {
        fluent_builders::CreateDataset::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`CreateInferenceScheduler`](crate::client::fluent_builders::CreateInferenceScheduler) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`model_name(impl Into<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::model_name) / [`set_model_name(Option<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_model_name): <p>The name of the previously trained ML model being used to create the inference scheduler. </p>
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_inference_scheduler_name): <p>The name of the inference scheduler being created. </p>
    ///   - [`data_delay_offset_in_minutes(i64)`](crate::client::fluent_builders::CreateInferenceScheduler::data_delay_offset_in_minutes) / [`set_data_delay_offset_in_minutes(Option<i64>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_data_delay_offset_in_minutes): <p>The interval (in minutes) of planned delay at the start of each inference segment. For example, if inference is set to run every ten minutes, the delay is set to five minutes and the time is 09:08. The inference scheduler will wake up at the configured interval (which, without a delay configured, would be 09:10) plus the additional five minute delay time (so 09:15) to check your Amazon S3 bucket. The delay provides a buffer for you to upload data at the same frequency, so that you don't have to stop and restart the scheduler when uploading new data.</p>  <p>For more information, see <a href="https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html">Understanding the inference process</a>.</p>
    ///   - [`data_upload_frequency(DataUploadFrequency)`](crate::client::fluent_builders::CreateInferenceScheduler::data_upload_frequency) / [`set_data_upload_frequency(Option<DataUploadFrequency>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_data_upload_frequency): <p> How often data is uploaded to the source Amazon S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment runs inference on your data.</p>  <p>For more information, see <a href="https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html">Understanding the inference process</a>.</p>
    ///   - [`data_input_configuration(InferenceInputConfiguration)`](crate::client::fluent_builders::CreateInferenceScheduler::data_input_configuration) / [`set_data_input_configuration(Option<InferenceInputConfiguration>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_data_input_configuration): <p>Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
    ///   - [`data_output_configuration(InferenceOutputConfiguration)`](crate::client::fluent_builders::CreateInferenceScheduler::data_output_configuration) / [`set_data_output_configuration(Option<InferenceOutputConfiguration>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_data_output_configuration): <p>Specifies configuration information for the output results for the inference scheduler, including the S3 location for the output. </p>
    ///   - [`role_arn(impl Into<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::role_arn) / [`set_role_arn(Option<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_role_arn): <p>The Amazon Resource Name (ARN) of a role with permission to access the data source being used for the inference. </p>
    ///   - [`server_side_kms_key_id(impl Into<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::server_side_kms_key_id) / [`set_server_side_kms_key_id(Option<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_server_side_kms_key_id): <p>Provides the identifier of the KMS key used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>
    ///   - [`client_token(impl Into<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_client_token): <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
    ///   - [`tags(Vec<Tag>)`](crate::client::fluent_builders::CreateInferenceScheduler::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::CreateInferenceScheduler::set_tags): <p>Any tags associated with the inference scheduler. </p>
    /// - On success, responds with [`CreateInferenceSchedulerOutput`](crate::output::CreateInferenceSchedulerOutput) with field(s):
    ///   - [`inference_scheduler_arn(Option<String>)`](crate::output::CreateInferenceSchedulerOutput::inference_scheduler_arn): <p>The Amazon Resource Name (ARN) of the inference scheduler being created. </p>
    ///   - [`inference_scheduler_name(Option<String>)`](crate::output::CreateInferenceSchedulerOutput::inference_scheduler_name): <p>The name of the inference scheduler being created. </p>
    ///   - [`status(Option<InferenceSchedulerStatus>)`](crate::output::CreateInferenceSchedulerOutput::status): <p>Indicates the status of the <code>CreateInferenceScheduler</code> operation. </p>
    /// - On failure, responds with [`SdkError<CreateInferenceSchedulerError>`](crate::error::CreateInferenceSchedulerError)
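    ///
    /// # Example
    ///
    /// A minimal sketch with placeholder names and role ARN. The required data input and
    /// output configurations (`data_input_configuration` / `data_output_configuration`) and
    /// the upload frequency are omitted here to keep the sketch short; a real request must
    /// supply them.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .create_inference_scheduler()
    ///     .model_name("my-trained-model")           // placeholder model name
    ///     .inference_scheduler_name("my-scheduler") // placeholder scheduler name
    ///     .data_delay_offset_in_minutes(5)          // wait 5 minutes for late-arriving data
    ///     .role_arn("arn:aws:iam::123456789012:role/LookoutEquipmentRole") // placeholder role
    ///     .send()
    ///     .await?;
    /// println!("scheduler {:?} status {:?}", resp.inference_scheduler_arn(), resp.status());
    /// # Ok(())
    /// # }
    /// ```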
    pub fn create_inference_scheduler(&self) -> fluent_builders::CreateInferenceScheduler {
        fluent_builders::CreateInferenceScheduler::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`CreateLabel`](crate::client::fluent_builders::CreateLabel) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::CreateLabel::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::CreateLabel::set_label_group_name): <p> The name of a group of labels. </p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
    ///   - [`start_time(DateTime)`](crate::client::fluent_builders::CreateLabel::start_time) / [`set_start_time(Option<DateTime>)`](crate::client::fluent_builders::CreateLabel::set_start_time): <p> The start time of the labeled event. </p>
    ///   - [`end_time(DateTime)`](crate::client::fluent_builders::CreateLabel::end_time) / [`set_end_time(Option<DateTime>)`](crate::client::fluent_builders::CreateLabel::set_end_time): <p> The end time of the labeled event. </p>
    ///   - [`rating(LabelRating)`](crate::client::fluent_builders::CreateLabel::rating) / [`set_rating(Option<LabelRating>)`](crate::client::fluent_builders::CreateLabel::set_rating): <p> Indicates whether a labeled event represents an anomaly. </p>
    ///   - [`fault_code(impl Into<String>)`](crate::client::fluent_builders::CreateLabel::fault_code) / [`set_fault_code(Option<String>)`](crate::client::fluent_builders::CreateLabel::set_fault_code): <p> Provides additional information about the label. The fault code must be defined in the FaultCodes attribute of the label group.</p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
    ///   - [`notes(impl Into<String>)`](crate::client::fluent_builders::CreateLabel::notes) / [`set_notes(Option<String>)`](crate::client::fluent_builders::CreateLabel::set_notes): <p> Metadata providing additional information about the label. </p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    ///   - [`equipment(impl Into<String>)`](crate::client::fluent_builders::CreateLabel::equipment) / [`set_equipment(Option<String>)`](crate::client::fluent_builders::CreateLabel::set_equipment): <p> Indicates that a label pertains to a particular piece of equipment. </p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    ///   - [`client_token(impl Into<String>)`](crate::client::fluent_builders::CreateLabel::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::CreateLabel::set_client_token): <p> A unique identifier for the request to create a label. If you do not set the client request token, Lookout for Equipment generates one. </p>
    /// - On success, responds with [`CreateLabelOutput`](crate::output::CreateLabelOutput) with field(s):
    ///   - [`label_id(Option<String>)`](crate::output::CreateLabelOutput::label_id): <p> The ID of the label that you have created. </p>
    /// - On failure, responds with [`SdkError<CreateLabelError>`](crate::error::CreateLabelError)
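    ///
    /// # Example
    ///
    /// A minimal sketch that labels a placeholder time window in a label group. The group
    /// name, fault code, and epoch timestamps are illustrative only; the timestamps use
    /// `aws_smithy_types::DateTime`, which is assumed to be declared as a dependency. A real
    /// request also sets `rating(LabelRating)`, omitted here.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_smithy_types::DateTime;
    ///
    /// let resp = client
    ///     .create_label()
    ///     .label_group_name("pump-failures")              // placeholder label group
    ///     .start_time(DateTime::from_secs(1_672_531_200)) // event start (epoch seconds)
    ///     .end_time(DateTime::from_secs(1_672_534_800))   // event end (epoch seconds)
    ///     .fault_code("BEARING_WEAR")                     // placeholder fault code
    ///     .send()
    ///     .await?;
    /// println!("created label {:?}", resp.label_id());
    /// # Ok(())
    /// # }
    /// ```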
    pub fn create_label(&self) -> fluent_builders::CreateLabel {
        fluent_builders::CreateLabel::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`CreateLabelGroup`](crate::client::fluent_builders::CreateLabelGroup) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::CreateLabelGroup::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::CreateLabelGroup::set_label_group_name): <p> Names a group of labels.</p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
    ///   - [`fault_codes(Vec<String>)`](crate::client::fluent_builders::CreateLabelGroup::fault_codes) / [`set_fault_codes(Option<Vec<String>>)`](crate::client::fluent_builders::CreateLabelGroup::set_fault_codes): <p> The acceptable fault codes (indicating the type of anomaly associated with the label) that can be used with this label group.</p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    ///   - [`client_token(impl Into<String>)`](crate::client::fluent_builders::CreateLabelGroup::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::CreateLabelGroup::set_client_token): <p> A unique identifier for the request to create a label group. If you do not set the client request token, Lookout for Equipment generates one. </p>
    ///   - [`tags(Vec<Tag>)`](crate::client::fluent_builders::CreateLabelGroup::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::CreateLabelGroup::set_tags): <p> Tags that provide metadata about the label group you are creating. </p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    /// - On success, responds with [`CreateLabelGroupOutput`](crate::output::CreateLabelGroupOutput) with field(s):
    ///   - [`label_group_name(Option<String>)`](crate::output::CreateLabelGroupOutput::label_group_name): <p> The name of the label group that you have created. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
    ///   - [`label_group_arn(Option<String>)`](crate::output::CreateLabelGroupOutput::label_group_arn): <p> The ARN of the label group that you have created. </p>
    /// - On failure, responds with [`SdkError<CreateLabelGroupError>`](crate::error::CreateLabelGroupError)
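    ///
    /// # Example
    ///
    /// A minimal sketch with a placeholder group name and client token; fault codes and tags
    /// are omitted for brevity.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .create_label_group()
    ///     .label_group_name("pump-failures")        // placeholder label group name
    ///     .client_token("unique-request-token-456") // placeholder idempotency token
    ///     .send()
    ///     .await?;
    /// println!("label group ARN: {:?}", resp.label_group_arn());
    /// # Ok(())
    /// # }
    /// ```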
    pub fn create_label_group(&self) -> fluent_builders::CreateLabelGroup {
        fluent_builders::CreateLabelGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`CreateModel`](crate::client::fluent_builders::CreateModel) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`model_name(impl Into<String>)`](crate::client::fluent_builders::CreateModel::model_name) / [`set_model_name(Option<String>)`](crate::client::fluent_builders::CreateModel::set_model_name): <p>The name for the ML model to be created.</p>
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::CreateModel::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::CreateModel::set_dataset_name): <p>The name of the dataset for the ML model being created. </p>
    ///   - [`dataset_schema(DatasetSchema)`](crate::client::fluent_builders::CreateModel::dataset_schema) / [`set_dataset_schema(Option<DatasetSchema>)`](crate::client::fluent_builders::CreateModel::set_dataset_schema): <p>The data schema for the ML model being created. </p>
    ///   - [`labels_input_configuration(LabelsInputConfiguration)`](crate::client::fluent_builders::CreateModel::labels_input_configuration) / [`set_labels_input_configuration(Option<LabelsInputConfiguration>)`](crate::client::fluent_builders::CreateModel::set_labels_input_configuration): <p>The input configuration for the labels being used for the ML model that's being created. </p>
    ///   - [`client_token(impl Into<String>)`](crate::client::fluent_builders::CreateModel::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::CreateModel::set_client_token): <p>A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
    ///   - [`training_data_start_time(DateTime)`](crate::client::fluent_builders::CreateModel::training_data_start_time) / [`set_training_data_start_time(Option<DateTime>)`](crate::client::fluent_builders::CreateModel::set_training_data_start_time): <p>Indicates the time reference in the dataset that should be used to begin the subset of training data for the ML model. </p>
    ///   - [`training_data_end_time(DateTime)`](crate::client::fluent_builders::CreateModel::training_data_end_time) / [`set_training_data_end_time(Option<DateTime>)`](crate::client::fluent_builders::CreateModel::set_training_data_end_time): <p>Indicates the time reference in the dataset that should be used to end the subset of training data for the ML model. </p>
    ///   - [`evaluation_data_start_time(DateTime)`](crate::client::fluent_builders::CreateModel::evaluation_data_start_time) / [`set_evaluation_data_start_time(Option<DateTime>)`](crate::client::fluent_builders::CreateModel::set_evaluation_data_start_time): <p>Indicates the time reference in the dataset that should be used to begin the subset of evaluation data for the ML model. </p>
    ///   - [`evaluation_data_end_time(DateTime)`](crate::client::fluent_builders::CreateModel::evaluation_data_end_time) / [`set_evaluation_data_end_time(Option<DateTime>)`](crate::client::fluent_builders::CreateModel::set_evaluation_data_end_time): <p> Indicates the time reference in the dataset that should be used to end the subset of evaluation data for the ML model. </p>
    ///   - [`role_arn(impl Into<String>)`](crate::client::fluent_builders::CreateModel::role_arn) / [`set_role_arn(Option<String>)`](crate::client::fluent_builders::CreateModel::set_role_arn): <p> The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the ML model. </p>
    ///   - [`data_pre_processing_configuration(DataPreProcessingConfiguration)`](crate::client::fluent_builders::CreateModel::data_pre_processing_configuration) / [`set_data_pre_processing_configuration(Option<DataPreProcessingConfiguration>)`](crate::client::fluent_builders::CreateModel::set_data_pre_processing_configuration): <p>The configuration is the <code>TargetSamplingRate</code>, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the <code>TargetSamplingRate</code> is 1 minute.</p>  <p>When providing a value for the <code>TargetSamplingRate</code>, you must attach the prefix "PT" to the rate you want. The value for a 1 second rate is therefore <i>PT1S</i>, the value for a 15 minute rate is <i>PT15M</i>, and the value for a 1 hour rate is <i>PT1H</i> </p>
    ///   - [`server_side_kms_key_id(impl Into<String>)`](crate::client::fluent_builders::CreateModel::server_side_kms_key_id) / [`set_server_side_kms_key_id(Option<String>)`](crate::client::fluent_builders::CreateModel::set_server_side_kms_key_id): <p>Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. </p>
    ///   - [`tags(Vec<Tag>)`](crate::client::fluent_builders::CreateModel::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::CreateModel::set_tags): <p> Any tags associated with the ML model being created. </p>
    ///   - [`off_condition(impl Into<String>)`](crate::client::fluent_builders::CreateModel::off_condition) / [`set_off_condition(Option<String>)`](crate::client::fluent_builders::CreateModel::set_off_condition): <p>Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.</p>
    /// - On success, responds with [`CreateModelOutput`](crate::output::CreateModelOutput) with field(s):
    ///   - [`model_arn(Option<String>)`](crate::output::CreateModelOutput::model_arn): <p>The Amazon Resource Name (ARN) of the model being created. </p>
    ///   - [`status(Option<ModelStatus>)`](crate::output::CreateModelOutput::status): <p>Indicates the status of the <code>CreateModel</code> operation. </p>
    /// - On failure, responds with [`SdkError<CreateModelError>`](crate::error::CreateModelError)
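    ///
    /// # Example
    ///
    /// A minimal sketch that trains a model over placeholder training and evaluation windows.
    /// The names, role ARN, and epoch timestamps are illustrative only; optional settings such
    /// as `data_pre_processing_configuration` and `labels_input_configuration` are omitted.
    /// The timestamps use `aws_smithy_types::DateTime`, assumed to be a declared dependency.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_smithy_types::DateTime;
    ///
    /// let resp = client
    ///     .create_model()
    ///     .model_name("my-pump-model")       // placeholder model name
    ///     .dataset_name("my-sensor-dataset") // placeholder dataset name
    ///     .training_data_start_time(DateTime::from_secs(1_640_995_200))
    ///     .training_data_end_time(DateTime::from_secs(1_656_633_600))
    ///     .evaluation_data_start_time(DateTime::from_secs(1_656_633_600))
    ///     .evaluation_data_end_time(DateTime::from_secs(1_672_531_200))
    ///     .role_arn("arn:aws:iam::123456789012:role/LookoutEquipmentRole") // placeholder role
    ///     .send()
    ///     .await?;
    /// println!("model {:?} status {:?}", resp.model_arn(), resp.status());
    /// # Ok(())
    /// # }
    /// ```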
    pub fn create_model(&self) -> fluent_builders::CreateModel {
        fluent_builders::CreateModel::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DeleteDataset`](crate::client::fluent_builders::DeleteDataset) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::DeleteDataset::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::DeleteDataset::set_dataset_name): <p>The name of the dataset to be deleted. </p>
    /// - On success, responds with [`DeleteDatasetOutput`](crate::output::DeleteDatasetOutput)
    /// - On failure, responds with [`SdkError<DeleteDatasetError>`](crate::error::DeleteDatasetError)
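    ///
    /// # Example
    ///
    /// A minimal sketch; the dataset name is a placeholder. The output carries no fields, so
    /// success is indicated simply by the call returning `Ok`.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .delete_dataset()
    ///     .dataset_name("my-sensor-dataset") // placeholder dataset name
    ///     .send()
    ///     .await?;
    /// println!("dataset deletion requested");
    /// # Ok(())
    /// # }
    /// ```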
    pub fn delete_dataset(&self) -> fluent_builders::DeleteDataset {
        fluent_builders::DeleteDataset::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DeleteInferenceScheduler`](crate::client::fluent_builders::DeleteInferenceScheduler) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::DeleteInferenceScheduler::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::DeleteInferenceScheduler::set_inference_scheduler_name): <p>The name of the inference scheduler to be deleted. </p>
    /// - On success, responds with [`DeleteInferenceSchedulerOutput`](crate::output::DeleteInferenceSchedulerOutput)
    /// - On failure, responds with [`SdkError<DeleteInferenceSchedulerError>`](crate::error::DeleteInferenceSchedulerError)
    pub fn delete_inference_scheduler(&self) -> fluent_builders::DeleteInferenceScheduler {
        fluent_builders::DeleteInferenceScheduler::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DeleteLabel`](crate::client::fluent_builders::DeleteLabel) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::DeleteLabel::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::DeleteLabel::set_label_group_name): <p> The name of the label group that contains the label that you want to delete. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
    ///   - [`label_id(impl Into<String>)`](crate::client::fluent_builders::DeleteLabel::label_id) / [`set_label_id(Option<String>)`](crate::client::fluent_builders::DeleteLabel::set_label_id): <p> The ID of the label that you want to delete. </p>
    /// - On success, responds with [`DeleteLabelOutput`](crate::output::DeleteLabelOutput)
    /// - On failure, responds with [`SdkError<DeleteLabelError>`](crate::error::DeleteLabelError)
    pub fn delete_label(&self) -> fluent_builders::DeleteLabel {
        fluent_builders::DeleteLabel::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DeleteLabelGroup`](crate::client::fluent_builders::DeleteLabelGroup) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::DeleteLabelGroup::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::DeleteLabelGroup::set_label_group_name): <p> The name of the label group that you want to delete. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
    /// - On success, responds with [`DeleteLabelGroupOutput`](crate::output::DeleteLabelGroupOutput)
    /// - On failure, responds with [`SdkError<DeleteLabelGroupError>`](crate::error::DeleteLabelGroupError)
    pub fn delete_label_group(&self) -> fluent_builders::DeleteLabelGroup {
        fluent_builders::DeleteLabelGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DeleteModel`](crate::client::fluent_builders::DeleteModel) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`model_name(impl Into<String>)`](crate::client::fluent_builders::DeleteModel::model_name) / [`set_model_name(Option<String>)`](crate::client::fluent_builders::DeleteModel::set_model_name): <p>The name of the ML model to be deleted. </p>
    /// - On success, responds with [`DeleteModelOutput`](crate::output::DeleteModelOutput)
    /// - On failure, responds with [`SdkError<DeleteModelError>`](crate::error::DeleteModelError)
    pub fn delete_model(&self) -> fluent_builders::DeleteModel {
        fluent_builders::DeleteModel::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DescribeDataIngestionJob`](crate::client::fluent_builders::DescribeDataIngestionJob) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`job_id(impl Into<String>)`](crate::client::fluent_builders::DescribeDataIngestionJob::job_id) / [`set_job_id(Option<String>)`](crate::client::fluent_builders::DescribeDataIngestionJob::set_job_id): <p>The job ID of the data ingestion job. </p>
    /// - On success, responds with [`DescribeDataIngestionJobOutput`](crate::output::DescribeDataIngestionJobOutput) with field(s):
    ///   - [`job_id(Option<String>)`](crate::output::DescribeDataIngestionJobOutput::job_id): <p>Indicates the job ID of the data ingestion job. </p>
    ///   - [`dataset_arn(Option<String>)`](crate::output::DescribeDataIngestionJobOutput::dataset_arn): <p>The Amazon Resource Name (ARN) of the dataset being used in the data ingestion job. </p>
    ///   - [`ingestion_input_configuration(Option<IngestionInputConfiguration>)`](crate::output::DescribeDataIngestionJobOutput::ingestion_input_configuration): <p>Specifies the S3 location configuration for the data input for the data ingestion job. </p>
    ///   - [`role_arn(Option<String>)`](crate::output::DescribeDataIngestionJobOutput::role_arn): <p>The Amazon Resource Name (ARN) of an IAM role with permission to access the data source being ingested. </p>
    ///   - [`created_at(Option<DateTime>)`](crate::output::DescribeDataIngestionJobOutput::created_at): <p>The time at which the data ingestion job was created. </p>
    ///   - [`status(Option<IngestionJobStatus>)`](crate::output::DescribeDataIngestionJobOutput::status): <p>Indicates the status of the <code>DataIngestionJob</code> operation. </p>
    ///   - [`failed_reason(Option<String>)`](crate::output::DescribeDataIngestionJobOutput::failed_reason): <p>Specifies the reason for failure when a data ingestion job has failed. </p>
    ///   - [`data_quality_summary(Option<DataQualitySummary>)`](crate::output::DescribeDataIngestionJobOutput::data_quality_summary): <p> Gives statistics about a completed ingestion job. These statistics primarily relate to quantifying incorrect data such as MissingCompleteSensorData, MissingSensorData, UnsupportedDateFormats, InsufficientSensorData, and DuplicateTimeStamps. </p>
    ///   - [`ingested_files_summary(Option<IngestedFilesSummary>)`](crate::output::DescribeDataIngestionJobOutput::ingested_files_summary): <p>Gives statistics about how many files have been ingested, and which files have not been ingested, for a particular ingestion job.</p>
    ///   - [`status_detail(Option<String>)`](crate::output::DescribeDataIngestionJobOutput::status_detail): <p> Provides details about the status of the ingestion job that is currently in progress. </p>
    ///   - [`ingested_data_size(Option<i64>)`](crate::output::DescribeDataIngestionJobOutput::ingested_data_size): <p> Indicates the size of the ingested dataset. </p>
    ///   - [`data_start_time(Option<DateTime>)`](crate::output::DescribeDataIngestionJobOutput::data_start_time): <p> Indicates the earliest timestamp corresponding to data that was successfully ingested during this specific ingestion job. </p>
    ///   - [`data_end_time(Option<DateTime>)`](crate::output::DescribeDataIngestionJobOutput::data_end_time): <p> Indicates the latest timestamp corresponding to data that was successfully ingested during this specific ingestion job. </p>
    /// - On failure, responds with [`SdkError<DescribeDataIngestionJobError>`](crate::error::DescribeDataIngestionJobError)
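    ///
    /// # Example
    ///
    /// A minimal sketch that looks up a single ingestion job by a placeholder job ID and
    /// prints its status and failure reason, if any.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_data_ingestion_job()
    ///     .job_id("0123456789abcdef0123456789abcdef") // placeholder job ID
    ///     .send()
    ///     .await?;
    /// println!("status: {:?}", resp.status());
    /// if let Some(reason) = resp.failed_reason() {
    ///     println!("failed because: {reason}");
    /// }
    /// # Ok(())
    /// # }
    /// ```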
    pub fn describe_data_ingestion_job(&self) -> fluent_builders::DescribeDataIngestionJob {
        fluent_builders::DescribeDataIngestionJob::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DescribeDataset`](crate::client::fluent_builders::DescribeDataset) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::DescribeDataset::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::DescribeDataset::set_dataset_name): <p>The name of the dataset to be described. </p>
    /// - On success, responds with [`DescribeDatasetOutput`](crate::output::DescribeDatasetOutput) with field(s):
    ///   - [`dataset_name(Option<String>)`](crate::output::DescribeDatasetOutput::dataset_name): <p>The name of the dataset being described. </p>
    ///   - [`dataset_arn(Option<String>)`](crate::output::DescribeDatasetOutput::dataset_arn): <p>The Amazon Resource Name (ARN) of the dataset being described. </p>
    ///   - [`created_at(Option<DateTime>)`](crate::output::DescribeDatasetOutput::created_at): <p>Specifies the time the dataset was created in Lookout for Equipment. </p>
    ///   - [`last_updated_at(Option<DateTime>)`](crate::output::DescribeDatasetOutput::last_updated_at): <p>Specifies the time the dataset was last updated, if it was. </p>
    ///   - [`status(Option<DatasetStatus>)`](crate::output::DescribeDatasetOutput::status): <p>Indicates the status of the dataset. </p>
    ///   - [`schema(Option<String>)`](crate::output::DescribeDatasetOutput::schema): <p>A JSON description of the data that is in each time series dataset, including names, column names, and data types. </p>
    ///   - [`server_side_kms_key_id(Option<String>)`](crate::output::DescribeDatasetOutput::server_side_kms_key_id): <p>Provides the identifier of the KMS key used to encrypt dataset data by Amazon Lookout for Equipment. </p>
    ///   - [`ingestion_input_configuration(Option<IngestionInputConfiguration>)`](crate::output::DescribeDatasetOutput::ingestion_input_configuration): <p>Specifies the S3 location configuration for the data input for the data ingestion job. </p>
    ///   - [`data_quality_summary(Option<DataQualitySummary>)`](crate::output::DescribeDatasetOutput::data_quality_summary): <p> Gives statistics associated with the given dataset for the latest successful associated ingestion job id. These statistics primarily relate to quantifying incorrect data such as MissingCompleteSensorData, MissingSensorData, UnsupportedDateFormats, InsufficientSensorData, and DuplicateTimeStamps. </p>
    ///   - [`ingested_files_summary(Option<IngestedFilesSummary>)`](crate::output::DescribeDatasetOutput::ingested_files_summary): <p> IngestedFilesSummary associated with the given dataset for the latest successful associated ingestion job id. </p>
    ///   - [`role_arn(Option<String>)`](crate::output::DescribeDatasetOutput::role_arn): <p> The Amazon Resource Name (ARN) of the IAM role that you are using for this data ingestion job. </p>
    ///   - [`data_start_time(Option<DateTime>)`](crate::output::DescribeDatasetOutput::data_start_time): <p> Indicates the earliest timestamp corresponding to data that was successfully ingested during the most recent ingestion of this particular dataset. </p>
    ///   - [`data_end_time(Option<DateTime>)`](crate::output::DescribeDatasetOutput::data_end_time): <p> Indicates the latest timestamp corresponding to data that was successfully ingested during the most recent ingestion of this particular dataset. </p>
    /// - On failure, responds with [`SdkError<DescribeDatasetError>`](crate::error::DescribeDatasetError)
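    ///
    /// # Example
    ///
    /// A minimal sketch with a placeholder dataset name that prints the dataset status and its
    /// schema JSON, when present.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_dataset()
    ///     .dataset_name("my-sensor-dataset") // placeholder dataset name
    ///     .send()
    ///     .await?;
    /// println!("status: {:?}", resp.status());
    /// println!("schema: {}", resp.schema().unwrap_or_default());
    /// # Ok(())
    /// # }
    /// ```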
    pub fn describe_dataset(&self) -> fluent_builders::DescribeDataset {
        fluent_builders::DescribeDataset::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DescribeInferenceScheduler`](crate::client::fluent_builders::DescribeInferenceScheduler) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::DescribeInferenceScheduler::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::DescribeInferenceScheduler::set_inference_scheduler_name): <p>The name of the inference scheduler being described. </p>
    /// - On success, responds with [`DescribeInferenceSchedulerOutput`](crate::output::DescribeInferenceSchedulerOutput) with field(s):
    ///   - [`model_arn(Option<String>)`](crate::output::DescribeInferenceSchedulerOutput::model_arn): <p>The Amazon Resource Name (ARN) of the ML model of the inference scheduler being described. </p>
    ///   - [`model_name(Option<String>)`](crate::output::DescribeInferenceSchedulerOutput::model_name): <p>The name of the ML model of the inference scheduler being described. </p>
    ///   - [`inference_scheduler_name(Option<String>)`](crate::output::DescribeInferenceSchedulerOutput::inference_scheduler_name): <p>The name of the inference scheduler being described. </p>
    ///   - [`inference_scheduler_arn(Option<String>)`](crate::output::DescribeInferenceSchedulerOutput::inference_scheduler_arn): <p>The Amazon Resource Name (ARN) of the inference scheduler being described. </p>
    ///   - [`status(Option<InferenceSchedulerStatus>)`](crate::output::DescribeInferenceSchedulerOutput::status): <p>Indicates the status of the inference scheduler. </p>
    ///   - [`data_delay_offset_in_minutes(Option<i64>)`](crate::output::DescribeInferenceSchedulerOutput::data_delay_offset_in_minutes): <p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.</p>
    ///   - [`data_upload_frequency(Option<DataUploadFrequency>)`](crate::output::DescribeInferenceSchedulerOutput::data_upload_frequency): <p>Specifies how often data is uploaded to the source S3 bucket for the input data. This value is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes. </p>
    ///   - [`created_at(Option<DateTime>)`](crate::output::DescribeInferenceSchedulerOutput::created_at): <p>Specifies the time at which the inference scheduler was created. </p>
    ///   - [`updated_at(Option<DateTime>)`](crate::output::DescribeInferenceSchedulerOutput::updated_at): <p>Specifies the time at which the inference scheduler was last updated, if it was. </p>
    ///   - [`data_input_configuration(Option<InferenceInputConfiguration>)`](crate::output::DescribeInferenceSchedulerOutput::data_input_configuration): <p> Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
    ///   - [`data_output_configuration(Option<InferenceOutputConfiguration>)`](crate::output::DescribeInferenceSchedulerOutput::data_output_configuration): <p> Specifies information for the output results for the inference scheduler, including the output S3 location. </p>
    ///   - [`role_arn(Option<String>)`](crate::output::DescribeInferenceSchedulerOutput::role_arn): <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler being described. </p>
    ///   - [`server_side_kms_key_id(Option<String>)`](crate::output::DescribeInferenceSchedulerOutput::server_side_kms_key_id): <p>Provides the identifier of the KMS key used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>
    ///   - [`latest_inference_result(Option<LatestInferenceResult>)`](crate::output::DescribeInferenceSchedulerOutput::latest_inference_result): <p>Indicates whether the latest execution for the inference scheduler was Anomalous (anomalous events found) or Normal (no anomalous events found).</p>
    /// - On failure, responds with [`SdkError<DescribeInferenceSchedulerError>`](crate::error::DescribeInferenceSchedulerError)
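    ///
    /// # Example
    ///
    /// A minimal sketch with a placeholder scheduler name that prints the scheduler status and
    /// the latest inference result.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_inference_scheduler()
    ///     .inference_scheduler_name("my-scheduler") // placeholder scheduler name
    ///     .send()
    ///     .await?;
    /// println!("status: {:?}", resp.status());
    /// println!("latest result: {:?}", resp.latest_inference_result());
    /// # Ok(())
    /// # }
    /// ```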
    pub fn describe_inference_scheduler(&self) -> fluent_builders::DescribeInferenceScheduler {
        fluent_builders::DescribeInferenceScheduler::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DescribeLabel`](crate::client::fluent_builders::DescribeLabel) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeLabel::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::DescribeLabel::set_label_group_name): <p> Returns the name of the group containing the label. </p>
    ///   - [`label_id(impl Into<String>)`](crate::client::fluent_builders::DescribeLabel::label_id) / [`set_label_id(Option<String>)`](crate::client::fluent_builders::DescribeLabel::set_label_id): <p> Returns the ID of the label. </p>
    /// - On success, responds with [`DescribeLabelOutput`](crate::output::DescribeLabelOutput) with field(s):
    ///   - [`label_group_name(Option<String>)`](crate::output::DescribeLabelOutput::label_group_name): <p> The name of the requested label group. </p>
    ///   - [`label_group_arn(Option<String>)`](crate::output::DescribeLabelOutput::label_group_arn): <p> The ARN of the requested label group. </p>
    ///   - [`label_id(Option<String>)`](crate::output::DescribeLabelOutput::label_id): <p> The ID of the requested label. </p>
    ///   - [`start_time(Option<DateTime>)`](crate::output::DescribeLabelOutput::start_time): <p> The start time of the requested label. </p>
    ///   - [`end_time(Option<DateTime>)`](crate::output::DescribeLabelOutput::end_time): <p> The end time of the requested label. </p>
    ///   - [`rating(Option<LabelRating>)`](crate::output::DescribeLabelOutput::rating): <p> Indicates whether a labeled event represents an anomaly. </p>
    ///   - [`fault_code(Option<String>)`](crate::output::DescribeLabelOutput::fault_code): <p> Indicates the type of anomaly associated with the label. </p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    ///   - [`notes(Option<String>)`](crate::output::DescribeLabelOutput::notes): <p>Metadata providing additional information about the label.</p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    ///   - [`equipment(Option<String>)`](crate::output::DescribeLabelOutput::equipment): <p> Indicates that a label pertains to a particular piece of equipment. </p>
    ///   - [`created_at(Option<DateTime>)`](crate::output::DescribeLabelOutput::created_at): <p> The time at which the label was created. </p>
    /// - On failure, responds with [`SdkError<DescribeLabelError>`](crate::error::DescribeLabelError)
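    ///
    /// # Example
    ///
    /// A minimal sketch that fetches one label by group name and label ID (both placeholders)
    /// and prints its time window and rating.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_label()
    ///     .label_group_name("pump-failures")            // placeholder label group
    ///     .label_id("0123456789abcdef0123456789abcdef") // placeholder label ID
    ///     .send()
    ///     .await?;
    /// println!("window: {:?} .. {:?}", resp.start_time(), resp.end_time());
    /// println!("rating: {:?}", resp.rating());
    /// # Ok(())
    /// # }
    /// ```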
    pub fn describe_label(&self) -> fluent_builders::DescribeLabel {
        fluent_builders::DescribeLabel::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DescribeLabelGroup`](crate::client::fluent_builders::DescribeLabelGroup) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeLabelGroup::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::DescribeLabelGroup::set_label_group_name): <p> Returns the name of the label group. </p>
    /// - On success, responds with [`DescribeLabelGroupOutput`](crate::output::DescribeLabelGroupOutput) with field(s):
    ///   - [`label_group_name(Option<String>)`](crate::output::DescribeLabelGroupOutput::label_group_name): <p> The name of the label group. </p>
    ///   - [`label_group_arn(Option<String>)`](crate::output::DescribeLabelGroupOutput::label_group_arn): <p> The ARN of the label group. </p>
    ///   - [`fault_codes(Option<Vec<String>>)`](crate::output::DescribeLabelGroupOutput::fault_codes): <p> Codes indicating the type of anomaly associated with the labels in the label group. </p>
    ///   - [`created_at(Option<DateTime>)`](crate::output::DescribeLabelGroupOutput::created_at): <p> The time at which the label group was created. </p>
    ///   - [`updated_at(Option<DateTime>)`](crate::output::DescribeLabelGroupOutput::updated_at): <p> The time at which the label group was updated. </p>
    /// - On failure, responds with [`SdkError<DescribeLabelGroupError>`](crate::error::DescribeLabelGroupError)
    pub fn describe_label_group(&self) -> fluent_builders::DescribeLabelGroup {
        fluent_builders::DescribeLabelGroup::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`DescribeModel`](crate::client::fluent_builders::DescribeModel) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`model_name(impl Into<String>)`](crate::client::fluent_builders::DescribeModel::model_name) / [`set_model_name(Option<String>)`](crate::client::fluent_builders::DescribeModel::set_model_name): <p>The name of the ML model to be described. </p>
    /// - On success, responds with [`DescribeModelOutput`](crate::output::DescribeModelOutput) with field(s):
    ///   - [`model_name(Option<String>)`](crate::output::DescribeModelOutput::model_name): <p>The name of the ML model being described. </p>
    ///   - [`model_arn(Option<String>)`](crate::output::DescribeModelOutput::model_arn): <p>The Amazon Resource Name (ARN) of the ML model being described. </p>
    ///   - [`dataset_name(Option<String>)`](crate::output::DescribeModelOutput::dataset_name): <p>The name of the dataset being used by the ML model being described. </p>
    ///   - [`dataset_arn(Option<String>)`](crate::output::DescribeModelOutput::dataset_arn): <p>The Amazon Resource Name (ARN) of the dataset used to create the ML model being described. </p>
    ///   - [`schema(Option<String>)`](crate::output::DescribeModelOutput::schema): <p>A JSON description of the data that is in each time series dataset, including names, column names, and data types. </p>
    ///   - [`labels_input_configuration(Option<LabelsInputConfiguration>)`](crate::output::DescribeModelOutput::labels_input_configuration): <p>Specifies configuration information about the labels input, including its S3 location. </p>
    ///   - [`training_data_start_time(Option<DateTime>)`](crate::output::DescribeModelOutput::training_data_start_time): <p> Indicates the time reference in the dataset that was used to begin the subset of training data for the ML model. </p>
    ///   - [`training_data_end_time(Option<DateTime>)`](crate::output::DescribeModelOutput::training_data_end_time): <p> Indicates the time reference in the dataset that was used to end the subset of training data for the ML model. </p>
    ///   - [`evaluation_data_start_time(Option<DateTime>)`](crate::output::DescribeModelOutput::evaluation_data_start_time): <p> Indicates the time reference in the dataset that was used to begin the subset of evaluation data for the ML model. </p>
    ///   - [`evaluation_data_end_time(Option<DateTime>)`](crate::output::DescribeModelOutput::evaluation_data_end_time): <p> Indicates the time reference in the dataset that was used to end the subset of evaluation data for the ML model. </p>
    ///   - [`role_arn(Option<String>)`](crate::output::DescribeModelOutput::role_arn): <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the ML model being described. </p>
    ///   - [`data_pre_processing_configuration(Option<DataPreProcessingConfiguration>)`](crate::output::DescribeModelOutput::data_pre_processing_configuration): <p>The configuration is the <code>TargetSamplingRate</code>, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the <code>TargetSamplingRate</code> is 1 minute.</p>  <p>When providing a value for the <code>TargetSamplingRate</code>, you must attach the prefix "PT" to the rate you want. The value for a 1 second rate is therefore <i>PT1S</i>, the value for a 15 minute rate is <i>PT15M</i>, and the value for a 1 hour rate is <i>PT1H</i> </p>
    ///   - [`status(Option<ModelStatus>)`](crate::output::DescribeModelOutput::status): <p>Specifies the current status of the model being described. Status describes the status of the most recent action of the model. </p>
    ///   - [`training_execution_start_time(Option<DateTime>)`](crate::output::DescribeModelOutput::training_execution_start_time): <p>Indicates the time at which the training of the ML model began. </p>
    ///   - [`training_execution_end_time(Option<DateTime>)`](crate::output::DescribeModelOutput::training_execution_end_time): <p>Indicates the time at which the training of the ML model was completed. </p>
    ///   - [`failed_reason(Option<String>)`](crate::output::DescribeModelOutput::failed_reason): <p>If the training of the ML model failed, this indicates the reason for that failure. </p>
    ///   - [`model_metrics(Option<String>)`](crate::output::DescribeModelOutput::model_metrics): <p>The Model Metrics show an aggregated summary of the model's performance within the evaluation time range. This is the JSON content of the metrics created when evaluating the model. </p>
    ///   - [`last_updated_time(Option<DateTime>)`](crate::output::DescribeModelOutput::last_updated_time): <p>Indicates the last time the ML model was updated. The type of update is not specified. </p>
    ///   - [`created_at(Option<DateTime>)`](crate::output::DescribeModelOutput::created_at): <p>Indicates the time and date at which the ML model was created. </p>
    ///   - [`server_side_kms_key_id(Option<String>)`](crate::output::DescribeModelOutput::server_side_kms_key_id): <p>Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. </p>
    ///   - [`off_condition(Option<String>)`](crate::output::DescribeModelOutput::off_condition): <p>Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.</p>
    /// - On failure, responds with [`SdkError<DescribeModelError>`](crate::error::DescribeModelError)
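    ///
    /// # Example
    ///
    /// A minimal sketch with a placeholder model name that prints the model status and the raw
    /// JSON model metrics, when present.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_model()
    ///     .model_name("my-pump-model") // placeholder model name
    ///     .send()
    ///     .await?;
    /// println!("status: {:?}", resp.status());
    /// if let Some(metrics) = resp.model_metrics() {
    ///     println!("metrics: {metrics}");
    /// }
    /// # Ok(())
    /// # }
    /// ```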
    pub fn describe_model(&self) -> fluent_builders::DescribeModel {
        fluent_builders::DescribeModel::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListDataIngestionJobs`](crate::client::fluent_builders::ListDataIngestionJobs) operation.
    /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListDataIngestionJobs::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::ListDataIngestionJobs::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::ListDataIngestionJobs::set_dataset_name): <p>The name of the dataset being used for the data ingestion job. </p>
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListDataIngestionJobs::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListDataIngestionJobs::set_next_token): <p>An opaque pagination token indicating where to continue the listing of data ingestion jobs. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListDataIngestionJobs::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListDataIngestionJobs::set_max_results): <p> Specifies the maximum number of data ingestion jobs to list. </p>
    ///   - [`status(IngestionJobStatus)`](crate::client::fluent_builders::ListDataIngestionJobs::status) / [`set_status(Option<IngestionJobStatus>)`](crate::client::fluent_builders::ListDataIngestionJobs::set_status): <p>Indicates the status of the data ingestion job. </p>
    /// - On success, responds with [`ListDataIngestionJobsOutput`](crate::output::ListDataIngestionJobsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListDataIngestionJobsOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of data ingestion jobs. </p>
    ///   - [`data_ingestion_job_summaries(Option<Vec<DataIngestionJobSummary>>)`](crate::output::ListDataIngestionJobsOutput::data_ingestion_job_summaries): <p>Specifies information about the specific data ingestion job, including dataset name and status. </p>
    /// - On failure, responds with [`SdkError<ListDataIngestionJobsError>`](crate::error::ListDataIngestionJobsError)
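    ///
    /// # Example
    ///
    /// A minimal sketch of a single (non-paginated) page request, filtered to a placeholder
    /// dataset name; each returned summary is printed via its `Debug` representation.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .list_data_ingestion_jobs()
    ///     .dataset_name("my-sensor-dataset") // placeholder dataset name
    ///     .max_results(10)
    ///     .send()
    ///     .await?;
    /// for job in resp.data_ingestion_job_summaries().unwrap_or_default() {
    ///     println!("{job:?}");
    /// }
    /// # Ok(())
    /// # }
    /// ```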
    pub fn list_data_ingestion_jobs(&self) -> fluent_builders::ListDataIngestionJobs {
        fluent_builders::ListDataIngestionJobs::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListDatasets`](crate::client::fluent_builders::ListDatasets) operation.
    /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListDatasets::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListDatasets::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListDatasets::set_next_token): <p> An opaque pagination token indicating where to continue the listing of datasets. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListDatasets::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListDatasets::set_max_results): <p> Specifies the maximum number of datasets to list. </p>
    ///   - [`dataset_name_begins_with(impl Into<String>)`](crate::client::fluent_builders::ListDatasets::dataset_name_begins_with) / [`set_dataset_name_begins_with(Option<String>)`](crate::client::fluent_builders::ListDatasets::set_dataset_name_begins_with): <p>The beginning of the name of the datasets to be listed. </p>
    /// - On success, responds with [`ListDatasetsOutput`](crate::output::ListDatasetsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListDatasetsOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of datasets. </p>
    ///   - [`dataset_summaries(Option<Vec<DatasetSummary>>)`](crate::output::ListDatasetsOutput::dataset_summaries): <p>Provides information about the specified dataset, including creation time, dataset ARN, and status. </p>
    /// - On failure, responds with [`SdkError<ListDatasetsError>`](crate::error::ListDatasetsError)
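    ///
    /// # Example
    ///
    /// A sketch of paginating through all datasets whose names start with a placeholder
    /// prefix. It assumes `tokio_stream` is declared as a dependency so that
    /// `StreamExt::next` can drive the paginator stream.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// #     -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use tokio_stream::StreamExt;
    ///
    /// let mut pages = client
    ///     .list_datasets()
    ///     .dataset_name_begins_with("my-") // placeholder name prefix
    ///     .into_paginator()
    ///     .send();
    /// while let Some(page) = pages.next().await {
    ///     let page = page?;
    ///     for dataset in page.dataset_summaries().unwrap_or_default() {
    ///         println!("{dataset:?}");
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```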
    pub fn list_datasets(&self) -> fluent_builders::ListDatasets {
        fluent_builders::ListDatasets::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListInferenceEvents`](crate::client::fluent_builders::ListInferenceEvents) operation.
    /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListInferenceEvents::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListInferenceEvents::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListInferenceEvents::set_next_token): <p>An opaque pagination token indicating where to continue the listing of inference events.</p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListInferenceEvents::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListInferenceEvents::set_max_results): <p>Specifies the maximum number of inference events to list. </p>
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::ListInferenceEvents::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::ListInferenceEvents::set_inference_scheduler_name): <p>The name of the inference scheduler for the inference events listed. </p>
    ///   - [`interval_start_time(DateTime)`](crate::client::fluent_builders::ListInferenceEvents::interval_start_time) / [`set_interval_start_time(Option<DateTime>)`](crate::client::fluent_builders::ListInferenceEvents::set_interval_start_time): <p> Lookout for Equipment will return all the inference events with an end time equal to or greater than the start time given.</p>
    ///   - [`interval_end_time(DateTime)`](crate::client::fluent_builders::ListInferenceEvents::interval_end_time) / [`set_interval_end_time(Option<DateTime>)`](crate::client::fluent_builders::ListInferenceEvents::set_interval_end_time): <p>Returns all the inference events with a start time equal to or less than the end time given. </p>
    /// - On success, responds with [`ListInferenceEventsOutput`](crate::output::ListInferenceEventsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListInferenceEventsOutput::next_token): <p>An opaque pagination token indicating where to continue the listing of inference events. </p>
    ///   - [`inference_event_summaries(Option<Vec<InferenceEventSummary>>)`](crate::output::ListInferenceEventsOutput::inference_event_summaries): <p>Provides an array of information about the individual inference events returned from the <code>ListInferenceEvents</code> operation, including scheduler used, event start time, event end time, diagnostics, and so on. </p>
    /// - On failure, responds with [`SdkError<ListInferenceEventsError>`](crate::error::ListInferenceEventsError)
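    ///
    /// # Example
    ///
    /// A minimal sketch that lists the inference events recorded for a scheduler inside a
    /// time window; the scheduler name and the epoch timestamps are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_smithy_types::DateTime;
    ///
    /// let events = client
    ///     .list_inference_events()
    ///     .inference_scheduler_name("my-scheduler")
    ///     // Events whose end time falls at or after this instant ...
    ///     .interval_start_time(DateTime::from_secs(1_672_531_200))
    ///     // ... and whose start time falls at or before this instant.
    ///     .interval_end_time(DateTime::from_secs(1_672_617_600))
    ///     .send()
    ///     .await?;
    /// println!("{} event(s)", events.inference_event_summaries().unwrap_or_default().len());
    /// # Ok(())
    /// # }
    /// ```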
    pub fn list_inference_events(&self) -> fluent_builders::ListInferenceEvents {
        fluent_builders::ListInferenceEvents::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListInferenceExecutions`](crate::client::fluent_builders::ListInferenceExecutions) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListInferenceExecutions::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListInferenceExecutions::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListInferenceExecutions::set_next_token): <p>An opaque pagination token indicating where to continue the listing of inference executions.</p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListInferenceExecutions::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListInferenceExecutions::set_max_results): <p>Specifies the maximum number of inference executions to list. </p>
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::ListInferenceExecutions::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::ListInferenceExecutions::set_inference_scheduler_name): <p>The name of the inference scheduler for the inference execution listed. </p>
    ///   - [`data_start_time_after(DateTime)`](crate::client::fluent_builders::ListInferenceExecutions::data_start_time_after) / [`set_data_start_time_after(Option<DateTime>)`](crate::client::fluent_builders::ListInferenceExecutions::set_data_start_time_after): <p>The time reference in the dataset used for inference, after which Amazon Lookout for Equipment started the inference execution. </p>
    ///   - [`data_end_time_before(DateTime)`](crate::client::fluent_builders::ListInferenceExecutions::data_end_time_before) / [`set_data_end_time_before(Option<DateTime>)`](crate::client::fluent_builders::ListInferenceExecutions::set_data_end_time_before): <p>The time reference in the dataset used for inference, before which Amazon Lookout for Equipment stopped the inference execution. </p>
    ///   - [`status(InferenceExecutionStatus)`](crate::client::fluent_builders::ListInferenceExecutions::status) / [`set_status(Option<InferenceExecutionStatus>)`](crate::client::fluent_builders::ListInferenceExecutions::set_status): <p>The status of the inference execution. </p>
    /// - On success, responds with [`ListInferenceExecutionsOutput`](crate::output::ListInferenceExecutionsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListInferenceExecutionsOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of inference executions. </p>
    ///   - [`inference_execution_summaries(Option<Vec<InferenceExecutionSummary>>)`](crate::output::ListInferenceExecutionsOutput::inference_execution_summaries): <p>Provides an array of information about the individual inference executions returned from the <code>ListInferenceExecutions</code> operation, including model used, inference scheduler, data configuration, and so on. </p>
    /// - On failure, responds with [`SdkError<ListInferenceExecutionsError>`](crate::error::ListInferenceExecutionsError)
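    ///
    /// # Example
    ///
    /// A minimal sketch that lists only the failed executions of a scheduler; the scheduler
    /// name is a placeholder and the `Failed` variant of `InferenceExecutionStatus` is assumed:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_sdk_lookoutequipment::model::InferenceExecutionStatus;
    ///
    /// let resp = client
    ///     .list_inference_executions()
    ///     .inference_scheduler_name("my-scheduler")
    ///     .status(InferenceExecutionStatus::Failed)
    ///     .send()
    ///     .await?;
    /// for execution in resp.inference_execution_summaries().unwrap_or_default() {
    ///     println!("{:?}", execution);
    /// }
    /// # Ok(())
    /// # }
    /// ```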
    pub fn list_inference_executions(&self) -> fluent_builders::ListInferenceExecutions {
        fluent_builders::ListInferenceExecutions::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListInferenceSchedulers`](crate::client::fluent_builders::ListInferenceSchedulers) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListInferenceSchedulers::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListInferenceSchedulers::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListInferenceSchedulers::set_next_token): <p> An opaque pagination token indicating where to continue the listing of inference schedulers. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListInferenceSchedulers::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListInferenceSchedulers::set_max_results): <p> Specifies the maximum number of inference schedulers to list. </p>
    ///   - [`inference_scheduler_name_begins_with(impl Into<String>)`](crate::client::fluent_builders::ListInferenceSchedulers::inference_scheduler_name_begins_with) / [`set_inference_scheduler_name_begins_with(Option<String>)`](crate::client::fluent_builders::ListInferenceSchedulers::set_inference_scheduler_name_begins_with): <p>The beginning of the name of the inference schedulers to be listed. </p>
    ///   - [`model_name(impl Into<String>)`](crate::client::fluent_builders::ListInferenceSchedulers::model_name) / [`set_model_name(Option<String>)`](crate::client::fluent_builders::ListInferenceSchedulers::set_model_name): <p>The name of the ML model used by the inference scheduler to be listed. </p>
    ///   - [`status(InferenceSchedulerStatus)`](crate::client::fluent_builders::ListInferenceSchedulers::status) / [`set_status(Option<InferenceSchedulerStatus>)`](crate::client::fluent_builders::ListInferenceSchedulers::set_status): <p>Specifies the current status of the inference schedulers to list.</p>
    /// - On success, responds with [`ListInferenceSchedulersOutput`](crate::output::ListInferenceSchedulersOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListInferenceSchedulersOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of inference schedulers. </p>
    ///   - [`inference_scheduler_summaries(Option<Vec<InferenceSchedulerSummary>>)`](crate::output::ListInferenceSchedulersOutput::inference_scheduler_summaries): <p>Provides summary information about each inference scheduler in the response, including data upload frequency, model name and ARN, and status. </p>
    /// - On failure, responds with [`SdkError<ListInferenceSchedulersError>`](crate::error::ListInferenceSchedulersError)
    pub fn list_inference_schedulers(&self) -> fluent_builders::ListInferenceSchedulers {
        fluent_builders::ListInferenceSchedulers::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListLabelGroups`](crate::client::fluent_builders::ListLabelGroups) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListLabelGroups::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name_begins_with(impl Into<String>)`](crate::client::fluent_builders::ListLabelGroups::label_group_name_begins_with) / [`set_label_group_name_begins_with(Option<String>)`](crate::client::fluent_builders::ListLabelGroups::set_label_group_name_begins_with): <p> The beginning of the name of the label groups to be listed. </p>
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListLabelGroups::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListLabelGroups::set_next_token): <p> An opaque pagination token indicating where to continue the listing of label groups. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListLabelGroups::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListLabelGroups::set_max_results): <p> Specifies the maximum number of label groups to list. </p>
    /// - On success, responds with [`ListLabelGroupsOutput`](crate::output::ListLabelGroupsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListLabelGroupsOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of label groups. </p>
    ///   - [`label_group_summaries(Option<Vec<LabelGroupSummary>>)`](crate::output::ListLabelGroupsOutput::label_group_summaries): <p> A summary of the label groups. </p>
    /// - On failure, responds with [`SdkError<ListLabelGroupsError>`](crate::error::ListLabelGroupsError)
    pub fn list_label_groups(&self) -> fluent_builders::ListLabelGroups {
        fluent_builders::ListLabelGroups::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListLabels`](crate::client::fluent_builders::ListLabels) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListLabels::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::ListLabels::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::ListLabels::set_label_group_name): <p> The name of the label group whose labels you want to list. </p>
    ///   - [`interval_start_time(DateTime)`](crate::client::fluent_builders::ListLabels::interval_start_time) / [`set_interval_start_time(Option<DateTime>)`](crate::client::fluent_builders::ListLabels::set_interval_start_time): <p> Returns all the labels with an end time equal to or later than the start time given. </p>
    ///   - [`interval_end_time(DateTime)`](crate::client::fluent_builders::ListLabels::interval_end_time) / [`set_interval_end_time(Option<DateTime>)`](crate::client::fluent_builders::ListLabels::set_interval_end_time): <p> Returns all labels with a start time earlier than the end time given. </p>
    ///   - [`fault_code(impl Into<String>)`](crate::client::fluent_builders::ListLabels::fault_code) / [`set_fault_code(Option<String>)`](crate::client::fluent_builders::ListLabels::set_fault_code): <p> Returns labels with a particular fault code. </p>
    ///   - [`equipment(impl Into<String>)`](crate::client::fluent_builders::ListLabels::equipment) / [`set_equipment(Option<String>)`](crate::client::fluent_builders::ListLabels::set_equipment): <p> Lists the labels that pertain to a particular piece of equipment. </p>
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListLabels::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListLabels::set_next_token): <p> An opaque pagination token indicating where to continue the listing of labels. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListLabels::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListLabels::set_max_results): <p> Specifies the maximum number of labels to list. </p>
    /// - On success, responds with [`ListLabelsOutput`](crate::output::ListLabelsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListLabelsOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of labels. </p>
    ///   - [`label_summaries(Option<Vec<LabelSummary>>)`](crate::output::ListLabelsOutput::label_summaries): <p> A summary of the items in the label group. </p>
    /// - On failure, responds with [`SdkError<ListLabelsError>`](crate::error::ListLabelsError)
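    ///
    /// # Example
    ///
    /// A minimal sketch that lists the labels in a label group, narrowed to one piece of
    /// equipment and one fault code; every string value here is a placeholder:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .list_labels()
    ///     .label_group_name("my-label-group")
    ///     .equipment("pump-7")
    ///     .fault_code("BEARING_FAILURE")
    ///     .send()
    ///     .await?;
    /// println!("{} label(s)", resp.label_summaries().unwrap_or_default().len());
    /// # Ok(())
    /// # }
    /// ```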
    pub fn list_labels(&self) -> fluent_builders::ListLabels {
        fluent_builders::ListLabels::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListModels`](crate::client::fluent_builders::ListModels) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListModels::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListModels::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListModels::set_next_token): <p> An opaque pagination token indicating where to continue the listing of ML models. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListModels::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListModels::set_max_results): <p> Specifies the maximum number of ML models to list. </p>
    ///   - [`status(ModelStatus)`](crate::client::fluent_builders::ListModels::status) / [`set_status(Option<ModelStatus>)`](crate::client::fluent_builders::ListModels::set_status): <p>The status of the ML model. </p>
    ///   - [`model_name_begins_with(impl Into<String>)`](crate::client::fluent_builders::ListModels::model_name_begins_with) / [`set_model_name_begins_with(Option<String>)`](crate::client::fluent_builders::ListModels::set_model_name_begins_with): <p>The beginning of the name of the ML models being listed. </p>
    ///   - [`dataset_name_begins_with(impl Into<String>)`](crate::client::fluent_builders::ListModels::dataset_name_begins_with) / [`set_dataset_name_begins_with(Option<String>)`](crate::client::fluent_builders::ListModels::set_dataset_name_begins_with): <p>The beginning of the name of the dataset of the ML models to be listed. </p>
    /// - On success, responds with [`ListModelsOutput`](crate::output::ListModelsOutput) with field(s):
    ///   - [`next_token(Option<String>)`](crate::output::ListModelsOutput::next_token): <p> An opaque pagination token indicating where to continue the listing of ML models. </p>
    ///   - [`model_summaries(Option<Vec<ModelSummary>>)`](crate::output::ListModelsOutput::model_summaries): <p>Provides summary information about each model in the response, including creation time, model and dataset ARNs, and status. </p>
    /// - On failure, responds with [`SdkError<ListModelsError>`](crate::error::ListModelsError)
    pub fn list_models(&self) -> fluent_builders::ListModels {
        fluent_builders::ListModels::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListSensorStatistics`](crate::client::fluent_builders::ListSensorStatistics) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListSensorStatistics::into_paginator).
    ///
    /// - The fluent builder is configurable:
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::ListSensorStatistics::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::ListSensorStatistics::set_dataset_name): <p> The name of the dataset associated with the list of sensor statistics. </p>
    ///   - [`ingestion_job_id(impl Into<String>)`](crate::client::fluent_builders::ListSensorStatistics::ingestion_job_id) / [`set_ingestion_job_id(Option<String>)`](crate::client::fluent_builders::ListSensorStatistics::set_ingestion_job_id): <p> The ingestion job ID associated with the list of sensor statistics. To get sensor statistics for a particular ingestion job ID, both the dataset name and the ingestion job ID must be submitted as inputs. </p>
    ///   - [`max_results(i32)`](crate::client::fluent_builders::ListSensorStatistics::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListSensorStatistics::set_max_results): <p>Specifies the maximum number of sensors for which to retrieve statistics. </p>
    ///   - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListSensorStatistics::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListSensorStatistics::set_next_token): <p>An opaque pagination token indicating where to continue the listing of sensor statistics. </p>
    /// - On success, responds with [`ListSensorStatisticsOutput`](crate::output::ListSensorStatisticsOutput) with field(s):
    ///   - [`sensor_statistics_summaries(Option<Vec<SensorStatisticsSummary>>)`](crate::output::ListSensorStatisticsOutput::sensor_statistics_summaries): <p>Provides ingestion-based statistics regarding the specified sensor with respect to various validation types, such as whether data exists, the number and percentage of missing values, and the number and percentage of duplicate timestamps. </p>
    ///   - [`next_token(Option<String>)`](crate::output::ListSensorStatisticsOutput::next_token): <p>An opaque pagination token indicating where to continue the listing of sensor statistics. </p>
    /// - On failure, responds with [`SdkError<ListSensorStatisticsError>`](crate::error::ListSensorStatisticsError)
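    ///
    /// # Example
    ///
    /// A minimal sketch that retrieves per-sensor statistics for one ingestion job of a
    /// dataset; the dataset name and job ID are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .list_sensor_statistics()
    ///     .dataset_name("my-dataset")
    ///     .ingestion_job_id("0123456789abcdef")
    ///     .max_results(50)
    ///     .send()
    ///     .await?;
    /// for stats in resp.sensor_statistics_summaries().unwrap_or_default() {
    ///     println!("{:?}", stats);
    /// }
    /// # Ok(())
    /// # }
    /// ```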
    pub fn list_sensor_statistics(&self) -> fluent_builders::ListSensorStatistics {
        fluent_builders::ListSensorStatistics::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::ListTagsForResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::ListTagsForResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the resource (such as the dataset or model) that is the focus of the <code>ListTagsForResource</code> operation. </p>
    /// - On success, responds with [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) with field(s):
    ///   - [`tags(Option<Vec<Tag>>)`](crate::output::ListTagsForResourceOutput::tags): <p> Any tags associated with the resource. </p>
    /// - On failure, responds with [`SdkError<ListTagsForResourceError>`](crate::error::ListTagsForResourceError)
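    ///
    /// # Example
    ///
    /// A minimal sketch that prints the tags attached to a resource; the ARN is a placeholder:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .list_tags_for_resource()
    ///     .resource_arn("arn:aws:lookoutequipment:us-east-1:123456789012:dataset/my-dataset/abc123")
    ///     .send()
    ///     .await?;
    /// for tag in resp.tags().unwrap_or_default() {
    ///     println!("{:?}", tag);
    /// }
    /// # Ok(())
    /// # }
    /// ```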
    pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource {
        fluent_builders::ListTagsForResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`StartDataIngestionJob`](crate::client::fluent_builders::StartDataIngestionJob) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`dataset_name(impl Into<String>)`](crate::client::fluent_builders::StartDataIngestionJob::dataset_name) / [`set_dataset_name(Option<String>)`](crate::client::fluent_builders::StartDataIngestionJob::set_dataset_name): <p>The name of the dataset being used by the data ingestion job. </p>
    ///   - [`ingestion_input_configuration(IngestionInputConfiguration)`](crate::client::fluent_builders::StartDataIngestionJob::ingestion_input_configuration) / [`set_ingestion_input_configuration(Option<IngestionInputConfiguration>)`](crate::client::fluent_builders::StartDataIngestionJob::set_ingestion_input_configuration): <p> Specifies information for the input data for the data ingestion job, including dataset S3 location. </p>
    ///   - [`role_arn(impl Into<String>)`](crate::client::fluent_builders::StartDataIngestionJob::role_arn) / [`set_role_arn(Option<String>)`](crate::client::fluent_builders::StartDataIngestionJob::set_role_arn): <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the data ingestion job. </p>
    ///   - [`client_token(impl Into<String>)`](crate::client::fluent_builders::StartDataIngestionJob::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::StartDataIngestionJob::set_client_token): <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
    /// - On success, responds with [`StartDataIngestionJobOutput`](crate::output::StartDataIngestionJobOutput) with field(s):
    ///   - [`job_id(Option<String>)`](crate::output::StartDataIngestionJobOutput::job_id): <p>Indicates the job ID of the data ingestion job. </p>
    ///   - [`status(Option<IngestionJobStatus>)`](crate::output::StartDataIngestionJobOutput::status): <p>Indicates the status of the <code>StartDataIngestionJob</code> operation. </p>
    /// - On failure, responds with [`SdkError<StartDataIngestionJobError>`](crate::error::StartDataIngestionJobError)
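    ///
    /// # Example
    ///
    /// A minimal sketch of kicking off an ingestion job. The
    /// [`IngestionInputConfiguration`](crate::model::IngestionInputConfiguration) (which points at
    /// the S3 location of the data) is assumed to be built elsewhere and passed in, and the
    /// dataset name and role ARN are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(
    /// #     client: &aws_sdk_lookoutequipment::Client,
    /// #     input_config: aws_sdk_lookoutequipment::model::IngestionInputConfiguration,
    /// # ) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .start_data_ingestion_job()
    ///     .dataset_name("my-dataset")
    ///     .ingestion_input_configuration(input_config)
    ///     .role_arn("arn:aws:iam::123456789012:role/LookoutEquipmentAccess")
    ///     .send()
    ///     .await?;
    /// println!("started job {:?} with status {:?}", resp.job_id(), resp.status());
    /// # Ok(())
    /// # }
    /// ```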
    pub fn start_data_ingestion_job(&self) -> fluent_builders::StartDataIngestionJob {
        fluent_builders::StartDataIngestionJob::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`StartInferenceScheduler`](crate::client::fluent_builders::StartInferenceScheduler) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::StartInferenceScheduler::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::StartInferenceScheduler::set_inference_scheduler_name): <p>The name of the inference scheduler to be started. </p>
    /// - On success, responds with [`StartInferenceSchedulerOutput`](crate::output::StartInferenceSchedulerOutput) with field(s):
    ///   - [`model_arn(Option<String>)`](crate::output::StartInferenceSchedulerOutput::model_arn): <p>The Amazon Resource Name (ARN) of the ML model being used by the inference scheduler. </p>
    ///   - [`model_name(Option<String>)`](crate::output::StartInferenceSchedulerOutput::model_name): <p>The name of the ML model being used by the inference scheduler. </p>
    ///   - [`inference_scheduler_name(Option<String>)`](crate::output::StartInferenceSchedulerOutput::inference_scheduler_name): <p>The name of the inference scheduler being started. </p>
    ///   - [`inference_scheduler_arn(Option<String>)`](crate::output::StartInferenceSchedulerOutput::inference_scheduler_arn): <p>The Amazon Resource Name (ARN) of the inference scheduler being started. </p>
    ///   - [`status(Option<InferenceSchedulerStatus>)`](crate::output::StartInferenceSchedulerOutput::status): <p>Indicates the status of the inference scheduler. </p>
    /// - On failure, responds with [`SdkError<StartInferenceSchedulerError>`](crate::error::StartInferenceSchedulerError)
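    ///
    /// # Example
    ///
    /// A minimal sketch that starts an existing scheduler and prints the status reported back;
    /// the scheduler name is a placeholder:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .start_inference_scheduler()
    ///     .inference_scheduler_name("my-scheduler")
    ///     .send()
    ///     .await?;
    /// println!("scheduler status: {:?}", resp.status());
    /// # Ok(())
    /// # }
    /// ```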
    pub fn start_inference_scheduler(&self) -> fluent_builders::StartInferenceScheduler {
        fluent_builders::StartInferenceScheduler::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`StopInferenceScheduler`](crate::client::fluent_builders::StopInferenceScheduler) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::StopInferenceScheduler::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::StopInferenceScheduler::set_inference_scheduler_name): <p>The name of the inference scheduler to be stopped. </p>
    /// - On success, responds with [`StopInferenceSchedulerOutput`](crate::output::StopInferenceSchedulerOutput) with field(s):
    ///   - [`model_arn(Option<String>)`](crate::output::StopInferenceSchedulerOutput::model_arn): <p>The Amazon Resource Name (ARN) of the ML model used by the inference scheduler being stopped. </p>
    ///   - [`model_name(Option<String>)`](crate::output::StopInferenceSchedulerOutput::model_name): <p>The name of the ML model used by the inference scheduler being stopped. </p>
    ///   - [`inference_scheduler_name(Option<String>)`](crate::output::StopInferenceSchedulerOutput::inference_scheduler_name): <p>The name of the inference scheduler being stopped. </p>
    ///   - [`inference_scheduler_arn(Option<String>)`](crate::output::StopInferenceSchedulerOutput::inference_scheduler_arn): <p>The Amazon Resource Name (ARN) of the inference scheduler being stopped. </p>
    ///   - [`status(Option<InferenceSchedulerStatus>)`](crate::output::StopInferenceSchedulerOutput::status): <p>Indicates the status of the inference scheduler. </p>
    /// - On failure, responds with [`SdkError<StopInferenceSchedulerError>`](crate::error::StopInferenceSchedulerError)
    pub fn stop_inference_scheduler(&self) -> fluent_builders::StopInferenceScheduler {
        fluent_builders::StopInferenceScheduler::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`TagResource`](crate::client::fluent_builders::TagResource) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::TagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::TagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the specific resource to which the tag should be associated. </p>
    ///   - [`tags(Vec<Tag>)`](crate::client::fluent_builders::TagResource::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::TagResource::set_tags): <p>The tag or tags to be associated with a specific resource. Both the tag key and value are specified. </p>
    /// - On success, responds with [`TagResourceOutput`](crate::output::TagResourceOutput)
    ///
    /// - On failure, responds with [`SdkError<TagResourceError>`](crate::error::TagResourceError)
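    ///
    /// # Example
    ///
    /// A minimal sketch that attaches one tag to a resource. It assumes the
    /// [`Tag`](crate::model::Tag) model builder with `key`/`value` setters; the ARN and the
    /// tag contents are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_sdk_lookoutequipment::model::Tag;
    ///
    /// let tag = Tag::builder().key("team").value("reliability").build();
    /// client
    ///     .tag_resource()
    ///     .resource_arn("arn:aws:lookoutequipment:us-east-1:123456789012:dataset/my-dataset/abc123")
    ///     .tags(tag) // appends a single `Tag`; call repeatedly for more
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```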
    pub fn tag_resource(&self) -> fluent_builders::TagResource {
        fluent_builders::TagResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`UntagResource`](crate::client::fluent_builders::UntagResource) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::UntagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::UntagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the resource to which the tag is currently associated. </p>
    ///   - [`tag_keys(Vec<String>)`](crate::client::fluent_builders::UntagResource::tag_keys) / [`set_tag_keys(Option<Vec<String>>)`](crate::client::fluent_builders::UntagResource::set_tag_keys): <p>Specifies the key of the tag to be removed from a specified resource. </p>
    /// - On success, responds with [`UntagResourceOutput`](crate::output::UntagResourceOutput)
    ///
    /// - On failure, responds with [`SdkError<UntagResourceError>`](crate::error::UntagResourceError)
    pub fn untag_resource(&self) -> fluent_builders::UntagResource {
        fluent_builders::UntagResource::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`UpdateInferenceScheduler`](crate::client::fluent_builders::UpdateInferenceScheduler) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`inference_scheduler_name(impl Into<String>)`](crate::client::fluent_builders::UpdateInferenceScheduler::inference_scheduler_name) / [`set_inference_scheduler_name(Option<String>)`](crate::client::fluent_builders::UpdateInferenceScheduler::set_inference_scheduler_name): <p>The name of the inference scheduler to be updated. </p>
    ///   - [`data_delay_offset_in_minutes(i64)`](crate::client::fluent_builders::UpdateInferenceScheduler::data_delay_offset_in_minutes) / [`set_data_delay_offset_in_minutes(Option<i64>)`](crate::client::fluent_builders::UpdateInferenceScheduler::set_data_delay_offset_in_minutes): <p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if an offset delay of five minutes is selected, inference does not begin on the data until the first data measurement after the five-minute mark. The inference scheduler wakes up at the configured frequency plus this additional delay before checking the Amazon S3 bucket, which gives you a buffer to upload data at the same frequency without stopping and restarting the scheduler when new data arrives.</p>
    ///   - [`data_upload_frequency(DataUploadFrequency)`](crate::client::fluent_builders::UpdateInferenceScheduler::data_upload_frequency) / [`set_data_upload_frequency(Option<DataUploadFrequency>)`](crate::client::fluent_builders::UpdateInferenceScheduler::set_data_upload_frequency): <p>How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes. </p>
    ///   - [`data_input_configuration(InferenceInputConfiguration)`](crate::client::fluent_builders::UpdateInferenceScheduler::data_input_configuration) / [`set_data_input_configuration(Option<InferenceInputConfiguration>)`](crate::client::fluent_builders::UpdateInferenceScheduler::set_data_input_configuration): <p> Specifies information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
    ///   - [`data_output_configuration(InferenceOutputConfiguration)`](crate::client::fluent_builders::UpdateInferenceScheduler::data_output_configuration) / [`set_data_output_configuration(Option<InferenceOutputConfiguration>)`](crate::client::fluent_builders::UpdateInferenceScheduler::set_data_output_configuration): <p> Specifies information for the output results from the inference scheduler, including the output S3 location. </p>
    ///   - [`role_arn(impl Into<String>)`](crate::client::fluent_builders::UpdateInferenceScheduler::role_arn) / [`set_role_arn(Option<String>)`](crate::client::fluent_builders::UpdateInferenceScheduler::set_role_arn): <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler. </p>
    /// - On success, responds with [`UpdateInferenceSchedulerOutput`](crate::output::UpdateInferenceSchedulerOutput)
    ///
    /// - On failure, responds with [`SdkError<UpdateInferenceSchedulerError>`](crate::error::UpdateInferenceSchedulerError)
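    ///
    /// # Example
    ///
    /// A minimal sketch that updates the data delay offset and the access role of an existing
    /// scheduler; the scheduler name and role ARN are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .update_inference_scheduler()
    ///     .inference_scheduler_name("my-scheduler")
    ///     .data_delay_offset_in_minutes(10)
    ///     .role_arn("arn:aws:iam::123456789012:role/LookoutEquipmentAccess")
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```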
    pub fn update_inference_scheduler(&self) -> fluent_builders::UpdateInferenceScheduler {
        fluent_builders::UpdateInferenceScheduler::new(self.handle.clone())
    }
    /// Constructs a fluent builder for the [`UpdateLabelGroup`](crate::client::fluent_builders::UpdateLabelGroup) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`label_group_name(impl Into<String>)`](crate::client::fluent_builders::UpdateLabelGroup::label_group_name) / [`set_label_group_name(Option<String>)`](crate::client::fluent_builders::UpdateLabelGroup::set_label_group_name): <p> The name of the label group to be updated. </p>
    ///   - [`fault_codes(Vec<String>)`](crate::client::fluent_builders::UpdateLabelGroup::fault_codes) / [`set_fault_codes(Option<Vec<String>>)`](crate::client::fluent_builders::UpdateLabelGroup::set_fault_codes): <p> Updates the list of codes indicating the types of anomalies associated with the label group. </p>  <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
    /// - On success, responds with [`UpdateLabelGroupOutput`](crate::output::UpdateLabelGroupOutput)
    ///
    /// - On failure, responds with [`SdkError<UpdateLabelGroupError>`](crate::error::UpdateLabelGroupError)
    pub fn update_label_group(&self) -> fluent_builders::UpdateLabelGroup {
        fluent_builders::UpdateLabelGroup::new(self.handle.clone())
    }
}
pub mod fluent_builders {

    //! Utilities to ergonomically construct a request to the service.
    //!
    //! Fluent builders are created through the [`Client`](crate::client::Client) by calling
    //! one of its operation methods. After parameters are set using the builder methods,
    //! the `send` method can be called to initiate the request.
    /// Fluent builder constructing a request to `CreateDataset`.
    ///
    /// <p>Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. In other words, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data. </p>
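    ///
    /// # Example
    ///
    /// A minimal sketch of creating a dataset from an inline JSON schema. It assumes the
    /// [`DatasetSchema`](crate::model::DatasetSchema) builder exposes an `inline_data_schema`
    /// setter; the schema body and the dataset name are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_sdk_lookoutequipment::model::DatasetSchema;
    ///
    /// // Hypothetical schema: one component with a timestamp column and one sensor column.
    /// let schema = DatasetSchema::builder()
    ///     .inline_data_schema(
    ///         r#"{"Components":[{"ComponentName":"pump","Columns":[{"Name":"Timestamp","Type":"DATETIME"},{"Name":"Sensor1","Type":"DOUBLE"}]}]}"#,
    ///     )
    ///     .build();
    /// let resp = client
    ///     .create_dataset()
    ///     .dataset_name("my-dataset")
    ///     .dataset_schema(schema)
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```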
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct CreateDataset {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::create_dataset_input::Builder,
    }
    impl CreateDataset {
        /// Creates a new `CreateDataset`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
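        ///
        /// # Example
        ///
        /// A minimal sketch, assuming the returned
        /// [`CustomizableOperation`](crate::operation::customize::CustomizableOperation) exposes a
        /// `mutate_request` helper; the header added here is purely illustrative:
        ///
        /// ```rust,no_run
        /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
        /// let output = client
        ///     .create_dataset()
        ///     .dataset_name("my-dataset")
        ///     .customize()
        ///     .await?
        ///     .mutate_request(|req| {
        ///         // Attach an illustrative header to the underlying HTTP request.
        ///         req.headers_mut()
        ///             .insert("x-example-trace", http::HeaderValue::from_static("demo"));
        ///     })
        ///     .send()
        ///     .await?;
        /// println!("{:?}", output);
        /// # Ok(())
        /// # }
        /// ```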
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::CreateDataset,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::CreateDatasetError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::CreateDatasetOutput,
            aws_smithy_http::result::SdkError<crate::error::CreateDatasetError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the dataset being created. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p>The name of the dataset being created. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
        /// <p>A JSON description of the data that is in each time series dataset, including names, column names, and data types. </p>
        pub fn dataset_schema(mut self, input: crate::model::DatasetSchema) -> Self {
            self.inner = self.inner.dataset_schema(input);
            self
        }
        /// <p>A JSON description of the data that is in each time series dataset, including names, column names, and data types. </p>
        pub fn set_dataset_schema(
            mut self,
            input: std::option::Option<crate::model::DatasetSchema>,
        ) -> Self {
            self.inner = self.inner.set_dataset_schema(input);
            self
        }
        /// <p>Provides the identifier of the KMS key used to encrypt dataset data by Amazon Lookout for Equipment. </p>
        pub fn server_side_kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.server_side_kms_key_id(input.into());
            self
        }
        /// <p>Provides the identifier of the KMS key used to encrypt dataset data by Amazon Lookout for Equipment. </p>
        pub fn set_server_side_kms_key_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_server_side_kms_key_id(input);
            self
        }
        /// <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.client_token(input.into());
            self
        }
        /// <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_client_token(input);
            self
        }
        /// Appends an item to `Tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>Any tags associated with the ingested data described in the dataset. </p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            self.inner = self.inner.tags(input);
            self
        }
        /// <p>Any tags associated with the ingested data described in the dataset. </p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.inner = self.inner.set_tags(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CreateInferenceScheduler`.
    ///
    /// <p> Creates a scheduled inference. Scheduling an inference is setting up a continuous real-time inference plan to analyze new measurement data. When setting up the schedule, you provide an S3 bucket location for the input data, assign it a delimiter between separate entries in the data, set an offset delay if desired, and set the frequency of inferencing. You must also provide an S3 bucket location for the output data. </p>
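    ///
    /// # Example
    ///
    /// A minimal sketch of wiring a trained model to a new scheduler. The input and output
    /// configurations (S3 locations, delimiter, and so on) and the upload frequency are assumed
    /// to be built elsewhere with the crate's model builders and passed in; the model and
    /// scheduler names and the role ARN are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(
    /// #     client: &aws_sdk_lookoutequipment::Client,
    /// #     input_config: aws_sdk_lookoutequipment::model::InferenceInputConfiguration,
    /// #     output_config: aws_sdk_lookoutequipment::model::InferenceOutputConfiguration,
    /// #     frequency: aws_sdk_lookoutequipment::model::DataUploadFrequency,
    /// # ) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .create_inference_scheduler()
    ///     .model_name("my-trained-model")
    ///     .inference_scheduler_name("my-scheduler")
    ///     .data_upload_frequency(frequency)
    ///     .data_input_configuration(input_config)
    ///     .data_output_configuration(output_config)
    ///     .role_arn("arn:aws:iam::123456789012:role/LookoutEquipmentAccess")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```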
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct CreateInferenceScheduler {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::create_inference_scheduler_input::Builder,
    }
    impl CreateInferenceScheduler {
        /// Creates a new `CreateInferenceScheduler`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::CreateInferenceScheduler,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::CreateInferenceSchedulerError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::CreateInferenceSchedulerOutput,
            aws_smithy_http::result::SdkError<crate::error::CreateInferenceSchedulerError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the previously trained ML model being used to create the inference scheduler. </p>
        pub fn model_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.model_name(input.into());
            self
        }
        /// <p>The name of the previously trained ML model being used to create the inference scheduler. </p>
        pub fn set_model_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_model_name(input);
            self
        }
        /// <p>The name of the inference scheduler being created. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler being created. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
        /// <p>The interval (in minutes) of planned delay at the start of each inference segment. For example, if inference is set to run every ten minutes, the delay is set to five minutes and the time is 09:08. The inference scheduler will wake up at the configured interval (which, without a delay configured, would be 09:10) plus the additional five minute delay time (so 09:15) to check your Amazon S3 bucket. The delay provides a buffer for you to upload data at the same frequency, so that you don't have to stop and restart the scheduler when uploading new data.</p>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html">Understanding the inference process</a>.</p>
        pub fn data_delay_offset_in_minutes(mut self, input: i64) -> Self {
            self.inner = self.inner.data_delay_offset_in_minutes(input);
            self
        }
        /// <p>The interval (in minutes) of planned delay at the start of each inference segment. For example, if inference is set to run every ten minutes, the delay is set to five minutes and the time is 09:08. The inference scheduler will wake up at the configured interval (which, without a delay configured, would be 09:10) plus the additional five minute delay time (so 09:15) to check your Amazon S3 bucket. The delay provides a buffer for you to upload data at the same frequency, so that you don't have to stop and restart the scheduler when uploading new data.</p>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html">Understanding the inference process</a>.</p>
        pub fn set_data_delay_offset_in_minutes(mut self, input: std::option::Option<i64>) -> Self {
            self.inner = self.inner.set_data_delay_offset_in_minutes(input);
            self
        }
        /// <p> How often data is uploaded to the source Amazon S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment runs inference on your data.</p>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html">Understanding the inference process</a>.</p>
        pub fn data_upload_frequency(mut self, input: crate::model::DataUploadFrequency) -> Self {
            self.inner = self.inner.data_upload_frequency(input);
            self
        }
        /// <p> How often data is uploaded to the source Amazon S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment runs inference on your data.</p>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html">Understanding the inference process</a>.</p>
        pub fn set_data_upload_frequency(
            mut self,
            input: std::option::Option<crate::model::DataUploadFrequency>,
        ) -> Self {
            self.inner = self.inner.set_data_upload_frequency(input);
            self
        }
        /// <p>Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
        pub fn data_input_configuration(
            mut self,
            input: crate::model::InferenceInputConfiguration,
        ) -> Self {
            self.inner = self.inner.data_input_configuration(input);
            self
        }
        /// <p>Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
        pub fn set_data_input_configuration(
            mut self,
            input: std::option::Option<crate::model::InferenceInputConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_data_input_configuration(input);
            self
        }
        /// <p>Specifies configuration information for the output results for the inference scheduler, including the S3 location for the output. </p>
        pub fn data_output_configuration(
            mut self,
            input: crate::model::InferenceOutputConfiguration,
        ) -> Self {
            self.inner = self.inner.data_output_configuration(input);
            self
        }
        /// <p>Specifies configuration information for the output results for the inference scheduler, including the S3 location for the output. </p>
        pub fn set_data_output_configuration(
            mut self,
            input: std::option::Option<crate::model::InferenceOutputConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_data_output_configuration(input);
            self
        }
        /// <p>The Amazon Resource Name (ARN) of a role with permission to access the data source being used for the inference. </p>
        pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.role_arn(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) of a role with permission to access the data source being used for the inference. </p>
        pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_role_arn(input);
            self
        }
        /// <p>Provides the identifier of the KMS key used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>
        pub fn server_side_kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.server_side_kms_key_id(input.into());
            self
        }
        /// <p>Provides the identifier of the KMS key used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>
        pub fn set_server_side_kms_key_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_server_side_kms_key_id(input);
            self
        }
        /// <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.client_token(input.into());
            self
        }
        /// <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_client_token(input);
            self
        }
        /// Appends an item to `Tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>Any tags associated with the inference scheduler. </p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            self.inner = self.inner.tags(input);
            self
        }
        /// <p>Any tags associated with the inference scheduler. </p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.inner = self.inner.set_tags(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CreateLabel`.
    ///
    /// <p> Creates a label for an event. </p>
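    ///
    /// # Example
    ///
    /// A minimal sketch of labeling a known anomaly window. It assumes the `Anomaly` variant of
    /// [`LabelRating`](crate::model::LabelRating); the group name, timestamps, fault code, and
    /// equipment name are placeholders:
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_sdk_lookoutequipment::model::LabelRating;
    /// use aws_smithy_types::DateTime;
    ///
    /// let resp = client
    ///     .create_label()
    ///     .label_group_name("my-label-group")
    ///     .start_time(DateTime::from_secs(1_672_531_200))
    ///     .end_time(DateTime::from_secs(1_672_534_800))
    ///     .rating(LabelRating::Anomaly)
    ///     .fault_code("BEARING_FAILURE")
    ///     .equipment("pump-7")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```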
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct CreateLabel {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::create_label_input::Builder,
    }
    impl CreateLabel {
        /// Creates a new `CreateLabel`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::CreateLabel,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::CreateLabelError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::CreateLabelOutput,
            aws_smithy_http::result::SdkError<crate::error::CreateLabelError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> The name of a group of labels. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> The name of a group of labels. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
        /// <p> The start time of the labeled event. </p>
        pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.start_time(input);
            self
        }
        /// <p> The start time of the labeled event. </p>
        pub fn set_start_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_start_time(input);
            self
        }
        /// <p> The end time of the labeled event. </p>
        pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.end_time(input);
            self
        }
        /// <p> The end time of the labeled event. </p>
        pub fn set_end_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_end_time(input);
            self
        }
        /// <p> Indicates whether a labeled event represents an anomaly. </p>
        pub fn rating(mut self, input: crate::model::LabelRating) -> Self {
            self.inner = self.inner.rating(input);
            self
        }
        /// <p> Indicates whether a labeled event represents an anomaly. </p>
        pub fn set_rating(mut self, input: std::option::Option<crate::model::LabelRating>) -> Self {
            self.inner = self.inner.set_rating(input);
            self
        }
        /// <p> Provides additional information about the label. The fault code must be defined in the FaultCodes attribute of the label group.</p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn fault_code(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.fault_code(input.into());
            self
        }
        /// <p> Provides additional information about the label. The fault code must be defined in the FaultCodes attribute of the label group.</p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn set_fault_code(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_fault_code(input);
            self
        }
        /// <p> Metadata providing additional information about the label. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn notes(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.notes(input.into());
            self
        }
        /// <p> Metadata providing additional information about the label. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn set_notes(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_notes(input);
            self
        }
        /// <p> Indicates that a label pertains to a particular piece of equipment. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn equipment(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.equipment(input.into());
            self
        }
        /// <p> Indicates that a label pertains to a particular piece of equipment. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn set_equipment(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_equipment(input);
            self
        }
        /// <p> A unique identifier for the request to create a label. If you do not set the client request token, Lookout for Equipment generates one. </p>
        pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.client_token(input.into());
            self
        }
        /// <p> A unique identifier for the request to create a label. If you do not set the client request token, Lookout for Equipment generates one. </p>
        pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_client_token(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CreateLabelGroup`.
    ///
    /// <p> Creates a group of labels. </p>
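    ///
    /// # Examples
    /// A minimal sketch of invoking this operation through the fluent builder; the
    /// label group name and fault code below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .create_label_group()
    ///     .label_group_name("my-label-group") // placeholder name
    ///     .fault_codes("BearingFailure")      // optional: appends one acceptable fault code
    ///     .send()
    ///     .await?;
    /// println!("created label group: {:?}", resp);
    /// # Ok(())
    /// # }
    /// ```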
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct CreateLabelGroup {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::create_label_group_input::Builder,
    }
    impl CreateLabelGroup {
        /// Creates a new `CreateLabelGroup`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::CreateLabelGroup,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::CreateLabelGroupError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::CreateLabelGroupOutput,
            aws_smithy_http::result::SdkError<crate::error::CreateLabelGroupError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> Names a group of labels.</p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> Names a group of labels.</p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
        /// Appends an item to `FaultCodes`.
        ///
        /// To override the contents of this collection use [`set_fault_codes`](Self::set_fault_codes).
        ///
        /// <p> The acceptable fault codes (indicating the type of anomaly associated with the label) that can be used with this label group.</p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn fault_codes(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.fault_codes(input.into());
            self
        }
        /// <p> The acceptable fault codes (indicating the type of anomaly associated with the label) that can be used with this label group.</p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn set_fault_codes(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.inner = self.inner.set_fault_codes(input);
            self
        }
        /// <p> A unique identifier for the request to create a label group. If you do not set the client request token, Lookout for Equipment generates one. </p>
        pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.client_token(input.into());
            self
        }
        /// <p> A unique identifier for the request to create a label group. If you do not set the client request token, Lookout for Equipment generates one. </p>
        pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_client_token(input);
            self
        }
        /// Appends an item to `Tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p> Tags that provide metadata about the label group you are creating. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            self.inner = self.inner.tags(input);
            self
        }
        /// <p> Tags that provide metadata about the label group you are creating. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.inner = self.inner.set_tags(input);
            self
        }
    }
    /// Fluent builder constructing a request to `CreateModel`.
    ///
    /// <p>Creates an ML model for data inference. </p>
    /// <p>A machine-learning (ML) model is a mathematical model that finds patterns in your data. In Amazon Lookout for Equipment, the model learns the patterns of normal behavior and detects abnormal behavior that could be potential equipment failure (or maintenance events). The models are made by analyzing normal data and abnormalities in machine behavior that have already occurred.</p>
    /// <p>Your model is trained using a portion of the data from your dataset and uses that data to learn patterns of normal behavior and abnormal patterns that lead to equipment failure. Another portion of the data is used to evaluate the model's accuracy. </p>
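    ///
    /// # Examples
    /// A minimal sketch of a training request; the model name, dataset name, role ARN,
    /// and epoch timestamps below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_smithy_types::DateTime;
    /// let resp = client
    ///     .create_model()
    ///     .model_name("my-model")                                 // placeholder
    ///     .dataset_name("my-dataset")                             // placeholder
    ///     .role_arn("arn:aws:iam::111122223333:role/LookoutRole") // placeholder
    ///     .training_data_start_time(DateTime::from_secs(1_640_995_200))
    ///     .training_data_end_time(DateTime::from_secs(1_648_771_200))
    ///     .send()
    ///     .await?;
    /// println!("create model response: {:?}", resp);
    /// # Ok(())
    /// # }
    /// ```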
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct CreateModel {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::create_model_input::Builder,
    }
    impl CreateModel {
        /// Creates a new `CreateModel`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::CreateModel,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::CreateModelError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::CreateModelOutput,
            aws_smithy_http::result::SdkError<crate::error::CreateModelError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name for the ML model to be created.</p>
        pub fn model_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.model_name(input.into());
            self
        }
        /// <p>The name for the ML model to be created.</p>
        pub fn set_model_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_model_name(input);
            self
        }
        /// <p>The name of the dataset for the ML model being created. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p>The name of the dataset for the ML model being created. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
        /// <p>The data schema for the ML model being created. </p>
        pub fn dataset_schema(mut self, input: crate::model::DatasetSchema) -> Self {
            self.inner = self.inner.dataset_schema(input);
            self
        }
        /// <p>The data schema for the ML model being created. </p>
        pub fn set_dataset_schema(
            mut self,
            input: std::option::Option<crate::model::DatasetSchema>,
        ) -> Self {
            self.inner = self.inner.set_dataset_schema(input);
            self
        }
        /// <p>The input configuration for the labels being used for the ML model that's being created. </p>
        pub fn labels_input_configuration(
            mut self,
            input: crate::model::LabelsInputConfiguration,
        ) -> Self {
            self.inner = self.inner.labels_input_configuration(input);
            self
        }
        /// <p>The input configuration for the labels being used for the ML model that's being created. </p>
        pub fn set_labels_input_configuration(
            mut self,
            input: std::option::Option<crate::model::LabelsInputConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_labels_input_configuration(input);
            self
        }
        /// <p>A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.client_token(input.into());
            self
        }
        /// <p>A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_client_token(input);
            self
        }
        /// <p>Indicates the time reference in the dataset that should be used to begin the subset of training data for the ML model. </p>
        pub fn training_data_start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.training_data_start_time(input);
            self
        }
        /// <p>Indicates the time reference in the dataset that should be used to begin the subset of training data for the ML model. </p>
        pub fn set_training_data_start_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_training_data_start_time(input);
            self
        }
        /// <p>Indicates the time reference in the dataset that should be used to end the subset of training data for the ML model. </p>
        pub fn training_data_end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.training_data_end_time(input);
            self
        }
        /// <p>Indicates the time reference in the dataset that should be used to end the subset of training data for the ML model. </p>
        pub fn set_training_data_end_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_training_data_end_time(input);
            self
        }
        /// <p>Indicates the time reference in the dataset that should be used to begin the subset of evaluation data for the ML model. </p>
        pub fn evaluation_data_start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.evaluation_data_start_time(input);
            self
        }
        /// <p>Indicates the time reference in the dataset that should be used to begin the subset of evaluation data for the ML model. </p>
        pub fn set_evaluation_data_start_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_evaluation_data_start_time(input);
            self
        }
        /// <p> Indicates the time reference in the dataset that should be used to end the subset of evaluation data for the ML model. </p>
        pub fn evaluation_data_end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.evaluation_data_end_time(input);
            self
        }
        /// <p> Indicates the time reference in the dataset that should be used to end the subset of evaluation data for the ML model. </p>
        pub fn set_evaluation_data_end_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_evaluation_data_end_time(input);
            self
        }
        /// <p> The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the ML model. </p>
        pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.role_arn(input.into());
            self
        }
        /// <p> The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the ML model. </p>
        pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_role_arn(input);
            self
        }
        /// <p>The configuration is the <code>TargetSamplingRate</code>, which is the sampling rate of the data after post-processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the <code>TargetSamplingRate</code> is 1 minute.</p>
        /// <p>When providing a value for the <code>TargetSamplingRate</code>, you must attach the prefix "PT" to the rate you want. The value for a 1 second rate is therefore <i>PT1S</i>, the value for a 15 minute rate is <i>PT15M</i>, and the value for a 1 hour rate is <i>PT1H</i>.</p>
        pub fn data_pre_processing_configuration(
            mut self,
            input: crate::model::DataPreProcessingConfiguration,
        ) -> Self {
            self.inner = self.inner.data_pre_processing_configuration(input);
            self
        }
        /// <p>The configuration is the <code>TargetSamplingRate</code>, which is the sampling rate of the data after post-processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the <code>TargetSamplingRate</code> is 1 minute.</p>
        /// <p>When providing a value for the <code>TargetSamplingRate</code>, you must attach the prefix "PT" to the rate you want. The value for a 1 second rate is therefore <i>PT1S</i>, the value for a 15 minute rate is <i>PT15M</i>, and the value for a 1 hour rate is <i>PT1H</i>.</p>
        pub fn set_data_pre_processing_configuration(
            mut self,
            input: std::option::Option<crate::model::DataPreProcessingConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_data_pre_processing_configuration(input);
            self
        }
        /// <p>Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. </p>
        pub fn server_side_kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.server_side_kms_key_id(input.into());
            self
        }
        /// <p>Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. </p>
        pub fn set_server_side_kms_key_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_server_side_kms_key_id(input);
            self
        }
        /// Appends an item to `Tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p> Any tags associated with the ML model being created. </p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            self.inner = self.inner.tags(input);
            self
        }
        /// <p> Any tags associated with the ML model being created. </p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.inner = self.inner.set_tags(input);
            self
        }
        /// <p>Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.</p>
        pub fn off_condition(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.off_condition(input.into());
            self
        }
        /// <p>Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.</p>
        pub fn set_off_condition(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_off_condition(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DeleteDataset`.
    ///
    /// <p> Deletes a dataset and associated artifacts. The operation will check to see if any inference scheduler or data ingestion job is currently using the dataset, and if there isn't, the dataset, its metadata, and any associated data stored in S3 will be deleted. This does not affect any models that used this dataset for training and evaluation, but does prevent it from being used in the future. </p>
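    ///
    /// # Examples
    /// A minimal sketch; the dataset name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) {
    /// match client.delete_dataset().dataset_name("my-dataset").send().await {
    ///     Ok(_) => println!("dataset deleted"),
    ///     Err(err) => eprintln!("DeleteDataset failed: {:?}", err),
    /// }
    /// # }
    /// ```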
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DeleteDataset {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::delete_dataset_input::Builder,
    }
    impl DeleteDataset {
        /// Creates a new `DeleteDataset`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DeleteDataset,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DeleteDatasetError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DeleteDatasetOutput,
            aws_smithy_http::result::SdkError<crate::error::DeleteDatasetError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the dataset to be deleted. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p>The name of the dataset to be deleted. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DeleteInferenceScheduler`.
    ///
    /// <p>Deletes an inference scheduler that has been set up. Already processed output results are not affected. </p>
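    ///
    /// # Examples
    /// A minimal sketch; the scheduler name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .delete_inference_scheduler()
    ///     .inference_scheduler_name("my-scheduler") // placeholder
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```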
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DeleteInferenceScheduler {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::delete_inference_scheduler_input::Builder,
    }
    impl DeleteInferenceScheduler {
        /// Creates a new `DeleteInferenceScheduler`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DeleteInferenceScheduler,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DeleteInferenceSchedulerError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DeleteInferenceSchedulerOutput,
            aws_smithy_http::result::SdkError<crate::error::DeleteInferenceSchedulerError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the inference scheduler to be deleted. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler to be deleted. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DeleteLabel`.
    ///
    /// <p> Deletes a label. </p>
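    ///
    /// # Examples
    /// A minimal sketch; the label group name and label ID below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .delete_label()
    ///     .label_group_name("my-label-group") // placeholder
    ///     .label_id("label-id-1234")          // placeholder
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```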
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DeleteLabel {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::delete_label_input::Builder,
    }
    impl DeleteLabel {
        /// Creates a new `DeleteLabel`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DeleteLabel,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DeleteLabelError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DeleteLabelOutput,
            aws_smithy_http::result::SdkError<crate::error::DeleteLabelError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> The name of the label group that contains the label that you want to delete. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> The name of the label group that contains the label that you want to delete. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
        /// <p> The ID of the label that you want to delete. </p>
        pub fn label_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_id(input.into());
            self
        }
        /// <p> The ID of the label that you want to delete. </p>
        pub fn set_label_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_label_id(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DeleteLabelGroup`.
    ///
    /// <p> Deletes a group of labels. </p>
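    ///
    /// # Examples
    /// A minimal sketch; the label group name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) {
    /// match client.delete_label_group().label_group_name("my-label-group").send().await {
    ///     Ok(_) => println!("label group deleted"),
    ///     Err(err) => eprintln!("DeleteLabelGroup failed: {:?}", err),
    /// }
    /// # }
    /// ```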
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DeleteLabelGroup {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::delete_label_group_input::Builder,
    }
    impl DeleteLabelGroup {
        /// Creates a new `DeleteLabelGroup`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DeleteLabelGroup,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DeleteLabelGroupError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DeleteLabelGroupOutput,
            aws_smithy_http::result::SdkError<crate::error::DeleteLabelGroupError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> The name of the label group that you want to delete. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> The name of the label group that you want to delete. Data in this field will be retained for service usage. Follow best practices for the security of your data. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DeleteModel`.
    ///
    /// <p>Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent it from being used with an inference scheduler, even one that is already set up. </p>
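    ///
    /// # Examples
    /// A minimal sketch; the model name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client.delete_model().model_name("my-model").send().await?;
    /// # Ok(())
    /// # }
    /// ```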
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DeleteModel {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::delete_model_input::Builder,
    }
    impl DeleteModel {
        /// Creates a new `DeleteModel`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DeleteModel,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DeleteModelError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DeleteModelOutput,
            aws_smithy_http::result::SdkError<crate::error::DeleteModelError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the ML model to be deleted. </p>
        pub fn model_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.model_name(input.into());
            self
        }
        /// <p>The name of the ML model to be deleted. </p>
        pub fn set_model_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_model_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DescribeDataIngestionJob`.
    ///
    /// <p>Provides information on a specific data ingestion job such as creation time, dataset ARN, and status.</p>
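    ///
    /// # Examples
    /// A minimal sketch; the job ID below is a placeholder, and the response is
    /// printed with its `Debug` implementation.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_data_ingestion_job()
    ///     .job_id("0123456789abcdef") // placeholder job ID
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```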
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DescribeDataIngestionJob {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::describe_data_ingestion_job_input::Builder,
    }
    impl DescribeDataIngestionJob {
        /// Creates a new `DescribeDataIngestionJob`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DescribeDataIngestionJob,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DescribeDataIngestionJobError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DescribeDataIngestionJobOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeDataIngestionJobError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The job ID of the data ingestion job. </p>
        pub fn job_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.job_id(input.into());
            self
        }
        /// <p>The job ID of the data ingestion job. </p>
        pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_job_id(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DescribeDataset`.
    ///
    /// <p>Provides a JSON description of the data in each time series dataset, including names, column names, and data types.</p>
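    ///
    /// # Examples
    /// A minimal sketch; the dataset name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_dataset()
    ///     .dataset_name("my-dataset") // placeholder
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```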
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DescribeDataset {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::describe_dataset_input::Builder,
    }
    impl DescribeDataset {
        /// Creates a new `DescribeDataset`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DescribeDataset,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DescribeDatasetError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DescribeDatasetOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeDatasetError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the dataset to be described. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p>The name of the dataset to be described. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DescribeInferenceScheduler`.
    ///
    /// <p> Provides information about the inference scheduler being used, including its name, model, status, and associated metadata. </p>
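    ///
    /// # Examples
    /// A minimal sketch; the scheduler name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_inference_scheduler()
    ///     .inference_scheduler_name("my-scheduler") // placeholder
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```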
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DescribeInferenceScheduler {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::describe_inference_scheduler_input::Builder,
    }
    impl DescribeInferenceScheduler {
        /// Creates a new `DescribeInferenceScheduler`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DescribeInferenceScheduler,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DescribeInferenceSchedulerError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DescribeInferenceSchedulerOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeInferenceSchedulerError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the inference scheduler being described. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler being described. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DescribeLabel`.
    ///
    /// <p> Returns information about a specific label, such as its start and end times, rating, and fault code. </p>
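    ///
    /// # Examples
    /// A minimal sketch; the label group name and label ID below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_label()
    ///     .label_group_name("my-label-group") // placeholder
    ///     .label_id("label-id-1234")          // placeholder
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```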
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DescribeLabel {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::describe_label_input::Builder,
    }
    impl DescribeLabel {
        /// Creates a new `DescribeLabel`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DescribeLabel,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DescribeLabelError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DescribeLabelOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeLabelError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> Returns the name of the group containing the label. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> Returns the name of the group containing the label. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
        /// <p> Returns the ID of the label. </p>
        pub fn label_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_id(input.into());
            self
        }
        /// <p> Returns the ID of the label. </p>
        pub fn set_label_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_label_id(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DescribeLabelGroup`.
    ///
    /// <p> Returns information about the label group. </p>
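    ///
    /// # Examples
    /// A minimal sketch; the label group name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_label_group()
    ///     .label_group_name("my-label-group") // placeholder
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```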
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DescribeLabelGroup {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::describe_label_group_input::Builder,
    }
    impl DescribeLabelGroup {
        /// Creates a new `DescribeLabelGroup`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DescribeLabelGroup,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DescribeLabelGroupError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DescribeLabelGroupOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeLabelGroupError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> Returns the name of the label group. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> Returns the name of the label group. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `DescribeModel`.
    ///
    /// <p>Provides a JSON description of the overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on. </p>
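    ///
    /// # Examples
    /// A minimal sketch; the model name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .describe_model()
    ///     .model_name("my-model") // placeholder
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```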
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct DescribeModel {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::describe_model_input::Builder,
    }
    impl DescribeModel {
        /// Creates a new `DescribeModel`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::DescribeModel,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::DescribeModelError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::DescribeModelOutput,
            aws_smithy_http::result::SdkError<crate::error::DescribeModelError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the ML model to be described. </p>
        pub fn model_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.model_name(input.into());
            self
        }
        /// <p>The name of the ML model to be described. </p>
        pub fn set_model_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_model_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListDataIngestionJobs`.
    ///
    /// <p>Provides a list of all data ingestion jobs, including dataset name and ARN, S3 location of the input data, status, and so on. </p>
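    ///
    /// # Example
    ///
    /// A minimal sketch of listing the ingestion jobs for one dataset; the dataset
    /// name below is a placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // List up to 10 data ingestion jobs for the named dataset.
    /// let resp = client
    ///     .list_data_ingestion_jobs()
    ///     .dataset_name("example-dataset")
    ///     .max_results(10)
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```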
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListDataIngestionJobs {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_data_ingestion_jobs_input::Builder,
    }
    impl ListDataIngestionJobs {
        /// Creates a new `ListDataIngestionJobs`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListDataIngestionJobs,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListDataIngestionJobsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListDataIngestionJobsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListDataIngestionJobsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListDataIngestionJobsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListDataIngestionJobsPaginator {
            crate::paginator::ListDataIngestionJobsPaginator::new(self.handle, self.inner)
        }
        /// <p>The name of the dataset being used for the data ingestion job. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p>The name of the dataset being used for the data ingestion job. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
        /// <p>An opaque pagination token indicating where to continue the listing of data ingestion jobs. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p>An opaque pagination token indicating where to continue the listing of data ingestion jobs. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p> Specifies the maximum number of data ingestion jobs to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p> Specifies the maximum number of data ingestion jobs to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>Indicates the status of the data ingestion job. </p>
        pub fn status(mut self, input: crate::model::IngestionJobStatus) -> Self {
            self.inner = self.inner.status(input);
            self
        }
        /// <p>Indicates the status of the data ingestion job. </p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::IngestionJobStatus>,
        ) -> Self {
            self.inner = self.inner.set_status(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListDatasets`.
    ///
    /// <p>Lists all datasets currently available in your account, filtering on the dataset name. </p>
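    ///
    /// # Example
    ///
    /// A minimal sketch of paging through every dataset with `into_paginator()`; it
    /// assumes `tokio_stream::StreamExt` is in scope so that `next()` can be called on
    /// the returned stream.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use tokio_stream::StreamExt;
    /// // Stream the dataset listing one page at a time until it is exhausted.
    /// let mut pages = client.list_datasets().into_paginator().send();
    /// while let Some(page) = pages.next().await {
    ///     println!("{:?}", page?);
    /// }
    /// # Ok(())
    /// # }
    /// ```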
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListDatasets {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_datasets_input::Builder,
    }
    impl ListDatasets {
        /// Creates a new `ListDatasets`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListDatasets,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListDatasetsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListDatasetsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListDatasetsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListDatasetsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListDatasetsPaginator {
            crate::paginator::ListDatasetsPaginator::new(self.handle, self.inner)
        }
        /// <p> An opaque pagination token indicating where to continue the listing of datasets. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of datasets. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p> Specifies the maximum number of datasets to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p> Specifies the maximum number of datasets to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>The beginning of the name of the datasets to be listed. </p>
        pub fn dataset_name_begins_with(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name_begins_with(input.into());
            self
        }
        /// <p>The beginning of the name of the datasets to be listed. </p>
        pub fn set_dataset_name_begins_with(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_dataset_name_begins_with(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListInferenceEvents`.
    ///
    /// <p> Lists all inference events that have been found for the specified inference scheduler. </p>
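    ///
    /// # Example
    ///
    /// A minimal sketch of listing inference events in a time window; the scheduler
    /// name is a placeholder and the timestamps are arbitrary epoch seconds built with
    /// `aws_smithy_types::DateTime::from_secs`.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // Query events recorded by the named scheduler between two points in time.
    /// let resp = client
    ///     .list_inference_events()
    ///     .inference_scheduler_name("example-scheduler")
    ///     .interval_start_time(aws_smithy_types::DateTime::from_secs(1_672_531_200))
    ///     .interval_end_time(aws_smithy_types::DateTime::from_secs(1_675_209_600))
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```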
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListInferenceEvents {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_inference_events_input::Builder,
    }
    impl ListInferenceEvents {
        /// Creates a new `ListInferenceEvents`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListInferenceEvents,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListInferenceEventsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListInferenceEventsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListInferenceEventsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListInferenceEventsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListInferenceEventsPaginator {
            crate::paginator::ListInferenceEventsPaginator::new(self.handle, self.inner)
        }
        /// <p>An opaque pagination token indicating where to continue the listing of inference events.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p>An opaque pagination token indicating where to continue the listing of inference events.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p>Specifies the maximum number of inference events to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p>Specifies the maximum number of inference events to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>The name of the inference scheduler for the inference events listed. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler for the inference events listed. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
        /// <p> Lookout for Equipment will return all the inference events with an end time equal to or greater than the start time given.</p>
        pub fn interval_start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.interval_start_time(input);
            self
        }
        /// <p> Lookout for Equipment will return all the inference events with an end time equal to or greater than the start time given.</p>
        pub fn set_interval_start_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_interval_start_time(input);
            self
        }
        /// <p>Returns all the inference events with a start time earlier than the end time given.</p>
        pub fn interval_end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.interval_end_time(input);
            self
        }
        /// <p>Returns all the inference events with a start time earlier than the end time given.</p>
        pub fn set_interval_end_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_interval_end_time(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListInferenceExecutions`.
    ///
    /// <p> Lists all inference executions that have been performed by the specified inference scheduler. </p>
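    ///
    /// # Example
    ///
    /// A minimal sketch of listing recent executions for one scheduler; the scheduler
    /// name below is a placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // Fetch up to 25 of the inference executions run by the named scheduler.
    /// let resp = client
    ///     .list_inference_executions()
    ///     .inference_scheduler_name("example-scheduler")
    ///     .max_results(25)
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```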
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListInferenceExecutions {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_inference_executions_input::Builder,
    }
    impl ListInferenceExecutions {
        /// Creates a new `ListInferenceExecutions`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListInferenceExecutions,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListInferenceExecutionsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListInferenceExecutionsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListInferenceExecutionsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListInferenceExecutionsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListInferenceExecutionsPaginator {
            crate::paginator::ListInferenceExecutionsPaginator::new(self.handle, self.inner)
        }
        /// <p>An opaque pagination token indicating where to continue the listing of inference executions.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p>An opaque pagination token indicating where to continue the listing of inference executions.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p>Specifies the maximum number of inference executions to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p>Specifies the maximum number of inference executions to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>The name of the inference scheduler for the inference executions listed. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler for the inference executions listed. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
        /// <p>The time reference in the inferenced dataset after which Amazon Lookout for Equipment started the inference execution. </p>
        pub fn data_start_time_after(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.data_start_time_after(input);
            self
        }
        /// <p>The time reference in the inferenced dataset after which Amazon Lookout for Equipment started the inference execution. </p>
        pub fn set_data_start_time_after(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_data_start_time_after(input);
            self
        }
        /// <p>The time reference in the inferenced dataset before which Amazon Lookout for Equipment stopped the inference execution. </p>
        pub fn data_end_time_before(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.data_end_time_before(input);
            self
        }
        /// <p>The time reference in the inferenced dataset before which Amazon Lookout for Equipment stopped the inference execution. </p>
        pub fn set_data_end_time_before(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_data_end_time_before(input);
            self
        }
        /// <p>The status of the inference execution. </p>
        pub fn status(mut self, input: crate::model::InferenceExecutionStatus) -> Self {
            self.inner = self.inner.status(input);
            self
        }
        /// <p>The status of the inference execution. </p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::InferenceExecutionStatus>,
        ) -> Self {
            self.inner = self.inner.set_status(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListInferenceSchedulers`.
    ///
    /// <p>Retrieves a list of all inference schedulers currently available for your account. </p>
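    ///
    /// # Example
    ///
    /// A minimal sketch of listing the schedulers attached to one ML model; the model
    /// name below is a placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // List the inference schedulers that use the named model.
    /// let resp = client
    ///     .list_inference_schedulers()
    ///     .model_name("example-model")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```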
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListInferenceSchedulers {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_inference_schedulers_input::Builder,
    }
    impl ListInferenceSchedulers {
        /// Creates a new `ListInferenceSchedulers`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListInferenceSchedulers,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListInferenceSchedulersError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListInferenceSchedulersOutput,
            aws_smithy_http::result::SdkError<crate::error::ListInferenceSchedulersError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListInferenceSchedulersPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListInferenceSchedulersPaginator {
            crate::paginator::ListInferenceSchedulersPaginator::new(self.handle, self.inner)
        }
        /// <p> An opaque pagination token indicating where to continue the listing of inference schedulers. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of inference schedulers. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p> Specifies the maximum number of inference schedulers to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p> Specifies the maximum number of inference schedulers to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>The beginning of the name of the inference schedulers to be listed. </p>
        pub fn inference_scheduler_name_begins_with(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.inner = self
                .inner
                .inference_scheduler_name_begins_with(input.into());
            self
        }
        /// <p>The beginning of the name of the inference schedulers to be listed. </p>
        pub fn set_inference_scheduler_name_begins_with(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name_begins_with(input);
            self
        }
        /// <p>The name of the ML model used by the inference scheduler to be listed. </p>
        pub fn model_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.model_name(input.into());
            self
        }
        /// <p>The name of the ML model used by the inference scheduler to be listed. </p>
        pub fn set_model_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_model_name(input);
            self
        }
        /// <p>Specifies the current status of the inference schedulers to list.</p>
        pub fn status(mut self, input: crate::model::InferenceSchedulerStatus) -> Self {
            self.inner = self.inner.status(input);
            self
        }
        /// <p>Specifies the current status of the inference schedulers to list.</p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::InferenceSchedulerStatus>,
        ) -> Self {
            self.inner = self.inner.set_status(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListLabelGroups`.
    ///
    /// <p> Returns a list of the label groups. </p>
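    ///
    /// # Example
    ///
    /// A minimal sketch of listing label groups by name prefix; the prefix below is a
    /// placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // List up to 50 label groups whose names start with the given prefix.
    /// let resp = client
    ///     .list_label_groups()
    ///     .label_group_name_begins_with("example-")
    ///     .max_results(50)
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```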
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListLabelGroups {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_label_groups_input::Builder,
    }
    impl ListLabelGroups {
        /// Creates a new `ListLabelGroups`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListLabelGroups,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListLabelGroupsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListLabelGroupsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListLabelGroupsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListLabelGroupsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListLabelGroupsPaginator {
            crate::paginator::ListLabelGroupsPaginator::new(self.handle, self.inner)
        }
        /// <p> The beginning of the name of the label groups to be listed. </p>
        pub fn label_group_name_begins_with(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.inner = self.inner.label_group_name_begins_with(input.into());
            self
        }
        /// <p> The beginning of the name of the label groups to be listed. </p>
        pub fn set_label_group_name_begins_with(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name_begins_with(input);
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of label groups. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of label groups. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p> Specifies the maximum number of label groups to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p> Specifies the maximum number of label groups to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListLabels`.
    ///
    /// <p> Provides a list of labels. </p>
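    ///
    /// # Example
    ///
    /// A minimal sketch of listing the labels in one label group; the group name below
    /// is a placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // List the labels that belong to the named label group.
    /// let resp = client
    ///     .list_labels()
    ///     .label_group_name("example-label-group")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```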
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListLabels {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_labels_input::Builder,
    }
    impl ListLabels {
        /// Creates a new `ListLabels`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListLabels,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListLabelsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListLabelsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListLabelsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListLabelsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListLabelsPaginator {
            crate::paginator::ListLabelsPaginator::new(self.handle, self.inner)
        }
        /// <p> The name of the label group whose labels are to be listed. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> The name of the label group whose labels are to be listed. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
        /// <p> Returns all the labels with an end time equal to or later than the start time given. </p>
        pub fn interval_start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.interval_start_time(input);
            self
        }
        /// <p> Returns all the labels with an end time equal to or later than the start time given. </p>
        pub fn set_interval_start_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_interval_start_time(input);
            self
        }
        /// <p> Returns all labels with a start time earlier than the end time given. </p>
        pub fn interval_end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.inner = self.inner.interval_end_time(input);
            self
        }
        /// <p> Returns all labels with a start time earlier than the end time given. </p>
        pub fn set_interval_end_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.inner = self.inner.set_interval_end_time(input);
            self
        }
        /// <p> Returns labels with a particular fault code. </p>
        pub fn fault_code(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.fault_code(input.into());
            self
        }
        /// <p> Returns labels with a particular fault code. </p>
        pub fn set_fault_code(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_fault_code(input);
            self
        }
        /// <p> Lists the labels that pertain to a particular piece of equipment. </p>
        pub fn equipment(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.equipment(input.into());
            self
        }
        /// <p> Lists the labels that pertain to a particular piece of equipment. </p>
        pub fn set_equipment(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_equipment(input);
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of labels. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of labels. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p> Specifies the maximum number of labels to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p> Specifies the maximum number of labels to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListModels`.
    ///
    /// <p>Generates a list of all models in the account, including model name and ARN, dataset, and status. </p>
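    ///
    /// # Example
    ///
    /// A minimal sketch of listing models filtered by a name prefix; the prefix below
    /// is a placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // List up to 25 models whose names start with the given prefix.
    /// let resp = client
    ///     .list_models()
    ///     .model_name_begins_with("example-")
    ///     .max_results(25)
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```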
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListModels {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_models_input::Builder,
    }
    impl ListModels {
        /// Creates a new `ListModels`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListModels,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListModelsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListModelsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListModelsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListModelsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListModelsPaginator {
            crate::paginator::ListModelsPaginator::new(self.handle, self.inner)
        }
        /// <p> An opaque pagination token indicating where to continue the listing of ML models. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p> An opaque pagination token indicating where to continue the listing of ML models. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
        /// <p> Specifies the maximum number of ML models to list. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p> Specifies the maximum number of ML models to list. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>The status of the ML model. </p>
        pub fn status(mut self, input: crate::model::ModelStatus) -> Self {
            self.inner = self.inner.status(input);
            self
        }
        /// <p>The status of the ML model. </p>
        pub fn set_status(mut self, input: std::option::Option<crate::model::ModelStatus>) -> Self {
            self.inner = self.inner.set_status(input);
            self
        }
        /// <p>The beginning of the name of the ML models being listed. </p>
        pub fn model_name_begins_with(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.model_name_begins_with(input.into());
            self
        }
        /// <p>The beginning of the name of the ML models being listed. </p>
        pub fn set_model_name_begins_with(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_model_name_begins_with(input);
            self
        }
        /// <p>The beginning of the name of the dataset of the ML models to be listed. </p>
        pub fn dataset_name_begins_with(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name_begins_with(input.into());
            self
        }
        /// <p>The beginning of the name of the dataset of the ML models to be listed. </p>
        pub fn set_dataset_name_begins_with(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_dataset_name_begins_with(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListSensorStatistics`.
    ///
    /// <p> Lists statistics about the data collected for each of the sensors that have been successfully ingested in the particular dataset. Can also be used to retrieve Sensor Statistics for a previous ingestion job. </p>
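    ///
    /// # Example
    ///
    /// A minimal sketch of retrieving sensor statistics for one dataset; the dataset
    /// name below is a placeholder.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // Fetch statistics for up to 20 sensors in the named dataset.
    /// let resp = client
    ///     .list_sensor_statistics()
    ///     .dataset_name("example-dataset")
    ///     .max_results(20)
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```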
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListSensorStatistics {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_sensor_statistics_input::Builder,
    }
    impl ListSensorStatistics {
        /// Creates a new `ListSensorStatistics`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListSensorStatistics,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListSensorStatisticsError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListSensorStatisticsOutput,
            aws_smithy_http::result::SdkError<crate::error::ListSensorStatisticsError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// Create a paginator for this request
        ///
        /// Paginators are used by calling [`send().await`](crate::paginator::ListSensorStatisticsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
        pub fn into_paginator(self) -> crate::paginator::ListSensorStatisticsPaginator {
            crate::paginator::ListSensorStatisticsPaginator::new(self.handle, self.inner)
        }
        /// <p> The name of the dataset associated with the list of Sensor Statistics. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p> The name of the dataset associated with the list of Sensor Statistics. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
        /// <p> The ingestion job id associated with the list of Sensor Statistics. To get sensor statistics for a particular ingestion job id, both dataset name and ingestion job id must be submitted as inputs. </p>
        pub fn ingestion_job_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.ingestion_job_id(input.into());
            self
        }
        /// <p> The ingestion job id associated with the list of Sensor Statistics. To get sensor statistics for a particular ingestion job id, both dataset name and ingestion job id must be submitted as inputs. </p>
        pub fn set_ingestion_job_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_ingestion_job_id(input);
            self
        }
        /// <p>Specifies the maximum number of sensors for which to retrieve statistics. </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.inner = self.inner.max_results(input);
            self
        }
        /// <p>Specifies the maximum number of sensors for which to retrieve statistics. </p>
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.inner = self.inner.set_max_results(input);
            self
        }
        /// <p>An opaque pagination token indicating where to continue the listing of sensor statistics. </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.next_token(input.into());
            self
        }
        /// <p>An opaque pagination token indicating where to continue the listing of sensor statistics. </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_next_token(input);
            self
        }
    }
    /// Fluent builder constructing a request to `ListTagsForResource`.
    ///
    /// <p>Lists all the tags for a specified resource, including key and value. </p>
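    ///
    /// # Example
    ///
    /// A minimal sketch of listing the tags on a resource; the ARN below is a
    /// placeholder, not a real resource.
    ///
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client)
    /// # -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// // List the tags attached to the resource identified by this ARN.
    /// let resp = client
    ///     .list_tags_for_resource()
    ///     .resource_arn("arn:aws:lookoutequipment:us-east-1:111122223333:dataset/example-dataset")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```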
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct ListTagsForResource {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::list_tags_for_resource_input::Builder,
    }
    impl ListTagsForResource {
        /// Creates a new `ListTagsForResource`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consume this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::ListTagsForResource,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ListTagsForResourceOutput,
            aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Resource Name (ARN) of the resource (such as the dataset or model) that is the focus of the <code>ListTagsForResource</code> operation. </p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.resource_arn(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the resource (such as the dataset or model) that is the focus of the <code>ListTagsForResource</code> operation. </p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_resource_arn(input);
            self
        }
    }
    /// Fluent builder constructing a request to `StartDataIngestionJob`.
    ///
    /// <p>Starts a data ingestion job. Amazon Lookout for Equipment returns the job status. </p>
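    ///
    /// # Example
    ///
    /// A minimal usage sketch, assuming the generated model builders in `aws_sdk_lookoutequipment::model`;
    /// the dataset name, bucket, prefix, and role ARN below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_sdk_lookoutequipment::model::{IngestionInputConfiguration, S3InputConfiguration};
    ///
    /// // Point the ingestion job at the S3 location that holds the sensor data.
    /// let input_config = IngestionInputConfiguration::builder()
    ///     .s3_input_configuration(
    ///         S3InputConfiguration::builder()
    ///             .bucket("example-bucket")
    ///             .prefix("sensor-data/")
    ///             .build(),
    ///     )
    ///     .build();
    ///
    /// let resp = client
    ///     .start_data_ingestion_job()
    ///     .dataset_name("example-dataset")
    ///     .ingestion_input_configuration(input_config)
    ///     .role_arn("arn:aws:iam::111122223333:role/example-ingestion-role")
    ///     .client_token("example-idempotency-token")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```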
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct StartDataIngestionJob {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::start_data_ingestion_job_input::Builder,
    }
    impl StartDataIngestionJob {
        /// Creates a new `StartDataIngestionJob`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::StartDataIngestionJob,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::StartDataIngestionJobError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::StartDataIngestionJobOutput,
            aws_smithy_http::result::SdkError<crate::error::StartDataIngestionJobError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the dataset being used by the data ingestion job. </p>
        pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.dataset_name(input.into());
            self
        }
        /// <p>The name of the dataset being used by the data ingestion job. </p>
        pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_dataset_name(input);
            self
        }
        /// <p> Specifies information for the input data for the data ingestion job, including the S3 location of the dataset. </p>
        pub fn ingestion_input_configuration(
            mut self,
            input: crate::model::IngestionInputConfiguration,
        ) -> Self {
            self.inner = self.inner.ingestion_input_configuration(input);
            self
        }
        /// <p> Specifies information for the input data for the data ingestion job, including the S3 location of the dataset. </p>
        pub fn set_ingestion_input_configuration(
            mut self,
            input: std::option::Option<crate::model::IngestionInputConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_ingestion_input_configuration(input);
            self
        }
        /// <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the data ingestion job. </p>
        pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.role_arn(input.into());
            self
        }
        /// <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the data ingestion job. </p>
        pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_role_arn(input);
            self
        }
        /// <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.client_token(input.into());
            self
        }
        /// <p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>
        pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_client_token(input);
            self
        }
    }
    /// Fluent builder constructing a request to `StartInferenceScheduler`.
    ///
    /// <p>Starts an inference scheduler. </p>
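    ///
    /// # Example
    ///
    /// A minimal usage sketch; the scheduler name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .start_inference_scheduler()
    ///     .inference_scheduler_name("example-scheduler")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```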
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct StartInferenceScheduler {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::start_inference_scheduler_input::Builder,
    }
    impl StartInferenceScheduler {
        /// Creates a new `StartInferenceScheduler`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::StartInferenceScheduler,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::StartInferenceSchedulerError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::StartInferenceSchedulerOutput,
            aws_smithy_http::result::SdkError<crate::error::StartInferenceSchedulerError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the inference scheduler to be started. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler to be started. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `StopInferenceScheduler`.
    ///
    /// <p>Stops an inference scheduler. </p>
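    ///
    /// # Example
    ///
    /// A minimal usage sketch; the scheduler name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// let resp = client
    ///     .stop_inference_scheduler()
    ///     .inference_scheduler_name("example-scheduler")
    ///     .send()
    ///     .await?;
    /// println!("{:?}", resp);
    /// # Ok(())
    /// # }
    /// ```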
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct StopInferenceScheduler {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::stop_inference_scheduler_input::Builder,
    }
    impl StopInferenceScheduler {
        /// Creates a new `StopInferenceScheduler`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::StopInferenceScheduler,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::StopInferenceSchedulerError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::StopInferenceSchedulerOutput,
            aws_smithy_http::result::SdkError<crate::error::StopInferenceSchedulerError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the inference scheduler to be stopped. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler to be stopped. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
    }
    /// Fluent builder constructing a request to `TagResource`.
    ///
    /// <p>Associates a given tag with a resource in your account. A tag is a key-value pair that can be added to an Amazon Lookout for Equipment resource as metadata. Tags can be used for organizing your resources as well as helping you to search and filter by tag. Multiple tags can be added to a resource, either when you create it or later. Up to 50 tags can be associated with each resource. </p>
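    ///
    /// # Example
    ///
    /// A minimal usage sketch, assuming the generated `Tag` builder in `aws_sdk_lookoutequipment::model`;
    /// the resource ARN and the tag key/value below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// use aws_sdk_lookoutequipment::model::Tag;
    ///
    /// client
    ///     .tag_resource()
    ///     .resource_arn("arn:aws:lookoutequipment:us-east-1:111122223333:dataset/example-dataset")
    ///     .tags(Tag::builder().key("team").value("reliability").build())
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```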
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct TagResource {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::tag_resource_input::Builder,
    }
    impl TagResource {
        /// Creates a new `TagResource`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::TagResource,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::TagResourceOutput,
            aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Resource Name (ARN) of the specific resource to which the tag should be associated. </p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.resource_arn(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the specific resource to which the tag should be associated. </p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_resource_arn(input);
            self
        }
        /// Appends an item to `Tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>The tag or tags to be associated with a specific resource. Both the tag key and value are specified. </p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            self.inner = self.inner.tags(input);
            self
        }
        /// <p>The tag or tags to be associated with a specific resource. Both the tag key and value are specified. </p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.inner = self.inner.set_tags(input);
            self
        }
    }
    /// Fluent builder constructing a request to `UntagResource`.
    ///
    /// <p>Removes a specific tag from a given resource. The tag is specified by its key. </p>
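    ///
    /// # Example
    ///
    /// A minimal usage sketch; the resource ARN and tag key below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .untag_resource()
    ///     .resource_arn("arn:aws:lookoutequipment:us-east-1:111122223333:dataset/example-dataset")
    ///     .tag_keys("team")
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```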
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct UntagResource {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::untag_resource_input::Builder,
    }
    impl UntagResource {
        /// Creates a new `UntagResource`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::UntagResource,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::UntagResourceOutput,
            aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The Amazon Resource Name (ARN) of the resource to which the tag is currently associated. </p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.resource_arn(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the resource to which the tag is currently associated. </p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_resource_arn(input);
            self
        }
        /// Appends an item to `TagKeys`.
        ///
        /// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
        ///
        /// <p>Specifies the key of the tag to be removed from a specified resource. </p>
        pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.tag_keys(input.into());
            self
        }
        /// <p>Specifies the key of the tag to be removed from a specified resource. </p>
        pub fn set_tag_keys(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.inner = self.inner.set_tag_keys(input);
            self
        }
    }
    /// Fluent builder constructing a request to `UpdateInferenceScheduler`.
    ///
    /// <p>Updates an inference scheduler. </p>
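    ///
    /// # Example
    ///
    /// A minimal usage sketch that only changes the data delay offset; the scheduler name below is a placeholder.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .update_inference_scheduler()
    ///     .inference_scheduler_name("example-scheduler")
    ///     .data_delay_offset_in_minutes(5)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```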
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct UpdateInferenceScheduler {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::update_inference_scheduler_input::Builder,
    }
    impl UpdateInferenceScheduler {
        /// Creates a new `UpdateInferenceScheduler`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::UpdateInferenceScheduler,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::UpdateInferenceSchedulerError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::UpdateInferenceSchedulerOutput,
            aws_smithy_http::result::SdkError<crate::error::UpdateInferenceSchedulerError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p>The name of the inference scheduler to be updated. </p>
        pub fn inference_scheduler_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.inference_scheduler_name(input.into());
            self
        }
        /// <p>The name of the inference scheduler to be updated. </p>
        pub fn set_inference_scheduler_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_inference_scheduler_name(input);
            self
        }
        /// <p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark; the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. You can upload data at the same frequency and do not need to stop and restart the scheduler when uploading new data.</p>
        pub fn data_delay_offset_in_minutes(mut self, input: i64) -> Self {
            self.inner = self.inner.data_delay_offset_in_minutes(input);
            self
        }
        /// <p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark; the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. You can upload data at the same frequency and do not need to stop and restart the scheduler when uploading new data.</p>
        pub fn set_data_delay_offset_in_minutes(mut self, input: std::option::Option<i64>) -> Self {
            self.inner = self.inner.set_data_delay_offset_in_minutes(input);
            self
        }
        /// <p>How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes. </p>
        pub fn data_upload_frequency(mut self, input: crate::model::DataUploadFrequency) -> Self {
            self.inner = self.inner.data_upload_frequency(input);
            self
        }
        /// <p>How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes. </p>
        pub fn set_data_upload_frequency(
            mut self,
            input: std::option::Option<crate::model::DataUploadFrequency>,
        ) -> Self {
            self.inner = self.inner.set_data_upload_frequency(input);
            self
        }
        /// <p> Specifies information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
        pub fn data_input_configuration(
            mut self,
            input: crate::model::InferenceInputConfiguration,
        ) -> Self {
            self.inner = self.inner.data_input_configuration(input);
            self
        }
        /// <p> Specifies information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>
        pub fn set_data_input_configuration(
            mut self,
            input: std::option::Option<crate::model::InferenceInputConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_data_input_configuration(input);
            self
        }
        /// <p> Specifies information for the output results from the inference scheduler, including the output S3 location. </p>
        pub fn data_output_configuration(
            mut self,
            input: crate::model::InferenceOutputConfiguration,
        ) -> Self {
            self.inner = self.inner.data_output_configuration(input);
            self
        }
        /// <p> Specifies information for the output results from the inference scheduler, including the output S3 location. </p>
        pub fn set_data_output_configuration(
            mut self,
            input: std::option::Option<crate::model::InferenceOutputConfiguration>,
        ) -> Self {
            self.inner = self.inner.set_data_output_configuration(input);
            self
        }
        /// <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler. </p>
        pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.role_arn(input.into());
            self
        }
        /// <p> The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler. </p>
        pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_role_arn(input);
            self
        }
    }
    /// Fluent builder constructing a request to `UpdateLabelGroup`.
    ///
    /// <p> Updates the label group. </p>
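    ///
    /// # Example
    ///
    /// A minimal usage sketch; the label group name and fault code below are placeholders.
    /// ```rust,no_run
    /// # async fn example(client: &aws_sdk_lookoutequipment::Client) -> Result<(), aws_sdk_lookoutequipment::Error> {
    /// client
    ///     .update_label_group()
    ///     .label_group_name("example-label-group")
    ///     .fault_codes("PUMP_FAILURE")
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```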
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct UpdateLabelGroup {
        handle: std::sync::Arc<super::Handle>,
        inner: crate::input::update_label_group_input::Builder,
    }
    impl UpdateLabelGroup {
        /// Creates a new `UpdateLabelGroup`.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }

        /// Consumes this builder, creating a customizable operation that can be modified before being
        /// sent. The operation's inner [http::Request] can be modified as well.
        pub async fn customize(
            self,
        ) -> std::result::Result<
            crate::operation::customize::CustomizableOperation<
                crate::operation::UpdateLabelGroup,
                aws_http::retry::AwsResponseRetryClassifier,
            >,
            aws_smithy_http::result::SdkError<crate::error::UpdateLabelGroupError>,
        > {
            let handle = self.handle.clone();
            let operation = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            Ok(crate::operation::customize::CustomizableOperation { handle, operation })
        }

        /// Sends the request and returns the response.
        ///
        /// If an error occurs, an `SdkError` will be returned with additional details that
        /// can be matched against.
        ///
        /// By default, any retryable failures will be retried twice. Retry behavior
        /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
        /// set when configuring the client.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::UpdateLabelGroupOutput,
            aws_smithy_http::result::SdkError<crate::error::UpdateLabelGroupError>,
        > {
            let op = self
                .inner
                .build()
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?
                .make_operation(&self.handle.conf)
                .await
                .map_err(aws_smithy_http::result::SdkError::construction_failure)?;
            self.handle.client.call(op).await
        }
        /// <p> The name of the label group to be updated. </p>
        pub fn label_group_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.label_group_name(input.into());
            self
        }
        /// <p> The name of the label group to be updated. </p>
        pub fn set_label_group_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_label_group_name(input);
            self
        }
        /// Appends an item to `FaultCodes`.
        ///
        /// To override the contents of this collection use [`set_fault_codes`](Self::set_fault_codes).
        ///
        /// <p> Updates the code indicating the type of anomaly associated with the label. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn fault_codes(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.fault_codes(input.into());
            self
        }
        /// <p> Updates the code indicating the type of anomaly associated with the label. </p>
        /// <p>Data in this field will be retained for service usage. Follow best practices for the security of your data.</p>
        pub fn set_fault_codes(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.inner = self.inner.set_fault_codes(input);
            self
        }
    }
}

impl Client {
    /// Creates a new client from an [SDK Config](aws_types::sdk_config::SdkConfig).
    ///
    /// # Panics
    ///
    /// - This method will panic if the `sdk_config` is missing an async sleep implementation. If you experience this panic, set
    ///     the `sleep_impl` on the Config passed into this function to fix it.
    /// - This method will panic if the `sdk_config` is missing an HTTP connector. If you experience this panic, set the
    ///     `http_connector` on the Config passed into this function to fix it.
    pub fn new(sdk_config: &aws_types::sdk_config::SdkConfig) -> Self {
        Self::from_conf(sdk_config.into())
    }

    /// Creates a new client from the service [`Config`](crate::Config).
    ///
    /// # Panics
    ///
    /// - This method will panic if the `conf` is missing an async sleep implementation. If you experience this panic, set
    ///     the `sleep_impl` on the Config passed into this function to fix it.
    /// - This method will panic if the `conf` is missing an HTTP connector. If you experience this panic, set the
    ///     `http_connector` on the Config passed into this function to fix it.
    pub fn from_conf(conf: crate::Config) -> Self {
        let retry_config = conf
            .retry_config()
            .cloned()
            .unwrap_or_else(aws_smithy_types::retry::RetryConfig::disabled);
        let timeout_config = conf
            .timeout_config()
            .cloned()
            .unwrap_or_else(aws_smithy_types::timeout::TimeoutConfig::disabled);
        let sleep_impl = conf.sleep_impl();
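        // Retries and timeouts rely on an async sleep implementation, so fail fast if one is not configured.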
        if (retry_config.has_retry() || timeout_config.has_timeouts()) && sleep_impl.is_none() {
            panic!("An async sleep implementation is required for retries or timeouts to work. \
                                    Set the `sleep_impl` on the Config passed into this function to fix this panic.");
        }

        let connector = conf.http_connector().and_then(|c| {
            let timeout_config = conf
                .timeout_config()
                .cloned()
                .unwrap_or_else(aws_smithy_types::timeout::TimeoutConfig::disabled);
            let connector_settings =
                aws_smithy_client::http_connector::ConnectorSettings::from_timeout_config(
                    &timeout_config,
                );
            c.connector(&connector_settings, conf.sleep_impl())
        });

        let builder = aws_smithy_client::Builder::new();

        let builder = match connector {
            // Use provided connector
            Some(c) => builder.connector(c),
            None => {
                #[cfg(any(feature = "rustls", feature = "native-tls"))]
                {
                    // Use default connector based on enabled features
                    builder.dyn_https_connector(
                        aws_smithy_client::http_connector::ConnectorSettings::from_timeout_config(
                            &timeout_config,
                        ),
                    )
                }
                #[cfg(not(any(feature = "rustls", feature = "native-tls")))]
                {
                    panic!("No HTTP connector was available. Enable the `rustls` or `native-tls` crate feature or set a connector to fix this.");
                }
            }
        };
        let mut builder = builder
            .middleware(aws_smithy_client::erase::DynMiddleware::new(
                crate::middleware::DefaultMiddleware::new(),
            ))
            .retry_config(retry_config.into())
            .operation_timeout_config(timeout_config.into());
        builder.set_sleep_impl(sleep_impl);
        let client = builder.build();

        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}