// aws-sdk-machinelearning 0.24.0: AWS SDK for Amazon Machine Learning
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.

/// <p>The output from a <code>Predict</code> operation: </p>
/// <ul>
/// <li> <p> <code>Details</code> - Contains the following attributes:</p>
/// <ul>
/// <li> <p> <code>DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS</code> </p> </li>
/// <li> <p> <code>DetailsAttributes.ALGORITHM - SGD</code> </p> </li>
/// </ul> </li>
/// <li> <p> <code>PredictedLabel</code> - Present for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code> request. </p> </li>
/// <li> <p> <code>PredictedScores</code> - Contains the raw classification score corresponding to each label. </p> </li>
/// <li> <p> <code>PredictedValue</code> - Present for a <code>REGRESSION</code> <code>MLModel</code> request. </p> </li>
/// </ul>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Prediction {
    /// <p>The prediction label for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub predicted_label: std::option::Option<std::string::String>,
    /// <p>The prediction value for <code>REGRESSION</code> <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub predicted_value: std::option::Option<f32>,
    /// <p>Provides the raw classification score corresponding to each label.</p>
    #[doc(hidden)]
    pub predicted_scores: std::option::Option<std::collections::HashMap<std::string::String, f32>>,
    /// <p>Provides any additional details regarding the prediction.</p>
    #[doc(hidden)]
    pub details: std::option::Option<
        std::collections::HashMap<crate::model::DetailsAttributes, std::string::String>,
    >,
}
impl Prediction {
    /// <p>The prediction label for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code>.</p>
    pub fn predicted_label(&self) -> std::option::Option<&str> {
        self.predicted_label.as_deref()
    }
    /// <p>The prediction value for <code>REGRESSION</code> <code>MLModel</code>.</p>
    pub fn predicted_value(&self) -> std::option::Option<f32> {
        self.predicted_value
    }
    /// <p>Provides the raw classification score corresponding to each label.</p>
    pub fn predicted_scores(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, f32>> {
        self.predicted_scores.as_ref()
    }
    /// <p>Provides any additional details regarding the prediction.</p>
    pub fn details(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<crate::model::DetailsAttributes, std::string::String>,
    > {
        self.details.as_ref()
    }
}
/// See [`Prediction`](crate::model::Prediction).
pub mod prediction {

    /// A builder for [`Prediction`](crate::model::Prediction).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) predicted_label: std::option::Option<std::string::String>,
        pub(crate) predicted_value: std::option::Option<f32>,
        pub(crate) predicted_scores:
            std::option::Option<std::collections::HashMap<std::string::String, f32>>,
        pub(crate) details: std::option::Option<
            std::collections::HashMap<crate::model::DetailsAttributes, std::string::String>,
        >,
    }
    impl Builder {
        /// <p>The prediction label for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code>.</p>
        pub fn predicted_label(mut self, input: impl Into<std::string::String>) -> Self {
            self.predicted_label = Some(input.into());
            self
        }
        /// <p>The prediction label for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code>.</p>
        pub fn set_predicted_label(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.predicted_label = input;
            self
        }
        /// <p>The prediction value for <code>REGRESSION</code> <code>MLModel</code>.</p>
        pub fn predicted_value(mut self, input: f32) -> Self {
            self.predicted_value = Some(input);
            self
        }
        /// <p>The prediction value for <code>REGRESSION</code> <code>MLModel</code>.</p>
        pub fn set_predicted_value(mut self, input: std::option::Option<f32>) -> Self {
            self.predicted_value = input;
            self
        }
        /// Adds a key-value pair to `predicted_scores`.
        ///
        /// To override the contents of this collection use [`set_predicted_scores`](Self::set_predicted_scores).
        ///
        /// <p>Provides the raw classification score corresponding to each label.</p>
        pub fn predicted_scores(mut self, k: impl Into<std::string::String>, v: f32) -> Self {
            let mut hash_map = self.predicted_scores.unwrap_or_default();
            hash_map.insert(k.into(), v);
            self.predicted_scores = Some(hash_map);
            self
        }
        /// <p>Provides the raw classification score corresponding to each label.</p>
        pub fn set_predicted_scores(
            mut self,
            input: std::option::Option<std::collections::HashMap<std::string::String, f32>>,
        ) -> Self {
            self.predicted_scores = input;
            self
        }
        /// Adds a key-value pair to `details`.
        ///
        /// To override the contents of this collection use [`set_details`](Self::set_details).
        ///
        /// <p>Provides any additional details regarding the prediction.</p>
        pub fn details(
            mut self,
            k: crate::model::DetailsAttributes,
            v: impl Into<std::string::String>,
        ) -> Self {
            let mut hash_map = self.details.unwrap_or_default();
            hash_map.insert(k, v.into());
            self.details = Some(hash_map);
            self
        }
        /// <p>Provides any additional details regarding the prediction.</p>
        pub fn set_details(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<crate::model::DetailsAttributes, std::string::String>,
            >,
        ) -> Self {
            self.details = input;
            self
        }
        /// Consumes the builder and constructs a [`Prediction`](crate::model::Prediction).
        pub fn build(self) -> crate::model::Prediction {
            crate::model::Prediction {
                predicted_label: self.predicted_label,
                predicted_value: self.predicted_value,
                predicted_scores: self.predicted_scores,
                details: self.details,
            }
        }
    }
}
impl Prediction {
    /// Creates a new builder-style object to manufacture [`Prediction`](crate::model::Prediction).
    pub fn builder() -> crate::model::prediction::Builder {
        crate::model::prediction::Builder::default()
    }
}
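
// Illustrative usage sketch: builds a `Prediction` the way the service would populate one
// for a BINARY model and reads it back through the accessors. The label, score, and
// attribute values below are placeholders, not real service output.
#[test]
fn prediction_builder_round_trip() {
    use crate::model::{DetailsAttributes, Prediction};

    let prediction = Prediction::builder()
        .predicted_label("1")
        .predicted_scores("1", 0.87)
        .details(DetailsAttributes::PredictiveModelType, "BINARY")
        .details(DetailsAttributes::Algorithm, "SGD")
        .build();

    // Accessors return `Option`-wrapped borrows of the underlying fields.
    assert_eq!(prediction.predicted_label(), Some("1"));
    let score = prediction
        .predicted_scores()
        .and_then(|scores| scores.get("1"))
        .copied();
    assert_eq!(score, Some(0.87));
}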

/// When writing a match expression against `DetailsAttributes`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let detailsattributes = unimplemented!();
/// match detailsattributes {
///     DetailsAttributes::Algorithm => { /* ... */ },
///     DetailsAttributes::PredictiveModelType => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `detailsattributes` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `DetailsAttributes::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DetailsAttributes::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK where the variant `DetailsAttributes::NewFeature` is defined.
/// Specifically, when `detailsattributes` represents `NewFeature`,
/// the execution path will hit the second-to-last match arm as before, by virtue of
/// calling `as_str` on `DetailsAttributes::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>Contains the keys of the <code>DetailsMap</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>PredictiveModelType</code> - Indicates the type of the <code>MLModel</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>Algorithm</code> - Indicates the algorithm that was used for the <code>MLModel</code>.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DetailsAttributes {
    #[allow(missing_docs)] // documentation missing in model
    Algorithm,
    #[allow(missing_docs)] // documentation missing in model
    PredictiveModelType,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DetailsAttributes {
    fn from(s: &str) -> Self {
        match s {
            "Algorithm" => DetailsAttributes::Algorithm,
            "PredictiveModelType" => DetailsAttributes::PredictiveModelType,
            other => {
                DetailsAttributes::Unknown(crate::types::UnknownVariantValue(other.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for DetailsAttributes {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(DetailsAttributes::from(s))
    }
}
impl DetailsAttributes {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            DetailsAttributes::Algorithm => "Algorithm",
            DetailsAttributes::PredictiveModelType => "PredictiveModelType",
            DetailsAttributes::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["Algorithm", "PredictiveModelType"]
    }
}
impl AsRef<str> for DetailsAttributes {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
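
// Illustrative sketch of the string conversions generated for `DetailsAttributes`:
// known strings map to concrete variants, anything else is preserved in `Unknown`,
// and `as_str`/`values` expose the wire-level names. The `"NewFeature"` string is a
// hypothetical future value, not something the service returns today.
#[test]
fn details_attributes_string_conversions() {
    use crate::model::DetailsAttributes;

    assert_eq!(
        DetailsAttributes::from("Algorithm"),
        DetailsAttributes::Algorithm
    );
    assert_eq!(DetailsAttributes::Algorithm.as_str(), "Algorithm");

    // A value unknown to this version of the SDK still round-trips through `as_str`.
    let future = DetailsAttributes::from("NewFeature");
    assert_eq!(future.as_str(), "NewFeature");

    // `values` lists only the variants known at code-generation time.
    assert_eq!(
        DetailsAttributes::values(),
        &["Algorithm", "PredictiveModelType"]
    );
}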

/// When writing a match expression against `MlModelType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let mlmodeltype = unimplemented!();
/// match mlmodeltype {
///     MlModelType::Binary => { /* ... */ },
///     MlModelType::Multiclass => { /* ... */ },
///     MlModelType::Regression => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `mlmodeltype` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `MlModelType::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `MlModelType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK where the variant `MlModelType::NewFeature` is defined.
/// Specifically, when `mlmodeltype` represents `NewFeature`,
/// the execution path will hit the second-to-last match arm as before, by virtue of
/// calling `as_str` on `MlModelType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum MlModelType {
    #[allow(missing_docs)] // documentation missing in model
    Binary,
    #[allow(missing_docs)] // documentation missing in model
    Multiclass,
    #[allow(missing_docs)] // documentation missing in model
    Regression,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for MlModelType {
    fn from(s: &str) -> Self {
        match s {
            "BINARY" => MlModelType::Binary,
            "MULTICLASS" => MlModelType::Multiclass,
            "REGRESSION" => MlModelType::Regression,
            other => MlModelType::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for MlModelType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(MlModelType::from(s))
    }
}
impl MlModelType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            MlModelType::Binary => "BINARY",
            MlModelType::Multiclass => "MULTICLASS",
            MlModelType::Regression => "REGRESSION",
            MlModelType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["BINARY", "MULTICLASS", "REGRESSION"]
    }
}
impl AsRef<str> for MlModelType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
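
// Illustrative sketch: because `FromStr` for `MlModelType` is infallible, `str::parse`
// never fails; strings the SDK does not recognize simply become the `Unknown` variant.
// `"SOMETHING_ELSE"` is a made-up value used only to show that behavior.
#[test]
fn ml_model_type_parsing() {
    use crate::model::MlModelType;

    let multiclass: MlModelType = "MULTICLASS".parse().unwrap();
    assert_eq!(multiclass, MlModelType::Multiclass);

    let unrecognized: MlModelType = "SOMETHING_ELSE".parse().unwrap();
    assert_eq!(unrecognized.as_str(), "SOMETHING_ELSE");
}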

/// <p> Describes the real-time endpoint information for an <code>MLModel</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RealtimeEndpointInfo {
    /// <p> The maximum processing rate for the real-time endpoint for <code>MLModel</code>, measured in incoming requests per second.</p>
    #[doc(hidden)]
    pub peak_requests_per_second: i32,
    /// <p>The time that the request to create the real-time endpoint for the <code>MLModel</code> was received. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The URI that specifies where to send real-time prediction requests for the <code>MLModel</code>.</p>
    /// <p> <b>Note:</b> The application must wait until the real-time endpoint is ready before using this URI.</p>
    #[doc(hidden)]
    pub endpoint_url: std::option::Option<std::string::String>,
    /// <p> The current status of the real-time endpoint for the <code>MLModel</code>. This element can have one of the following values: </p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Endpoint does not exist or was previously deleted.</p> </li>
    /// <li> <p> <code>READY</code> - Endpoint is ready to be used for real-time predictions.</p> </li>
    /// <li> <p> <code>UPDATING</code> - Updating/creating the endpoint. </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub endpoint_status: std::option::Option<crate::model::RealtimeEndpointStatus>,
}
impl RealtimeEndpointInfo {
    /// <p> The maximum processing rate for the real-time endpoint for <code>MLModel</code>, measured in incoming requests per second.</p>
    pub fn peak_requests_per_second(&self) -> i32 {
        self.peak_requests_per_second
    }
    /// <p>The time that the request to create the real-time endpoint for the <code>MLModel</code> was received. The time is expressed in epoch time.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The URI that specifies where to send real-time prediction requests for the <code>MLModel</code>.</p>
    /// <p> <b>Note:</b> The application must wait until the real-time endpoint is ready before using this URI.</p>
    pub fn endpoint_url(&self) -> std::option::Option<&str> {
        self.endpoint_url.as_deref()
    }
    /// <p> The current status of the real-time endpoint for the <code>MLModel</code>. This element can have one of the following values: </p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Endpoint does not exist or was previously deleted.</p> </li>
    /// <li> <p> <code>READY</code> - Endpoint is ready to be used for real-time predictions.</p> </li>
    /// <li> <p> <code>UPDATING</code> - Updating/creating the endpoint. </p> </li>
    /// </ul>
    pub fn endpoint_status(&self) -> std::option::Option<&crate::model::RealtimeEndpointStatus> {
        self.endpoint_status.as_ref()
    }
}
/// See [`RealtimeEndpointInfo`](crate::model::RealtimeEndpointInfo).
pub mod realtime_endpoint_info {

    /// A builder for [`RealtimeEndpointInfo`](crate::model::RealtimeEndpointInfo).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) peak_requests_per_second: std::option::Option<i32>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) endpoint_url: std::option::Option<std::string::String>,
        pub(crate) endpoint_status: std::option::Option<crate::model::RealtimeEndpointStatus>,
    }
    impl Builder {
        /// <p> The maximum processing rate for the real-time endpoint for <code>MLModel</code>, measured in incoming requests per second.</p>
        pub fn peak_requests_per_second(mut self, input: i32) -> Self {
            self.peak_requests_per_second = Some(input);
            self
        }
        /// <p> The maximum processing rate for the real-time endpoint for <code>MLModel</code>, measured in incoming requests per second.</p>
        pub fn set_peak_requests_per_second(mut self, input: std::option::Option<i32>) -> Self {
            self.peak_requests_per_second = input;
            self
        }
        /// <p>The time that the request to create the real-time endpoint for the <code>MLModel</code> was received. The time is expressed in epoch time.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The time that the request to create the real-time endpoint for the <code>MLModel</code> was received. The time is expressed in epoch time.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The URI that specifies where to send real-time prediction requests for the <code>MLModel</code>.</p>
        /// <p> <b>Note:</b> The application must wait until the real-time endpoint is ready before using this URI.</p>
        pub fn endpoint_url(mut self, input: impl Into<std::string::String>) -> Self {
            self.endpoint_url = Some(input.into());
            self
        }
        /// <p>The URI that specifies where to send real-time prediction requests for the <code>MLModel</code>.</p>
        /// <p> <b>Note:</b> The application must wait until the real-time endpoint is ready before using this URI.</p>
        pub fn set_endpoint_url(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.endpoint_url = input;
            self
        }
        /// <p> The current status of the real-time endpoint for the <code>MLModel</code>. This element can have one of the following values: </p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - Endpoint does not exist or was previously deleted.</p> </li>
        /// <li> <p> <code>READY</code> - Endpoint is ready to be used for real-time predictions.</p> </li>
        /// <li> <p> <code>UPDATING</code> - Updating/creating the endpoint. </p> </li>
        /// </ul>
        pub fn endpoint_status(mut self, input: crate::model::RealtimeEndpointStatus) -> Self {
            self.endpoint_status = Some(input);
            self
        }
        /// <p> The current status of the real-time endpoint for the <code>MLModel</code>. This element can have one of the following values: </p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - Endpoint does not exist or was previously deleted.</p> </li>
        /// <li> <p> <code>READY</code> - Endpoint is ready to be used for real-time predictions.</p> </li>
        /// <li> <p> <code>UPDATING</code> - Updating/creating the endpoint. </p> </li>
        /// </ul>
        pub fn set_endpoint_status(
            mut self,
            input: std::option::Option<crate::model::RealtimeEndpointStatus>,
        ) -> Self {
            self.endpoint_status = input;
            self
        }
        /// Consumes the builder and constructs a [`RealtimeEndpointInfo`](crate::model::RealtimeEndpointInfo).
        pub fn build(self) -> crate::model::RealtimeEndpointInfo {
            crate::model::RealtimeEndpointInfo {
                peak_requests_per_second: self.peak_requests_per_second.unwrap_or_default(),
                created_at: self.created_at,
                endpoint_url: self.endpoint_url,
                endpoint_status: self.endpoint_status,
            }
        }
    }
}
impl RealtimeEndpointInfo {
    /// Creates a new builder-style object to manufacture [`RealtimeEndpointInfo`](crate::model::RealtimeEndpointInfo).
    pub fn builder() -> crate::model::realtime_endpoint_info::Builder {
        crate::model::realtime_endpoint_info::Builder::default()
    }
}
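
// Illustrative sketch of assembling a `RealtimeEndpointInfo` by hand; in practice the
// service returns this structure. The endpoint URL, request rate, and timestamp below
// are placeholder values.
#[test]
fn realtime_endpoint_info_builder() {
    use crate::model::{RealtimeEndpointInfo, RealtimeEndpointStatus};
    use aws_smithy_types::DateTime;

    let info = RealtimeEndpointInfo::builder()
        .peak_requests_per_second(200)
        .created_at(DateTime::from_secs(1_577_836_800)) // 2020-01-01T00:00:00Z in epoch seconds
        .endpoint_url("https://realtime.machinelearning.us-east-1.amazonaws.com/example")
        .endpoint_status(RealtimeEndpointStatus::Ready)
        .build();

    // `peak_requests_per_second` falls back to 0 when unset; here it was supplied.
    assert_eq!(info.peak_requests_per_second(), 200);
    assert_eq!(info.endpoint_status(), Some(&RealtimeEndpointStatus::Ready));
}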

/// When writing a match expression against `RealtimeEndpointStatus`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let realtimeendpointstatus = unimplemented!();
/// match realtimeendpointstatus {
///     RealtimeEndpointStatus::Failed => { /* ... */ },
///     RealtimeEndpointStatus::None => { /* ... */ },
///     RealtimeEndpointStatus::Ready => { /* ... */ },
///     RealtimeEndpointStatus::Updating => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `realtimeendpointstatus` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `RealtimeEndpointStatus::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `RealtimeEndpointStatus::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK where the variant `RealtimeEndpointStatus::NewFeature` is defined.
/// Specifically, when `realtimeendpointstatus` represents `NewFeature`,
/// the execution path will hit the second-to-last match arm as before, by virtue of
/// calling `as_str` on `RealtimeEndpointStatus::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum RealtimeEndpointStatus {
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    #[allow(missing_docs)] // documentation missing in model
    None,
    #[allow(missing_docs)] // documentation missing in model
    Ready,
    #[allow(missing_docs)] // documentation missing in model
    Updating,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for RealtimeEndpointStatus {
    fn from(s: &str) -> Self {
        match s {
            "FAILED" => RealtimeEndpointStatus::Failed,
            "NONE" => RealtimeEndpointStatus::None,
            "READY" => RealtimeEndpointStatus::Ready,
            "UPDATING" => RealtimeEndpointStatus::Updating,
            other => {
                RealtimeEndpointStatus::Unknown(crate::types::UnknownVariantValue(other.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for RealtimeEndpointStatus {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(RealtimeEndpointStatus::from(s))
    }
}
impl RealtimeEndpointStatus {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            RealtimeEndpointStatus::Failed => "FAILED",
            RealtimeEndpointStatus::None => "NONE",
            RealtimeEndpointStatus::Ready => "READY",
            RealtimeEndpointStatus::Updating => "UPDATING",
            RealtimeEndpointStatus::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["FAILED", "NONE", "READY", "UPDATING"]
    }
}
impl AsRef<str> for RealtimeEndpointStatus {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
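
// Illustrative sketch: a small helper that treats only `READY` as usable, which is how
// the endpoint-status values above are typically consumed when polling an endpoint. The
// helper itself is an assumption about caller code, not something the SDK provides.
#[test]
fn realtime_endpoint_status_ready_check() {
    use crate::model::{RealtimeEndpointInfo, RealtimeEndpointStatus};

    fn endpoint_is_ready(info: &RealtimeEndpointInfo) -> bool {
        matches!(info.endpoint_status(), Some(RealtimeEndpointStatus::Ready))
    }

    let updating = RealtimeEndpointInfo::builder()
        .endpoint_status(RealtimeEndpointStatus::Updating)
        .build();
    assert!(!endpoint_is_ready(&updating));

    let ready = RealtimeEndpointInfo::builder()
        .endpoint_status(RealtimeEndpointStatus::Ready)
        .build();
    assert!(endpoint_is_ready(&ready));
}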

/// When writing a match expression against `EntityStatus`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let entitystatus = unimplemented!();
/// match entitystatus {
///     EntityStatus::Completed => { /* ... */ },
///     EntityStatus::Deleted => { /* ... */ },
///     EntityStatus::Failed => { /* ... */ },
///     EntityStatus::Inprogress => { /* ... */ },
///     EntityStatus::Pending => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `entitystatus` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `EntityStatus::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `EntityStatus::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK where the variant `EntityStatus::NewFeature` is defined.
/// Specifically, when `entitystatus` represents `NewFeature`,
/// the execution path will hit the second-to-last match arm as before, by virtue of
/// calling `as_str` on `EntityStatus::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>Object status with the following possible values:</p>
/// <ul>
/// <li>
/// <p>
/// <code>PENDING</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>INPROGRESS</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>FAILED</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>COMPLETED</code>
/// </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETED</code>
/// </p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum EntityStatus {
    #[allow(missing_docs)] // documentation missing in model
    Completed,
    #[allow(missing_docs)] // documentation missing in model
    Deleted,
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    #[allow(missing_docs)] // documentation missing in model
    Inprogress,
    #[allow(missing_docs)] // documentation missing in model
    Pending,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for EntityStatus {
    fn from(s: &str) -> Self {
        match s {
            "COMPLETED" => EntityStatus::Completed,
            "DELETED" => EntityStatus::Deleted,
            "FAILED" => EntityStatus::Failed,
            "INPROGRESS" => EntityStatus::Inprogress,
            "PENDING" => EntityStatus::Pending,
            other => EntityStatus::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for EntityStatus {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(EntityStatus::from(s))
    }
}
impl EntityStatus {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            EntityStatus::Completed => "COMPLETED",
            EntityStatus::Deleted => "DELETED",
            EntityStatus::Failed => "FAILED",
            EntityStatus::Inprogress => "INPROGRESS",
            EntityStatus::Pending => "PENDING",
            EntityStatus::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["COMPLETED", "DELETED", "FAILED", "INPROGRESS", "PENDING"]
    }
}
impl AsRef<str> for EntityStatus {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
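
// Illustrative sketch: when polling an ML object's status, `COMPLETED`, `FAILED`, and
// `DELETED` can be treated as terminal, while `PENDING` and `INPROGRESS` mean the
// operation is still running. The helper below is an assumption about how a caller might
// group these values, not something the SDK itself provides.
#[test]
fn entity_status_terminal_states() {
    use crate::model::EntityStatus;

    fn is_terminal(status: &EntityStatus) -> bool {
        matches!(
            status,
            EntityStatus::Completed | EntityStatus::Failed | EntityStatus::Deleted
        )
    }

    assert!(is_terminal(&EntityStatus::from("COMPLETED")));
    assert!(!is_terminal(&EntityStatus::from("INPROGRESS")));
}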

/// <p>Measurements of how well the <code>MLModel</code> performed on known observations. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p>
/// <ul>
/// <li> <p>BinaryAUC: The binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li>
/// <li> <p>RegressionRMSE: The regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li>
/// <li> <p>MulticlassAvgFScore: The multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li>
/// </ul>
/// <p> For more information about performance metrics, please see the <a href="https://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct PerformanceMetrics {
    #[allow(missing_docs)] // documentation missing in model
    #[doc(hidden)]
    pub properties:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl PerformanceMetrics {
    #[allow(missing_docs)] // documentation missing in model
    pub fn properties(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.properties.as_ref()
    }
}
/// See [`PerformanceMetrics`](crate::model::PerformanceMetrics).
pub mod performance_metrics {

    /// A builder for [`PerformanceMetrics`](crate::model::PerformanceMetrics).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) properties: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    }
    impl Builder {
        /// Adds a key-value pair to `properties`.
        ///
        /// To override the contents of this collection use [`set_properties`](Self::set_properties).
        ///
        pub fn properties(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            let mut hash_map = self.properties.unwrap_or_default();
            hash_map.insert(k.into(), v.into());
            self.properties = Some(hash_map);
            self
        }
        #[allow(missing_docs)] // documentation missing in model
        pub fn set_properties(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            self.properties = input;
            self
        }
        /// Consumes the builder and constructs a [`PerformanceMetrics`](crate::model::PerformanceMetrics).
        pub fn build(self) -> crate::model::PerformanceMetrics {
            crate::model::PerformanceMetrics {
                properties: self.properties,
            }
        }
    }
}
impl PerformanceMetrics {
    /// Creates a new builder-style object to manufacture [`PerformanceMetrics`](crate::model::PerformanceMetrics).
    pub fn builder() -> crate::model::performance_metrics::Builder {
        crate::model::performance_metrics::Builder::default()
    }
}
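
// Illustrative sketch: the `properties` map carries metric names and stringified values.
// The `"BinaryAUC"` key and `"0.983"` value below are placeholders chosen to match the
// metric described in the struct docs, not real service output.
#[test]
fn performance_metrics_properties() {
    use crate::model::PerformanceMetrics;

    let metrics = PerformanceMetrics::builder()
        .properties("BinaryAUC", "0.983")
        .build();

    let auc = metrics
        .properties()
        .and_then(|props| props.get("BinaryAUC"))
        .map(String::as_str);
    assert_eq!(auc, Some("0.983"));
}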

/// <p>The datasource details that are specific to Amazon RDS.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RdsMetadata {
    /// <p>The database details required to connect to an Amazon RDS database.</p>
    #[doc(hidden)]
    pub database: std::option::Option<crate::model::RdsDatabase>,
    /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
    #[doc(hidden)]
    pub database_user_name: std::option::Option<std::string::String>,
    /// <p>The SQL query that is supplied during <code>CreateDataSourceFromRDS</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
    #[doc(hidden)]
    pub select_sql_query: std::option::Option<std::string::String>,
    /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    #[doc(hidden)]
    pub resource_role: std::option::Option<std::string::String>,
    /// <p>The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    #[doc(hidden)]
    pub service_role: std::option::Option<std::string::String>,
    /// <p>The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.</p>
    #[doc(hidden)]
    pub data_pipeline_id: std::option::Option<std::string::String>,
}
impl RdsMetadata {
    /// <p>The database details required to connect to an Amazon RDS database.</p>
    pub fn database(&self) -> std::option::Option<&crate::model::RdsDatabase> {
        self.database.as_ref()
    }
    /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
    pub fn database_user_name(&self) -> std::option::Option<&str> {
        self.database_user_name.as_deref()
    }
    /// <p>The SQL query that is supplied during <code>CreateDataSourceFromRDS</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
    pub fn select_sql_query(&self) -> std::option::Option<&str> {
        self.select_sql_query.as_deref()
    }
    /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    pub fn resource_role(&self) -> std::option::Option<&str> {
        self.resource_role.as_deref()
    }
    /// <p>The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    pub fn service_role(&self) -> std::option::Option<&str> {
        self.service_role.as_deref()
    }
    /// <p>The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.</p>
    pub fn data_pipeline_id(&self) -> std::option::Option<&str> {
        self.data_pipeline_id.as_deref()
    }
}
/// See [`RdsMetadata`](crate::model::RdsMetadata).
pub mod rds_metadata {

    /// A builder for [`RdsMetadata`](crate::model::RdsMetadata).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) database: std::option::Option<crate::model::RdsDatabase>,
        pub(crate) database_user_name: std::option::Option<std::string::String>,
        pub(crate) select_sql_query: std::option::Option<std::string::String>,
        pub(crate) resource_role: std::option::Option<std::string::String>,
        pub(crate) service_role: std::option::Option<std::string::String>,
        pub(crate) data_pipeline_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The database details required to connect to an Amazon RDS database.</p>
        pub fn database(mut self, input: crate::model::RdsDatabase) -> Self {
            self.database = Some(input);
            self
        }
        /// <p>The database details required to connect to an Amazon RDS database.</p>
        pub fn set_database(
            mut self,
            input: std::option::Option<crate::model::RdsDatabase>,
        ) -> Self {
            self.database = input;
            self
        }
        /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
        pub fn database_user_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.database_user_name = Some(input.into());
            self
        }
        /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
        pub fn set_database_user_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.database_user_name = input;
            self
        }
        /// <p>The SQL query that is supplied during <code>CreateDataSourceFromRDS</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
        pub fn select_sql_query(mut self, input: impl Into<std::string::String>) -> Self {
            self.select_sql_query = Some(input.into());
            self
        }
        /// <p>The SQL query that is supplied during <code>CreateDataSourceFromRDS</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
        pub fn set_select_sql_query(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.select_sql_query = input;
            self
        }
        /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn resource_role(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_role = Some(input.into());
            self
        }
        /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn set_resource_role(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.resource_role = input;
            self
        }
        /// <p>The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn service_role(mut self, input: impl Into<std::string::String>) -> Self {
            self.service_role = Some(input.into());
            self
        }
        /// <p>The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn set_service_role(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.service_role = input;
            self
        }
        /// <p>The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.</p>
        pub fn data_pipeline_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_pipeline_id = Some(input.into());
            self
        }
        /// <p>The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.</p>
        pub fn set_data_pipeline_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_pipeline_id = input;
            self
        }
        /// Consumes the builder and constructs a [`RdsMetadata`](crate::model::RdsMetadata).
        pub fn build(self) -> crate::model::RdsMetadata {
            crate::model::RdsMetadata {
                database: self.database,
                database_user_name: self.database_user_name,
                select_sql_query: self.select_sql_query,
                resource_role: self.resource_role,
                service_role: self.service_role,
                data_pipeline_id: self.data_pipeline_id,
            }
        }
    }
}
impl RdsMetadata {
    /// Creates a new builder-style object to manufacture [`RdsMetadata`](crate::model::RdsMetadata).
    pub fn builder() -> crate::model::rds_metadata::Builder {
        crate::model::rds_metadata::Builder::default()
    }
}

/// <p>The database details of an Amazon RDS database.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RdsDatabase {
    /// <p>The ID of an RDS DB instance.</p>
    #[doc(hidden)]
    pub instance_identifier: std::option::Option<std::string::String>,
    /// <p>The name of a database hosted on an RDS DB instance.</p>
    #[doc(hidden)]
    pub database_name: std::option::Option<std::string::String>,
}
impl RdsDatabase {
    /// <p>The ID of an RDS DB instance.</p>
    pub fn instance_identifier(&self) -> std::option::Option<&str> {
        self.instance_identifier.as_deref()
    }
    /// <p>The name of a database hosted on an RDS DB instance.</p>
    pub fn database_name(&self) -> std::option::Option<&str> {
        self.database_name.as_deref()
    }
}
/// See [`RdsDatabase`](crate::model::RdsDatabase).
pub mod rds_database {

    /// A builder for [`RdsDatabase`](crate::model::RdsDatabase).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) instance_identifier: std::option::Option<std::string::String>,
        pub(crate) database_name: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The ID of an RDS DB instance.</p>
        pub fn instance_identifier(mut self, input: impl Into<std::string::String>) -> Self {
            self.instance_identifier = Some(input.into());
            self
        }
        /// <p>The ID of an RDS DB instance.</p>
        pub fn set_instance_identifier(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.instance_identifier = input;
            self
        }
        /// <p>The name of a database hosted on an RDS DB instance.</p>
        pub fn database_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.database_name = Some(input.into());
            self
        }
        /// <p>The name of a database hosted on an RDS DB instance.</p>
        pub fn set_database_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.database_name = input;
            self
        }
        /// Consumes the builder and constructs a [`RdsDatabase`](crate::model::RdsDatabase).
        pub fn build(self) -> crate::model::RdsDatabase {
            crate::model::RdsDatabase {
                instance_identifier: self.instance_identifier,
                database_name: self.database_name,
            }
        }
    }
}
impl RdsDatabase {
    /// Creates a new builder-style object to manufacture [`RdsDatabase`](crate::model::RdsDatabase).
    pub fn builder() -> crate::model::rds_database::Builder {
        crate::model::rds_database::Builder::default()
    }
}
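
// Illustrative sketch: nests an `RdsDatabase` inside `RdsMetadata` the way the two
// builders are meant to be composed. Every identifier, role name, and query below is a
// placeholder.
#[test]
fn rds_metadata_builder_composition() {
    use crate::model::{RdsDatabase, RdsMetadata};

    let database = RdsDatabase::builder()
        .instance_identifier("my-rds-instance")
        .database_name("mydb")
        .build();

    let metadata = RdsMetadata::builder()
        .database(database)
        .database_user_name("ml_reader")
        .select_sql_query("SELECT * FROM observations")
        .resource_role("DataPipelineDefaultResourceRole")
        .service_role("DataPipelineDefaultRole")
        .data_pipeline_id("df-00000000000000000000")
        .build();

    assert_eq!(metadata.database_user_name(), Some("ml_reader"));
    assert_eq!(
        metadata.database().and_then(|db| db.database_name()),
        Some("mydb")
    );
}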

/// <p>Describes the <code>DataSource</code> details specific to Amazon Redshift.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RedshiftMetadata {
    /// <p>Describes the database details required to connect to an Amazon Redshift database.</p>
    #[doc(hidden)]
    pub redshift_database: std::option::Option<crate::model::RedshiftDatabase>,
    /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
    #[doc(hidden)]
    pub database_user_name: std::option::Option<std::string::String>,
    /// <p> The SQL query that is specified during <code>CreateDataSourceFromRedshift</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
    #[doc(hidden)]
    pub select_sql_query: std::option::Option<std::string::String>,
}
impl RedshiftMetadata {
    /// <p>Describes the database details required to connect to an Amazon Redshift database.</p>
    pub fn redshift_database(&self) -> std::option::Option<&crate::model::RedshiftDatabase> {
        self.redshift_database.as_ref()
    }
    /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
    pub fn database_user_name(&self) -> std::option::Option<&str> {
        self.database_user_name.as_deref()
    }
    /// <p> The SQL query that is specified during <code>CreateDataSourceFromRedshift</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
    pub fn select_sql_query(&self) -> std::option::Option<&str> {
        self.select_sql_query.as_deref()
    }
}
/// See [`RedshiftMetadata`](crate::model::RedshiftMetadata).
pub mod redshift_metadata {

    /// A builder for [`RedshiftMetadata`](crate::model::RedshiftMetadata).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) redshift_database: std::option::Option<crate::model::RedshiftDatabase>,
        pub(crate) database_user_name: std::option::Option<std::string::String>,
        pub(crate) select_sql_query: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Describes the database details required to connect to an Amazon Redshift database.</p>
        pub fn redshift_database(mut self, input: crate::model::RedshiftDatabase) -> Self {
            self.redshift_database = Some(input);
            self
        }
        /// <p>Describes the database details required to connect to an Amazon Redshift database.</p>
        pub fn set_redshift_database(
            mut self,
            input: std::option::Option<crate::model::RedshiftDatabase>,
        ) -> Self {
            self.redshift_database = input;
            self
        }
        /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
        pub fn database_user_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.database_user_name = Some(input.into());
            self
        }
        /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
        pub fn set_database_user_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.database_user_name = input;
            self
        }
        /// <p> The SQL query that is specified during <code>CreateDataSourceFromRedshift</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
        pub fn select_sql_query(mut self, input: impl Into<std::string::String>) -> Self {
            self.select_sql_query = Some(input.into());
            self
        }
        /// <p> The SQL query that is specified during <code>CreateDataSourceFromRedshift</code>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
        pub fn set_select_sql_query(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.select_sql_query = input;
            self
        }
        /// Consumes the builder and constructs a [`RedshiftMetadata`](crate::model::RedshiftMetadata).
        pub fn build(self) -> crate::model::RedshiftMetadata {
            crate::model::RedshiftMetadata {
                redshift_database: self.redshift_database,
                database_user_name: self.database_user_name,
                select_sql_query: self.select_sql_query,
            }
        }
    }
}
impl RedshiftMetadata {
    /// Creates a new builder-style object to manufacture [`RedshiftMetadata`](crate::model::RedshiftMetadata).
    pub fn builder() -> crate::model::redshift_metadata::Builder {
        crate::model::redshift_metadata::Builder::default()
    }
}

/// <p>Describes the database details required to connect to an Amazon Redshift database.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RedshiftDatabase {
    /// <p>The name of a database hosted on an Amazon Redshift cluster.</p>
    #[doc(hidden)]
    pub database_name: std::option::Option<std::string::String>,
    /// <p>The ID of an Amazon Redshift cluster.</p>
    #[doc(hidden)]
    pub cluster_identifier: std::option::Option<std::string::String>,
}
impl RedshiftDatabase {
    /// <p>The name of a database hosted on an Amazon Redshift cluster.</p>
    pub fn database_name(&self) -> std::option::Option<&str> {
        self.database_name.as_deref()
    }
    /// <p>The ID of an Amazon Redshift cluster.</p>
    pub fn cluster_identifier(&self) -> std::option::Option<&str> {
        self.cluster_identifier.as_deref()
    }
}
/// See [`RedshiftDatabase`](crate::model::RedshiftDatabase).
pub mod redshift_database {

    /// A builder for [`RedshiftDatabase`](crate::model::RedshiftDatabase).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) database_name: std::option::Option<std::string::String>,
        pub(crate) cluster_identifier: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of a database hosted on an Amazon Redshift cluster.</p>
        pub fn database_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.database_name = Some(input.into());
            self
        }
        /// <p>The name of a database hosted on an Amazon Redshift cluster.</p>
        pub fn set_database_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.database_name = input;
            self
        }
        /// <p>The ID of an Amazon Redshift cluster.</p>
        pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
            self.cluster_identifier = Some(input.into());
            self
        }
        /// <p>The ID of an Amazon Redshift cluster.</p>
        pub fn set_cluster_identifier(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.cluster_identifier = input;
            self
        }
        /// Consumes the builder and constructs a [`RedshiftDatabase`](crate::model::RedshiftDatabase).
        pub fn build(self) -> crate::model::RedshiftDatabase {
            crate::model::RedshiftDatabase {
                database_name: self.database_name,
                cluster_identifier: self.cluster_identifier,
            }
        }
    }
}
impl RedshiftDatabase {
    /// Creates a new builder-style object to manufacture [`RedshiftDatabase`](crate::model::RedshiftDatabase).
    pub fn builder() -> crate::model::redshift_database::Builder {
        crate::model::redshift_database::Builder::default()
    }
}

/// <p>A custom key-value pair associated with an ML object, such as an ML model.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Tag {
    /// <p>A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
    #[doc(hidden)]
    pub key: std::option::Option<std::string::String>,
    /// <p>An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
    #[doc(hidden)]
    pub value: std::option::Option<std::string::String>,
}
impl Tag {
    /// <p>A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
    pub fn key(&self) -> std::option::Option<&str> {
        self.key.as_deref()
    }
    /// <p>An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
    pub fn value(&self) -> std::option::Option<&str> {
        self.value.as_deref()
    }
}
/// See [`Tag`](crate::model::Tag).
pub mod tag {

    /// A builder for [`Tag`](crate::model::Tag).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) key: std::option::Option<std::string::String>,
        pub(crate) value: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
        pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
            self.key = Some(input.into());
            self
        }
        /// <p>A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
        pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.key = input;
            self
        }
        /// <p>An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
        pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
            self.value = Some(input.into());
            self
        }
        /// <p>An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
        pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.value = input;
            self
        }
        /// Consumes the builder and constructs a [`Tag`](crate::model::Tag).
        pub fn build(self) -> crate::model::Tag {
            crate::model::Tag {
                key: self.key,
                value: self.value,
            }
        }
    }
}
impl Tag {
    /// Creates a new builder-style object to manufacture [`Tag`](crate::model::Tag).
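    ///
    /// A minimal usage sketch; the key and value below are illustrative:
    ///
    /// ```text
    /// let tag = Tag::builder()
    ///     .key("project")
    ///     .value("churn-prediction")
    ///     .build();
    /// assert_eq!(tag.key(), Some("project"));
    /// assert_eq!(tag.value(), Some("churn-prediction"));
    /// ```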
    pub fn builder() -> crate::model::tag::Builder {
        crate::model::tag::Builder::default()
    }
}

/// When writing a match expression against `TaggableResourceType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let taggableresourcetype = unimplemented!();
/// match taggableresourcetype {
///     TaggableResourceType::BatchPrediction => { /* ... */ },
///     TaggableResourceType::Datasource => { /* ... */ },
///     TaggableResourceType::Evaluation => { /* ... */ },
///     TaggableResourceType::MlModel => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `taggableresourcetype` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `TaggableResourceType::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `TaggableResourceType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// and calling `as_str` on it yields `"NewFeature"`.
/// The match expression remains forward-compatible when executed with a newer
/// version of the SDK in which the variant `TaggableResourceType::NewFeature` is defined.
/// Specifically, when `taggableresourcetype` represents `NewFeature`,
/// the execution path still hits the second-to-last match arm, because
/// calling `as_str` on `TaggableResourceType::NewFeature` also yields `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum TaggableResourceType {
    #[allow(missing_docs)] // documentation missing in model
    BatchPrediction,
    #[allow(missing_docs)] // documentation missing in model
    Datasource,
    #[allow(missing_docs)] // documentation missing in model
    Evaluation,
    #[allow(missing_docs)] // documentation missing in model
    MlModel,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for TaggableResourceType {
    fn from(s: &str) -> Self {
        match s {
            "BatchPrediction" => TaggableResourceType::BatchPrediction,
            "DataSource" => TaggableResourceType::Datasource,
            "Evaluation" => TaggableResourceType::Evaluation,
            "MLModel" => TaggableResourceType::MlModel,
            other => {
                TaggableResourceType::Unknown(crate::types::UnknownVariantValue(other.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for TaggableResourceType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(TaggableResourceType::from(s))
    }
}
impl TaggableResourceType {
    /// Returns the `&str` value of the enum member.
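    ///
    /// Note that the string values differ in casing from the variant names; a small
    /// illustration based on the mappings below:
    ///
    /// ```text
    /// assert_eq!(TaggableResourceType::Datasource.as_str(), "DataSource");
    /// assert_eq!(TaggableResourceType::from("MLModel"), TaggableResourceType::MlModel);
    /// ```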
    pub fn as_str(&self) -> &str {
        match self {
            TaggableResourceType::BatchPrediction => "BatchPrediction",
            TaggableResourceType::Datasource => "DataSource",
            TaggableResourceType::Evaluation => "Evaluation",
            TaggableResourceType::MlModel => "MLModel",
            TaggableResourceType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["BatchPrediction", "DataSource", "Evaluation", "MLModel"]
    }
}
impl AsRef<str> for TaggableResourceType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p> Represents the output of a <code>GetMLModel</code> operation. </p>
/// <p>The content consists of the detailed metadata and the current status of the <code>MLModel</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct MlModel {
    /// <p>The ID assigned to the <code>MLModel</code> at creation.</p>
    #[doc(hidden)]
    pub ml_model_id: std::option::Option<std::string::String>,
    /// <p>The ID of the training <code>DataSource</code>. The <code>CreateMLModel</code> operation uses the <code>TrainingDataSourceId</code>.</p>
    #[doc(hidden)]
    pub training_data_source_id: std::option::Option<std::string::String>,
    /// <p>The AWS user account from which the <code>MLModel</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    #[doc(hidden)]
    pub created_by_iam_user: std::option::Option<std::string::String>,
    /// <p>The time that the <code>MLModel</code> was created. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The time of the most recent edit to the <code>MLModel</code>. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>The current status of an <code>MLModel</code>. This element can have one of the following values: </p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to create an <code>MLModel</code>.</p> </li>
    /// <li> <p> <code>INPROGRESS</code> - The creation process is underway.</p> </li>
    /// <li> <p> <code>FAILED</code> - The request to create an <code>MLModel</code> didn't run to completion. The model isn't usable.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - The creation process completed successfully.</p> </li>
    /// <li> <p> <code>DELETED</code> - The <code>MLModel</code> is marked as deleted. It isn't usable.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub status: std::option::Option<crate::model::EntityStatus>,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub size_in_bytes: std::option::Option<i64>,
    /// <p>The current endpoint of the <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub endpoint_info: std::option::Option<crate::model::RealtimeEndpointInfo>,
    /// <p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p>
    /// <p>The following is the current set of training parameters:</p>
    /// <ul>
    /// <li> <p> <code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li>
    /// <li> <p> <code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p> </li>
    /// <li> <p> <code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>.</p> </li>
    /// <li> <p> <code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm, which controls overfitting of the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 regularization. This parameter can't be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li>
    /// <li> <p> <code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm, which controls overfitting of the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 regularization. This parameter can't be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub training_parameters:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
    #[doc(hidden)]
    pub input_data_location_s3: std::option::Option<std::string::String>,
    /// <p>The algorithm used to train the <code>MLModel</code>. The following algorithm is supported:</p>
    /// <ul>
    /// <li> <p> <code>SGD</code> -- Stochastic gradient descent. The goal of <code>SGD</code> is to minimize the loss function by iteratively following its gradient. </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub algorithm: std::option::Option<crate::model::Algorithm>,
    /// <p>Identifies the <code>MLModel</code> category. The following are the available types:</p>
    /// <ul>
    /// <li> <p> <code>REGRESSION</code> - Produces a numeric result. For example, "What price should a house be listed at?"</p> </li>
    /// <li> <p> <code>BINARY</code> - Produces one of two possible results. For example, "Is this a child-friendly web site?".</p> </li>
    /// <li> <p> <code>MULTICLASS</code> - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub ml_model_type: std::option::Option<crate::model::MlModelType>,
    #[allow(missing_docs)] // documentation missing in model
    #[doc(hidden)]
    pub score_threshold: std::option::Option<f32>,
    /// <p>The time of the most recent edit to the <code>ScoreThreshold</code>. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub score_threshold_last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A description of the most recent details about accessing the <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub compute_time: std::option::Option<i64>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub finished_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub started_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl MlModel {
    /// <p>The ID assigned to the <code>MLModel</code> at creation.</p>
    pub fn ml_model_id(&self) -> std::option::Option<&str> {
        self.ml_model_id.as_deref()
    }
    /// <p>The ID of the training <code>DataSource</code>. The <code>CreateMLModel</code> operation uses the <code>TrainingDataSourceId</code>.</p>
    pub fn training_data_source_id(&self) -> std::option::Option<&str> {
        self.training_data_source_id.as_deref()
    }
    /// <p>The AWS user account from which the <code>MLModel</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    pub fn created_by_iam_user(&self) -> std::option::Option<&str> {
        self.created_by_iam_user.as_deref()
    }
    /// <p>The time that the <code>MLModel</code> was created. The time is expressed in epoch time.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The time of the most recent edit to the <code>MLModel</code>. The time is expressed in epoch time.</p>
    pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_updated_at.as_ref()
    }
    /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_deref()
    }
    /// <p>The current status of an <code>MLModel</code>. This element can have one of the following values: </p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to create an <code>MLModel</code>.</p> </li>
    /// <li> <p> <code>INPROGRESS</code> - The creation process is underway.</p> </li>
    /// <li> <p> <code>FAILED</code> - The request to create an <code>MLModel</code> didn't run to completion. The model isn't usable.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - The creation process completed successfully.</p> </li>
    /// <li> <p> <code>DELETED</code> - The <code>MLModel</code> is marked as deleted. It isn't usable.</p> </li>
    /// </ul>
    pub fn status(&self) -> std::option::Option<&crate::model::EntityStatus> {
        self.status.as_ref()
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn size_in_bytes(&self) -> std::option::Option<i64> {
        self.size_in_bytes
    }
    /// <p>The current endpoint of the <code>MLModel</code>.</p>
    pub fn endpoint_info(&self) -> std::option::Option<&crate::model::RealtimeEndpointInfo> {
        self.endpoint_info.as_ref()
    }
    /// <p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p>
    /// <p>The following is the current set of training parameters:</p>
    /// <ul>
    /// <li> <p> <code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li>
    /// <li> <p> <code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p> </li>
    /// <li> <p> <code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>.</p> </li>
    /// <li> <p> <code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm, which controls overfitting of the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 regularization. This parameter can't be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li>
    /// <li> <p> <code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm, which controls overfitting of the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 regularization. This parameter can't be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li>
    /// </ul>
    pub fn training_parameters(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.training_parameters.as_ref()
    }
    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
    pub fn input_data_location_s3(&self) -> std::option::Option<&str> {
        self.input_data_location_s3.as_deref()
    }
    /// <p>The algorithm used to train the <code>MLModel</code>. The following algorithm is supported:</p>
    /// <ul>
    /// <li> <p> <code>SGD</code> -- Stochastic gradient descent. The goal of <code>SGD</code> is to minimize the loss function by iteratively following its gradient. </p> </li>
    /// </ul>
    pub fn algorithm(&self) -> std::option::Option<&crate::model::Algorithm> {
        self.algorithm.as_ref()
    }
    /// <p>Identifies the <code>MLModel</code> category. The following are the available types:</p>
    /// <ul>
    /// <li> <p> <code>REGRESSION</code> - Produces a numeric result. For example, "What price should a house be listed at?"</p> </li>
    /// <li> <p> <code>BINARY</code> - Produces one of two possible results. For example, "Is this a child-friendly web site?".</p> </li>
    /// <li> <p> <code>MULTICLASS</code> - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".</p> </li>
    /// </ul>
    pub fn ml_model_type(&self) -> std::option::Option<&crate::model::MlModelType> {
        self.ml_model_type.as_ref()
    }
    #[allow(missing_docs)] // documentation missing in model
    pub fn score_threshold(&self) -> std::option::Option<f32> {
        self.score_threshold
    }
    /// <p>The time of the most recent edit to the <code>ScoreThreshold</code>. The time is expressed in epoch time.</p>
    pub fn score_threshold_last_updated_at(
        &self,
    ) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.score_threshold_last_updated_at.as_ref()
    }
    /// <p>A description of the most recent details about accessing the <code>MLModel</code>.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_deref()
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn compute_time(&self) -> std::option::Option<i64> {
        self.compute_time
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn finished_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.finished_at.as_ref()
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn started_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.started_at.as_ref()
    }
}
/// See [`MlModel`](crate::model::MlModel).
pub mod ml_model {

    /// A builder for [`MlModel`](crate::model::MlModel).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) ml_model_id: std::option::Option<std::string::String>,
        pub(crate) training_data_source_id: std::option::Option<std::string::String>,
        pub(crate) created_by_iam_user: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) status: std::option::Option<crate::model::EntityStatus>,
        pub(crate) size_in_bytes: std::option::Option<i64>,
        pub(crate) endpoint_info: std::option::Option<crate::model::RealtimeEndpointInfo>,
        pub(crate) training_parameters: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
        pub(crate) input_data_location_s3: std::option::Option<std::string::String>,
        pub(crate) algorithm: std::option::Option<crate::model::Algorithm>,
        pub(crate) ml_model_type: std::option::Option<crate::model::MlModelType>,
        pub(crate) score_threshold: std::option::Option<f32>,
        pub(crate) score_threshold_last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) message: std::option::Option<std::string::String>,
        pub(crate) compute_time: std::option::Option<i64>,
        pub(crate) finished_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) started_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The ID assigned to the <code>MLModel</code> at creation.</p>
        pub fn ml_model_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.ml_model_id = Some(input.into());
            self
        }
        /// <p>The ID assigned to the <code>MLModel</code> at creation.</p>
        pub fn set_ml_model_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.ml_model_id = input;
            self
        }
        /// <p>The ID of the training <code>DataSource</code>. The <code>CreateMLModel</code> operation uses the <code>TrainingDataSourceId</code>.</p>
        pub fn training_data_source_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.training_data_source_id = Some(input.into());
            self
        }
        /// <p>The ID of the training <code>DataSource</code>. The <code>CreateMLModel</code> operation uses the <code>TrainingDataSourceId</code>.</p>
        pub fn set_training_data_source_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.training_data_source_id = input;
            self
        }
        /// <p>The AWS user account from which the <code>MLModel</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn created_by_iam_user(mut self, input: impl Into<std::string::String>) -> Self {
            self.created_by_iam_user = Some(input.into());
            self
        }
        /// <p>The AWS user account from which the <code>MLModel</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn set_created_by_iam_user(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.created_by_iam_user = input;
            self
        }
        /// <p>The time that the <code>MLModel</code> was created. The time is expressed in epoch time.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The time that the <code>MLModel</code> was created. The time is expressed in epoch time.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The time of the most recent edit to the <code>MLModel</code>. The time is expressed in epoch time.</p>
        pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.last_updated_at = Some(input);
            self
        }
        /// <p>The time of the most recent edit to the <code>MLModel</code>. The time is expressed in epoch time.</p>
        pub fn set_last_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.last_updated_at = input;
            self
        }
        /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>The current status of an <code>MLModel</code>. This element can have one of the following values: </p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to create an <code>MLModel</code>.</p> </li>
        /// <li> <p> <code>INPROGRESS</code> - The creation process is underway.</p> </li>
        /// <li> <p> <code>FAILED</code> - The request to create an <code>MLModel</code> didn't run to completion. The model isn't usable.</p> </li>
        /// <li> <p> <code>COMPLETED</code> - The creation process completed successfully.</p> </li>
        /// <li> <p> <code>DELETED</code> - The <code>MLModel</code> is marked as deleted. It isn't usable.</p> </li>
        /// </ul>
        pub fn status(mut self, input: crate::model::EntityStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// <p>The current status of an <code>MLModel</code>. This element can have one of the following values: </p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to create an <code>MLModel</code>.</p> </li>
        /// <li> <p> <code>INPROGRESS</code> - The creation process is underway.</p> </li>
        /// <li> <p> <code>FAILED</code> - The request to create an <code>MLModel</code> didn't run to completion. The model isn't usable.</p> </li>
        /// <li> <p> <code>COMPLETED</code> - The creation process completed successfully.</p> </li>
        /// <li> <p> <code>DELETED</code> - The <code>MLModel</code> is marked as deleted. It isn't usable.</p> </li>
        /// </ul>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::EntityStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn size_in_bytes(mut self, input: i64) -> Self {
            self.size_in_bytes = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_size_in_bytes(mut self, input: std::option::Option<i64>) -> Self {
            self.size_in_bytes = input;
            self
        }
        /// <p>The current endpoint of the <code>MLModel</code>.</p>
        pub fn endpoint_info(mut self, input: crate::model::RealtimeEndpointInfo) -> Self {
            self.endpoint_info = Some(input);
            self
        }
        /// <p>The current endpoint of the <code>MLModel</code>.</p>
        pub fn set_endpoint_info(
            mut self,
            input: std::option::Option<crate::model::RealtimeEndpointInfo>,
        ) -> Self {
            self.endpoint_info = input;
            self
        }
        /// Adds a key-value pair to `training_parameters`.
        ///
        /// To override the contents of this collection use [`set_training_parameters`](Self::set_training_parameters). A brief usage illustration follows the parameter list below.
        ///
        /// <p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p>
        /// <p>The following is the current set of training parameters:</p>
        /// <ul>
        /// <li> <p> <code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li>
        /// <li> <p> <code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p> </li>
        /// <li> <p> <code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>.</p> </li>
        /// <li> <p> <code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm, which controls overfitting of the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 regularization. This parameter can't be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li>
        /// <li> <p> <code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm, which controls overfitting of the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 regularization. This parameter can't be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li>
        /// </ul>
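        ///
        /// A brief illustration of adding entries one at a time; the parameter values are examples only:
        ///
        /// ```text
        /// let builder = MlModel::builder()
        ///     .training_parameters("sgd.maxPasses", "10")
        ///     .training_parameters("sgd.shuffleType", "auto");
        /// ```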
        pub fn training_parameters(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            let mut hash_map = self.training_parameters.unwrap_or_default();
            hash_map.insert(k.into(), v.into());
            self.training_parameters = Some(hash_map);
            self
        }
        /// <p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p>
        /// <p>The following is the current set of training parameters:</p>
        /// <ul>
        /// <li> <p> <code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li>
        /// <li> <p> <code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p> </li>
        /// <li> <p> <code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>.</p> </li>
        /// <li> <p> <code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm, which controls overfitting of the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 regularization. This parameter can't be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li>
        /// <li> <p> <code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm, which controls overfitting of the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 regularization. This parameter can't be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li>
        /// </ul>
        pub fn set_training_parameters(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            self.training_parameters = input;
            self
        }
        /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
        pub fn input_data_location_s3(mut self, input: impl Into<std::string::String>) -> Self {
            self.input_data_location_s3 = Some(input.into());
            self
        }
        /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
        pub fn set_input_data_location_s3(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.input_data_location_s3 = input;
            self
        }
        /// <p>The algorithm used to train the <code>MLModel</code>. The following algorithm is supported:</p>
        /// <ul>
        /// <li> <p> <code>SGD</code> -- Stochastic gradient descent. The goal of <code>SGD</code> is to minimize the loss function by iteratively following its gradient. </p> </li>
        /// </ul>
        pub fn algorithm(mut self, input: crate::model::Algorithm) -> Self {
            self.algorithm = Some(input);
            self
        }
        /// <p>The algorithm used to train the <code>MLModel</code>. The following algorithm is supported:</p>
        /// <ul>
        /// <li> <p> <code>SGD</code> -- Stochastic gradient descent. The goal of <code>SGD</code> is to minimize the loss function by iteratively following its gradient. </p> </li>
        /// </ul>
        pub fn set_algorithm(
            mut self,
            input: std::option::Option<crate::model::Algorithm>,
        ) -> Self {
            self.algorithm = input;
            self
        }
        /// <p>Identifies the <code>MLModel</code> category. The following are the available types:</p>
        /// <ul>
        /// <li> <p> <code>REGRESSION</code> - Produces a numeric result. For example, "What price should a house be listed at?"</p> </li>
        /// <li> <p> <code>BINARY</code> - Produces one of two possible results. For example, "Is this a child-friendly web site?".</p> </li>
        /// <li> <p> <code>MULTICLASS</code> - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".</p> </li>
        /// </ul>
        pub fn ml_model_type(mut self, input: crate::model::MlModelType) -> Self {
            self.ml_model_type = Some(input);
            self
        }
        /// <p>Identifies the <code>MLModel</code> category. The following are the available types:</p>
        /// <ul>
        /// <li> <p> <code>REGRESSION</code> - Produces a numeric result. For example, "What price should a house be listed at?"</p> </li>
        /// <li> <p> <code>BINARY</code> - Produces one of two possible results. For example, "Is this a child-friendly web site?".</p> </li>
        /// <li> <p> <code>MULTICLASS</code> - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".</p> </li>
        /// </ul>
        pub fn set_ml_model_type(
            mut self,
            input: std::option::Option<crate::model::MlModelType>,
        ) -> Self {
            self.ml_model_type = input;
            self
        }
        #[allow(missing_docs)] // documentation missing in model
        pub fn score_threshold(mut self, input: f32) -> Self {
            self.score_threshold = Some(input);
            self
        }
        #[allow(missing_docs)] // documentation missing in model
        pub fn set_score_threshold(mut self, input: std::option::Option<f32>) -> Self {
            self.score_threshold = input;
            self
        }
        /// <p>The time of the most recent edit to the <code>ScoreThreshold</code>. The time is expressed in epoch time.</p>
        pub fn score_threshold_last_updated_at(
            mut self,
            input: aws_smithy_types::DateTime,
        ) -> Self {
            self.score_threshold_last_updated_at = Some(input);
            self
        }
        /// <p>The time of the most recent edit to the <code>ScoreThreshold</code>. The time is expressed in epoch time.</p>
        pub fn set_score_threshold_last_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.score_threshold_last_updated_at = input;
            self
        }
        /// <p>A description of the most recent details about accessing the <code>MLModel</code>.</p>
        pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
            self.message = Some(input.into());
            self
        }
        /// <p>A description of the most recent details about accessing the <code>MLModel</code>.</p>
        pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.message = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn compute_time(mut self, input: i64) -> Self {
            self.compute_time = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_compute_time(mut self, input: std::option::Option<i64>) -> Self {
            self.compute_time = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn finished_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.finished_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_finished_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.finished_at = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn started_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.started_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_started_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.started_at = input;
            self
        }
        /// Consumes the builder and constructs a [`MlModel`](crate::model::MlModel).
        pub fn build(self) -> crate::model::MlModel {
            crate::model::MlModel {
                ml_model_id: self.ml_model_id,
                training_data_source_id: self.training_data_source_id,
                created_by_iam_user: self.created_by_iam_user,
                created_at: self.created_at,
                last_updated_at: self.last_updated_at,
                name: self.name,
                status: self.status,
                size_in_bytes: self.size_in_bytes,
                endpoint_info: self.endpoint_info,
                training_parameters: self.training_parameters,
                input_data_location_s3: self.input_data_location_s3,
                algorithm: self.algorithm,
                ml_model_type: self.ml_model_type,
                score_threshold: self.score_threshold,
                score_threshold_last_updated_at: self.score_threshold_last_updated_at,
                message: self.message,
                compute_time: self.compute_time,
                finished_at: self.finished_at,
                started_at: self.started_at,
            }
        }
    }
}
impl MlModel {
    /// Creates a new builder-style object to manufacture [`MlModel`](crate::model::MlModel).
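    ///
    /// A minimal usage sketch; the identifiers and values below are illustrative, not real resources:
    ///
    /// ```text
    /// let model = MlModel::builder()
    ///     .ml_model_id("ml-example-id")
    ///     .name("example model")
    ///     .training_data_source_id("ds-example-id")
    ///     .training_parameters("sgd.maxPasses", "10")
    ///     .build();
    /// assert_eq!(model.ml_model_id(), Some("ml-example-id"));
    /// ```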
    pub fn builder() -> crate::model::ml_model::Builder {
        crate::model::ml_model::Builder::default()
    }
}

/// When writing a match expression against `Algorithm`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let algorithm = unimplemented!();
/// match algorithm {
///     Algorithm::Sgd => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `algorithm` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `Algorithm::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `Algorithm::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// and calling `as_str` on it yields `"NewFeature"`.
/// The match expression remains forward-compatible when executed with a newer
/// version of the SDK in which the variant `Algorithm::NewFeature` is defined.
/// Specifically, when `algorithm` represents `NewFeature`,
/// the execution path still hits the second-to-last match arm, because
/// calling `as_str` on `Algorithm::NewFeature` also yields `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The function used to train an <code>MLModel</code>. Training choices supported by Amazon ML include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>SGD</code> - Stochastic Gradient Descent.</p>
/// </li>
/// <li>
/// <p>
/// <code>RandomForest</code> - Random forest of decision trees.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum Algorithm {
    #[allow(missing_docs)] // documentation missing in model
    Sgd,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for Algorithm {
    fn from(s: &str) -> Self {
        match s {
            "sgd" => Algorithm::Sgd,
            other => Algorithm::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for Algorithm {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Algorithm::from(s))
    }
}
impl Algorithm {
    /// Returns the `&str` value of the enum member.
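    ///
    /// A small illustration of the string mapping:
    ///
    /// ```text
    /// assert_eq!(Algorithm::Sgd.as_str(), "sgd");
    /// assert_eq!(Algorithm::from("sgd"), Algorithm::Sgd);
    /// ```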
    pub fn as_str(&self) -> &str {
        match self {
            Algorithm::Sgd => "sgd",
            Algorithm::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["sgd"]
    }
}
impl AsRef<str> for Algorithm {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `SortOrder`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let sortorder = unimplemented!();
/// match sortorder {
///     SortOrder::Asc => { /* ... */ },
///     SortOrder::Dsc => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `sortorder` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `SortOrder::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `SortOrder::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// and calling `as_str` on it yields `"NewFeature"`.
/// The match expression remains forward-compatible when executed with a newer
/// version of the SDK in which the variant `SortOrder::NewFeature` is defined.
/// Specifically, when `sortorder` represents `NewFeature`,
/// the execution path still hits the second-to-last match arm, because
/// calling `as_str` on `SortOrder::NewFeature` also yields `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The sort order specified in a listing condition. Possible values include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>asc</code> - Present the information in ascending order (from A-Z).</p>
/// </li>
/// <li>
/// <p>
/// <code>dsc</code> - Present the information in descending order (from Z-A).</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum SortOrder {
    #[allow(missing_docs)] // documentation missing in model
    Asc,
    #[allow(missing_docs)] // documentation missing in model
    Dsc,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for SortOrder {
    fn from(s: &str) -> Self {
        match s {
            "asc" => SortOrder::Asc,
            "dsc" => SortOrder::Dsc,
            other => SortOrder::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for SortOrder {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(SortOrder::from(s))
    }
}
impl SortOrder {
    /// Returns the `&str` value of the enum member.
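    ///
    /// A small illustration of the string mapping:
    ///
    /// ```text
    /// assert_eq!(SortOrder::Asc.as_str(), "asc");
    /// assert_eq!(SortOrder::from("dsc"), SortOrder::Dsc);
    /// ```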
    pub fn as_str(&self) -> &str {
        match self {
            SortOrder::Asc => "asc",
            SortOrder::Dsc => "dsc",
            SortOrder::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["asc", "dsc"]
    }
}
impl AsRef<str> for SortOrder {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `MlModelFilterVariable`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let mlmodelfiltervariable = unimplemented!();
/// match mlmodelfiltervariable {
///     MlModelFilterVariable::Algorithm => { /* ... */ },
///     MlModelFilterVariable::CreatedAt => { /* ... */ },
///     MlModelFilterVariable::IamUser => { /* ... */ },
///     MlModelFilterVariable::LastUpdatedAt => { /* ... */ },
///     MlModelFilterVariable::MlModelType => { /* ... */ },
///     MlModelFilterVariable::Name => { /* ... */ },
///     MlModelFilterVariable::RealTimeEndpointStatus => { /* ... */ },
///     MlModelFilterVariable::Status => { /* ... */ },
///     MlModelFilterVariable::TrainingDatasourceId => { /* ... */ },
///     MlModelFilterVariable::TrainingDataUri => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `mlmodelfiltervariable` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `MlModelFilterVariable::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `MlModelFilterVariable::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// and calling `as_str` on it yields `"NewFeature"`.
/// The match expression remains forward-compatible when executed with a newer
/// version of the SDK in which the variant `MlModelFilterVariable::NewFeature` is defined.
/// Specifically, when `mlmodelfiltervariable` represents `NewFeature`,
/// the execution path still hits the second-to-last match arm, because
/// calling `as_str` on `MlModelFilterVariable::NewFeature` also yields `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum MlModelFilterVariable {
    #[allow(missing_docs)] // documentation missing in model
    Algorithm,
    #[allow(missing_docs)] // documentation missing in model
    CreatedAt,
    #[allow(missing_docs)] // documentation missing in model
    IamUser,
    #[allow(missing_docs)] // documentation missing in model
    LastUpdatedAt,
    #[allow(missing_docs)] // documentation missing in model
    MlModelType,
    #[allow(missing_docs)] // documentation missing in model
    Name,
    #[allow(missing_docs)] // documentation missing in model
    RealTimeEndpointStatus,
    #[allow(missing_docs)] // documentation missing in model
    Status,
    #[allow(missing_docs)] // documentation missing in model
    TrainingDatasourceId,
    #[allow(missing_docs)] // documentation missing in model
    TrainingDataUri,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for MlModelFilterVariable {
    fn from(s: &str) -> Self {
        match s {
            "Algorithm" => MlModelFilterVariable::Algorithm,
            "CreatedAt" => MlModelFilterVariable::CreatedAt,
            "IAMUser" => MlModelFilterVariable::IamUser,
            "LastUpdatedAt" => MlModelFilterVariable::LastUpdatedAt,
            "MLModelType" => MlModelFilterVariable::MlModelType,
            "Name" => MlModelFilterVariable::Name,
            "RealtimeEndpointStatus" => MlModelFilterVariable::RealTimeEndpointStatus,
            "Status" => MlModelFilterVariable::Status,
            "TrainingDataSourceId" => MlModelFilterVariable::TrainingDatasourceId,
            "TrainingDataURI" => MlModelFilterVariable::TrainingDataUri,
            other => {
                MlModelFilterVariable::Unknown(crate::types::UnknownVariantValue(other.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for MlModelFilterVariable {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(MlModelFilterVariable::from(s))
    }
}
impl MlModelFilterVariable {
    /// Returns the `&str` value of the enum member.
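    ///
    /// Note that several string values differ in casing from the variant names, for example:
    ///
    /// ```text
    /// assert_eq!(MlModelFilterVariable::TrainingDatasourceId.as_str(), "TrainingDataSourceId");
    /// assert_eq!(
    ///     MlModelFilterVariable::from("RealtimeEndpointStatus"),
    ///     MlModelFilterVariable::RealTimeEndpointStatus
    /// );
    /// ```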
    pub fn as_str(&self) -> &str {
        match self {
            MlModelFilterVariable::Algorithm => "Algorithm",
            MlModelFilterVariable::CreatedAt => "CreatedAt",
            MlModelFilterVariable::IamUser => "IAMUser",
            MlModelFilterVariable::LastUpdatedAt => "LastUpdatedAt",
            MlModelFilterVariable::MlModelType => "MLModelType",
            MlModelFilterVariable::Name => "Name",
            MlModelFilterVariable::RealTimeEndpointStatus => "RealtimeEndpointStatus",
            MlModelFilterVariable::Status => "Status",
            MlModelFilterVariable::TrainingDatasourceId => "TrainingDataSourceId",
            MlModelFilterVariable::TrainingDataUri => "TrainingDataURI",
            MlModelFilterVariable::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
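    ///
    /// For example, a sketch of checking a user-supplied filter name against the known set:
    ///
    /// ```text
    /// assert!(MlModelFilterVariable::values().contains(&"Status"));
    /// ```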
    pub const fn values() -> &'static [&'static str] {
        &[
            "Algorithm",
            "CreatedAt",
            "IAMUser",
            "LastUpdatedAt",
            "MLModelType",
            "Name",
            "RealtimeEndpointStatus",
            "Status",
            "TrainingDataSourceId",
            "TrainingDataURI",
        ]
    }
}
impl AsRef<str> for MlModelFilterVariable {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p> Represents the output of a <code>GetEvaluation</code> operation. </p>
/// <p>The content consists of the detailed metadata and data file information and the current status of the <code>Evaluation</code>.</p>
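///
/// A short sketch of reading a returned `Evaluation` (here `evaluation` is assumed to come
/// from a `GetEvaluation` response):
///
/// ```text
/// if let Some(name) = evaluation.name() {
///     println!("evaluation name: {name}");
/// }
/// ```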
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Evaluation {
    /// <p>The ID that is assigned to the <code>Evaluation</code> at creation.</p>
    #[doc(hidden)]
    pub evaluation_id: std::option::Option<std::string::String>,
    /// <p>The ID of the <code>MLModel</code> that is the focus of the evaluation.</p>
    #[doc(hidden)]
    pub ml_model_id: std::option::Option<std::string::String>,
    /// <p>The ID of the <code>DataSource</code> that is used to evaluate the <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub evaluation_data_source_id: std::option::Option<std::string::String>,
    /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.</p>
    #[doc(hidden)]
    pub input_data_location_s3: std::option::Option<std::string::String>,
    /// <p>The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    #[doc(hidden)]
    pub created_by_iam_user: std::option::Option<std::string::String>,
    /// <p>The time that the <code>Evaluation</code> was created. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The time of the most recent edit to the <code>Evaluation</code>. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A user-supplied name or description of the <code>Evaluation</code>. </p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>The status of the evaluation. This element can have one of the following values:</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an <code>MLModel</code>.</p> </li>
    /// <li> <p> <code>INPROGRESS</code> - The evaluation is underway.</p> </li>
    /// <li> <p> <code>FAILED</code> - The request to evaluate an <code>MLModel</code> did not run to completion. It is not usable.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - The evaluation process completed successfully.</p> </li>
    /// <li> <p> <code>DELETED</code> - The <code>Evaluation</code> is marked as deleted. It is not usable.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub status: std::option::Option<crate::model::EntityStatus>,
    /// <p>Measurements of how well the <code>MLModel</code> performed, using observations referenced by the <code>DataSource</code>. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p>
    /// <ul>
    /// <li> <p>BinaryAUC: A binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li>
    /// <li> <p>RegressionRMSE: A regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li>
    /// <li> <p>MulticlassAvgFScore: A multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li>
    /// </ul>
    /// <p> For more information about performance metrics, please see the <a href="https://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
    #[doc(hidden)]
    pub performance_metrics: std::option::Option<crate::model::PerformanceMetrics>,
    /// <p>A description of the most recent details about evaluating the <code>MLModel</code>.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub compute_time: std::option::Option<i64>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub finished_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub started_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl Evaluation {
    /// <p>The ID that is assigned to the <code>Evaluation</code> at creation.</p>
    pub fn evaluation_id(&self) -> std::option::Option<&str> {
        self.evaluation_id.as_deref()
    }
    /// <p>The ID of the <code>MLModel</code> that is the focus of the evaluation.</p>
    pub fn ml_model_id(&self) -> std::option::Option<&str> {
        self.ml_model_id.as_deref()
    }
    /// <p>The ID of the <code>DataSource</code> that is used to evaluate the <code>MLModel</code>.</p>
    pub fn evaluation_data_source_id(&self) -> std::option::Option<&str> {
        self.evaluation_data_source_id.as_deref()
    }
    /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.</p>
    pub fn input_data_location_s3(&self) -> std::option::Option<&str> {
        self.input_data_location_s3.as_deref()
    }
    /// <p>The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    pub fn created_by_iam_user(&self) -> std::option::Option<&str> {
        self.created_by_iam_user.as_deref()
    }
    /// <p>The time that the <code>Evaluation</code> was created. The time is expressed in epoch time.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The time of the most recent edit to the <code>Evaluation</code>. The time is expressed in epoch time.</p>
    pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_updated_at.as_ref()
    }
    /// <p>A user-supplied name or description of the <code>Evaluation</code>. </p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_deref()
    }
    /// <p>The status of the evaluation. This element can have one of the following values:</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an <code>MLModel</code>.</p> </li>
    /// <li> <p> <code>INPROGRESS</code> - The evaluation is underway.</p> </li>
    /// <li> <p> <code>FAILED</code> - The request to evaluate an <code>MLModel</code> did not run to completion. It is not usable.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - The evaluation process completed successfully.</p> </li>
    /// <li> <p> <code>DELETED</code> - The <code>Evaluation</code> is marked as deleted. It is not usable.</p> </li>
    /// </ul>
    pub fn status(&self) -> std::option::Option<&crate::model::EntityStatus> {
        self.status.as_ref()
    }
    /// <p>Measurements of how well the <code>MLModel</code> performed, using observations referenced by the <code>DataSource</code>. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p>
    /// <ul>
    /// <li> <p>BinaryAUC: A binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li>
    /// <li> <p>RegressionRMSE: A regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li>
    /// <li> <p>MulticlassAvgFScore: A multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li>
    /// </ul>
    /// <p> For more information about performance metrics, please see the <a href="https://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
    pub fn performance_metrics(&self) -> std::option::Option<&crate::model::PerformanceMetrics> {
        self.performance_metrics.as_ref()
    }
    /// <p>A description of the most recent details about evaluating the <code>MLModel</code>.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_deref()
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn compute_time(&self) -> std::option::Option<i64> {
        self.compute_time
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn finished_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.finished_at.as_ref()
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn started_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.started_at.as_ref()
    }
}
/// See [`Evaluation`](crate::model::Evaluation).
pub mod evaluation {

    /// A builder for [`Evaluation`](crate::model::Evaluation).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) evaluation_id: std::option::Option<std::string::String>,
        pub(crate) ml_model_id: std::option::Option<std::string::String>,
        pub(crate) evaluation_data_source_id: std::option::Option<std::string::String>,
        pub(crate) input_data_location_s3: std::option::Option<std::string::String>,
        pub(crate) created_by_iam_user: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) status: std::option::Option<crate::model::EntityStatus>,
        pub(crate) performance_metrics: std::option::Option<crate::model::PerformanceMetrics>,
        pub(crate) message: std::option::Option<std::string::String>,
        pub(crate) compute_time: std::option::Option<i64>,
        pub(crate) finished_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) started_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The ID that is assigned to the <code>Evaluation</code> at creation.</p>
        pub fn evaluation_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.evaluation_id = Some(input.into());
            self
        }
        /// <p>The ID that is assigned to the <code>Evaluation</code> at creation.</p>
        pub fn set_evaluation_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.evaluation_id = input;
            self
        }
        /// <p>The ID of the <code>MLModel</code> that is the focus of the evaluation.</p>
        pub fn ml_model_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.ml_model_id = Some(input.into());
            self
        }
        /// <p>The ID of the <code>MLModel</code> that is the focus of the evaluation.</p>
        pub fn set_ml_model_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.ml_model_id = input;
            self
        }
        /// <p>The ID of the <code>DataSource</code> that is used to evaluate the <code>MLModel</code>.</p>
        pub fn evaluation_data_source_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.evaluation_data_source_id = Some(input.into());
            self
        }
        /// <p>The ID of the <code>DataSource</code> that is used to evaluate the <code>MLModel</code>.</p>
        pub fn set_evaluation_data_source_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.evaluation_data_source_id = input;
            self
        }
        /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.</p>
        pub fn input_data_location_s3(mut self, input: impl Into<std::string::String>) -> Self {
            self.input_data_location_s3 = Some(input.into());
            self
        }
        /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.</p>
        pub fn set_input_data_location_s3(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.input_data_location_s3 = input;
            self
        }
        /// <p>The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn created_by_iam_user(mut self, input: impl Into<std::string::String>) -> Self {
            self.created_by_iam_user = Some(input.into());
            self
        }
        /// <p>The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn set_created_by_iam_user(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.created_by_iam_user = input;
            self
        }
        /// <p>The time that the <code>Evaluation</code> was created. The time is expressed in epoch time.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The time that the <code>Evaluation</code> was created. The time is expressed in epoch time.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The time of the most recent edit to the <code>Evaluation</code>. The time is expressed in epoch time.</p>
        pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.last_updated_at = Some(input);
            self
        }
        /// <p>The time of the most recent edit to the <code>Evaluation</code>. The time is expressed in epoch time.</p>
        pub fn set_last_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.last_updated_at = input;
            self
        }
        /// <p>A user-supplied name or description of the <code>Evaluation</code>. </p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>A user-supplied name or description of the <code>Evaluation</code>. </p>
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>The status of the evaluation. This element can have one of the following values:</p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an <code>MLModel</code>.</p> </li>
        /// <li> <p> <code>INPROGRESS</code> - The evaluation is underway.</p> </li>
        /// <li> <p> <code>FAILED</code> - The request to evaluate an <code>MLModel</code> did not run to completion. It is not usable.</p> </li>
        /// <li> <p> <code>COMPLETED</code> - The evaluation process completed successfully.</p> </li>
        /// <li> <p> <code>DELETED</code> - The <code>Evaluation</code> is marked as deleted. It is not usable.</p> </li>
        /// </ul>
        pub fn status(mut self, input: crate::model::EntityStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// <p>The status of the evaluation. This element can have one of the following values:</p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an <code>MLModel</code>.</p> </li>
        /// <li> <p> <code>INPROGRESS</code> - The evaluation is underway.</p> </li>
        /// <li> <p> <code>FAILED</code> - The request to evaluate an <code>MLModel</code> did not run to completion. It is not usable.</p> </li>
        /// <li> <p> <code>COMPLETED</code> - The evaluation process completed successfully.</p> </li>
        /// <li> <p> <code>DELETED</code> - The <code>Evaluation</code> is marked as deleted. It is not usable.</p> </li>
        /// </ul>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::EntityStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>Measurements of how well the <code>MLModel</code> performed, using observations referenced by the <code>DataSource</code>. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p>
        /// <ul>
        /// <li> <p>BinaryAUC: A binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li>
        /// <li> <p>RegressionRMSE: A regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li>
        /// <li> <p>MulticlassAvgFScore: A multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li>
        /// </ul>
        /// <p> For more information about performance metrics, please see the <a href="https://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
        pub fn performance_metrics(mut self, input: crate::model::PerformanceMetrics) -> Self {
            self.performance_metrics = Some(input);
            self
        }
        /// <p>Measurements of how well the <code>MLModel</code> performed, using observations referenced by the <code>DataSource</code>. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p>
        /// <ul>
        /// <li> <p>BinaryAUC: A binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li>
        /// <li> <p>RegressionRMSE: A regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li>
        /// <li> <p>MulticlassAvgFScore: A multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li>
        /// </ul>
        /// <p> For more information about performance metrics, please see the <a href="https://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
        pub fn set_performance_metrics(
            mut self,
            input: std::option::Option<crate::model::PerformanceMetrics>,
        ) -> Self {
            self.performance_metrics = input;
            self
        }
        /// <p>A description of the most recent details about evaluating the <code>MLModel</code>.</p>
        pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
            self.message = Some(input.into());
            self
        }
        /// <p>A description of the most recent details about evaluating the <code>MLModel</code>.</p>
        pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.message = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn compute_time(mut self, input: i64) -> Self {
            self.compute_time = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_compute_time(mut self, input: std::option::Option<i64>) -> Self {
            self.compute_time = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn finished_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.finished_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_finished_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.finished_at = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn started_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.started_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_started_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.started_at = input;
            self
        }
        /// Consumes the builder and constructs an [`Evaluation`](crate::model::Evaluation).
        pub fn build(self) -> crate::model::Evaluation {
            crate::model::Evaluation {
                evaluation_id: self.evaluation_id,
                ml_model_id: self.ml_model_id,
                evaluation_data_source_id: self.evaluation_data_source_id,
                input_data_location_s3: self.input_data_location_s3,
                created_by_iam_user: self.created_by_iam_user,
                created_at: self.created_at,
                last_updated_at: self.last_updated_at,
                name: self.name,
                status: self.status,
                performance_metrics: self.performance_metrics,
                message: self.message,
                compute_time: self.compute_time,
                finished_at: self.finished_at,
                started_at: self.started_at,
            }
        }
    }
}
impl Evaluation {
    /// Creates a new builder-style object to manufacture [`Evaluation`](crate::model::Evaluation).
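    ///
    /// A minimal usage sketch (the ID and name values below are hypothetical placeholders,
    /// not values defined by this crate):
    ///
    /// ```text
    /// let evaluation = Evaluation::builder()
    ///     .evaluation_id("ev-example")
    ///     .ml_model_id("ml-example")
    ///     .name("example evaluation")
    ///     .build();
    /// ```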
    pub fn builder() -> crate::model::evaluation::Builder {
        crate::model::evaluation::Builder::default()
    }
}

/// When writing a match expression against `EvaluationFilterVariable`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an
/// enum variant in the current version of the SDK, your code should continue to work
/// when you upgrade the SDK to a future version in which the enum does include a
/// variant for that feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let evaluationfiltervariable = unimplemented!();
/// match evaluationfiltervariable {
///     EvaluationFilterVariable::CreatedAt => { /* ... */ },
///     EvaluationFilterVariable::DatasourceId => { /* ... */ },
///     EvaluationFilterVariable::DataUri => { /* ... */ },
///     EvaluationFilterVariable::IamUser => { /* ... */ },
///     EvaluationFilterVariable::LastUpdatedAt => { /* ... */ },
///     EvaluationFilterVariable::MlModelId => { /* ... */ },
///     EvaluationFilterVariable::Name => { /* ... */ },
///     EvaluationFilterVariable::Status => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `evaluationfiltervariable` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `EvaluationFilterVariable::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `EvaluationFilterVariable::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK in which the variant `EvaluationFilterVariable::NewFeature` is defined.
/// Specifically, when `evaluationfiltervariable` represents `NewFeature`,
/// the execution path will hit the second-to-last match arm, as before, by virtue of
/// calling `as_str` on `EvaluationFilterVariable::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>A list of the variables to use in searching or filtering <code>Evaluation</code>.</p>
/// <ul>
/// <li>
/// <p>
/// <code>CreatedAt</code> - Sets the search criteria to <code>Evaluation</code> creation date.</p>
/// </li>
/// <li>
/// <p>
/// <code>Status</code> - Sets the search criteria to <code>Evaluation</code> status.</p>
/// </li>
/// <li>
/// <p>
/// <code>Name</code> - Sets the search criteria to the contents of <code>Evaluation</code>
/// <code>Name</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>IAMUser</code> - Sets the search criteria to the user account that invoked an evaluation.</p>
/// </li>
/// <li>
/// <p>
/// <code>MLModelId</code> - Sets the search criteria to the <code>Predictor</code> that was evaluated.</p>
/// </li>
/// <li>
/// <p>
/// <code>DataSourceId</code> - Sets the search criteria to the <code>DataSource</code> used in evaluation.</p>
/// </li>
/// <li>
/// <p>
/// <code>DataUri</code> - Sets the search criteria to the data file(s) used in evaluation. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum EvaluationFilterVariable {
    #[allow(missing_docs)] // documentation missing in model
    CreatedAt,
    #[allow(missing_docs)] // documentation missing in model
    DatasourceId,
    #[allow(missing_docs)] // documentation missing in model
    DataUri,
    #[allow(missing_docs)] // documentation missing in model
    IamUser,
    #[allow(missing_docs)] // documentation missing in model
    LastUpdatedAt,
    #[allow(missing_docs)] // documentation missing in model
    MlModelId,
    #[allow(missing_docs)] // documentation missing in model
    Name,
    #[allow(missing_docs)] // documentation missing in model
    Status,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for EvaluationFilterVariable {
    fn from(s: &str) -> Self {
        match s {
            "CreatedAt" => EvaluationFilterVariable::CreatedAt,
            "DataSourceId" => EvaluationFilterVariable::DatasourceId,
            "DataURI" => EvaluationFilterVariable::DataUri,
            "IAMUser" => EvaluationFilterVariable::IamUser,
            "LastUpdatedAt" => EvaluationFilterVariable::LastUpdatedAt,
            "MLModelId" => EvaluationFilterVariable::MlModelId,
            "Name" => EvaluationFilterVariable::Name,
            "Status" => EvaluationFilterVariable::Status,
            other => EvaluationFilterVariable::Unknown(crate::types::UnknownVariantValue(
                other.to_owned(),
            )),
        }
    }
}
impl std::str::FromStr for EvaluationFilterVariable {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(EvaluationFilterVariable::from(s))
    }
}
impl EvaluationFilterVariable {
    /// Returns the `&str` value of the enum member.
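    ///
    /// Note that the string form does not always mirror the variant name; a short sketch:
    ///
    /// ```text
    /// let variable = EvaluationFilterVariable::from("DataSourceId");
    /// assert_eq!(variable, EvaluationFilterVariable::DatasourceId);
    /// assert_eq!(variable.as_str(), "DataSourceId");
    /// ```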
    pub fn as_str(&self) -> &str {
        match self {
            EvaluationFilterVariable::CreatedAt => "CreatedAt",
            EvaluationFilterVariable::DatasourceId => "DataSourceId",
            EvaluationFilterVariable::DataUri => "DataURI",
            EvaluationFilterVariable::IamUser => "IAMUser",
            EvaluationFilterVariable::LastUpdatedAt => "LastUpdatedAt",
            EvaluationFilterVariable::MlModelId => "MLModelId",
            EvaluationFilterVariable::Name => "Name",
            EvaluationFilterVariable::Status => "Status",
            EvaluationFilterVariable::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "CreatedAt",
            "DataSourceId",
            "DataURI",
            "IAMUser",
            "LastUpdatedAt",
            "MLModelId",
            "Name",
            "Status",
        ]
    }
}
impl AsRef<str> for EvaluationFilterVariable {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p> Represents the output of the <code>GetDataSource</code> operation. </p>
/// <p> The content consists of the detailed metadata and data file information and the current status of the <code>DataSource</code>. </p>
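///
/// A short sketch of reading a returned `DataSource` (here `data_source` is assumed to come
/// from a `GetDataSource` response):
///
/// ```text
/// if let Some(location) = data_source.data_location_s3() {
///     println!("data lives at {location}");
/// }
/// ```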
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DataSource {
    /// <p>The ID that is assigned to the <code>DataSource</code> during creation.</p>
    #[doc(hidden)]
    pub data_source_id: std::option::Option<std::string::String>,
    /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub data_location_s3: std::option::Option<std::string::String>,
    /// <p>A JSON string that represents the splitting and rearrangement requirement used when this <code>DataSource</code> was created.</p>
    #[doc(hidden)]
    pub data_rearrangement: std::option::Option<std::string::String>,
    /// <p>The AWS user account from which the <code>DataSource</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    #[doc(hidden)]
    pub created_by_iam_user: std::option::Option<std::string::String>,
    /// <p>The time that the <code>DataSource</code> was created. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The time of the most recent edit to the <code>DataSource</code>. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The total number of observations contained in the data files that the <code>DataSource</code> references.</p>
    #[doc(hidden)]
    pub data_size_in_bytes: std::option::Option<i64>,
    /// <p>The number of data files referenced by the <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub number_of_files: std::option::Option<i64>,
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>The current status of the <code>DataSource</code>. This element can have one of the following values: </p>
    /// <ul>
    /// <li> <p>PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a <code>DataSource</code>.</p> </li>
    /// <li> <p>INPROGRESS - The creation process is underway.</p> </li>
    /// <li> <p>FAILED - The request to create a <code>DataSource</code> did not run to completion. It is not usable.</p> </li>
    /// <li> <p>COMPLETED - The creation process completed successfully.</p> </li>
    /// <li> <p>DELETED - The <code>DataSource</code> is marked as deleted. It is not usable.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub status: std::option::Option<crate::model::EntityStatus>,
    /// <p>A description of the most recent details about creating the <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
    /// <p>Describes the <code>DataSource</code> details specific to Amazon Redshift.</p>
    #[doc(hidden)]
    pub redshift_metadata: std::option::Option<crate::model::RedshiftMetadata>,
    /// <p>The datasource details that are specific to Amazon RDS.</p>
    #[doc(hidden)]
    pub rds_metadata: std::option::Option<crate::model::RdsMetadata>,
    /// <p>The Amazon Resource Name (ARN) of an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts">AWS IAM Role</a>, such as the following: arn:aws:iam::account:role/rolename. </p>
    #[doc(hidden)]
    pub role_arn: std::option::Option<std::string::String>,
    /// <p> The parameter is <code>true</code> if statistics need to be generated from the observation data. </p>
    #[doc(hidden)]
    pub compute_statistics: bool,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub compute_time: std::option::Option<i64>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub finished_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub started_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl DataSource {
    /// <p>The ID that is assigned to the <code>DataSource</code> during creation.</p>
    pub fn data_source_id(&self) -> std::option::Option<&str> {
        self.data_source_id.as_deref()
    }
    /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a <code>DataSource</code>.</p>
    pub fn data_location_s3(&self) -> std::option::Option<&str> {
        self.data_location_s3.as_deref()
    }
    /// <p>A JSON string that represents the splitting and rearrangement requirement used when this <code>DataSource</code> was created.</p>
    pub fn data_rearrangement(&self) -> std::option::Option<&str> {
        self.data_rearrangement.as_deref()
    }
    /// <p>The AWS user account from which the <code>DataSource</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    pub fn created_by_iam_user(&self) -> std::option::Option<&str> {
        self.created_by_iam_user.as_deref()
    }
    /// <p>The time that the <code>DataSource</code> was created. The time is expressed in epoch time.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The time of the most recent edit to the <code>DataSource</code>. The time is expressed in epoch time.</p>
    pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_updated_at.as_ref()
    }
    /// <p>The total number of observations contained in the data files that the <code>DataSource</code> references.</p>
    pub fn data_size_in_bytes(&self) -> std::option::Option<i64> {
        self.data_size_in_bytes
    }
    /// <p>The number of data files referenced by the <code>DataSource</code>.</p>
    pub fn number_of_files(&self) -> std::option::Option<i64> {
        self.number_of_files
    }
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_deref()
    }
    /// <p>The current status of the <code>DataSource</code>. This element can have one of the following values: </p>
    /// <ul>
    /// <li> <p>PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a <code>DataSource</code>.</p> </li>
    /// <li> <p>INPROGRESS - The creation process is underway.</p> </li>
    /// <li> <p>FAILED - The request to create a <code>DataSource</code> did not run to completion. It is not usable.</p> </li>
    /// <li> <p>COMPLETED - The creation process completed successfully.</p> </li>
    /// <li> <p>DELETED - The <code>DataSource</code> is marked as deleted. It is not usable.</p> </li>
    /// </ul>
    pub fn status(&self) -> std::option::Option<&crate::model::EntityStatus> {
        self.status.as_ref()
    }
    /// <p>A description of the most recent details about creating the <code>DataSource</code>.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_deref()
    }
    /// <p>Describes the <code>DataSource</code> details specific to Amazon Redshift.</p>
    pub fn redshift_metadata(&self) -> std::option::Option<&crate::model::RedshiftMetadata> {
        self.redshift_metadata.as_ref()
    }
    /// <p>The datasource details that are specific to Amazon RDS.</p>
    pub fn rds_metadata(&self) -> std::option::Option<&crate::model::RdsMetadata> {
        self.rds_metadata.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) of an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts">AWS IAM Role</a>, such as the following: arn:aws:iam::account:role/rolename. </p>
    pub fn role_arn(&self) -> std::option::Option<&str> {
        self.role_arn.as_deref()
    }
    /// <p> The parameter is <code>true</code> if statistics need to be generated from the observation data. </p>
    pub fn compute_statistics(&self) -> bool {
        self.compute_statistics
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn compute_time(&self) -> std::option::Option<i64> {
        self.compute_time
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn finished_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.finished_at.as_ref()
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn started_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.started_at.as_ref()
    }
}
/// See [`DataSource`](crate::model::DataSource).
pub mod data_source {

    /// A builder for [`DataSource`](crate::model::DataSource).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) data_source_id: std::option::Option<std::string::String>,
        pub(crate) data_location_s3: std::option::Option<std::string::String>,
        pub(crate) data_rearrangement: std::option::Option<std::string::String>,
        pub(crate) created_by_iam_user: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) data_size_in_bytes: std::option::Option<i64>,
        pub(crate) number_of_files: std::option::Option<i64>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) status: std::option::Option<crate::model::EntityStatus>,
        pub(crate) message: std::option::Option<std::string::String>,
        pub(crate) redshift_metadata: std::option::Option<crate::model::RedshiftMetadata>,
        pub(crate) rds_metadata: std::option::Option<crate::model::RdsMetadata>,
        pub(crate) role_arn: std::option::Option<std::string::String>,
        pub(crate) compute_statistics: std::option::Option<bool>,
        pub(crate) compute_time: std::option::Option<i64>,
        pub(crate) finished_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) started_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The ID that is assigned to the <code>DataSource</code> during creation.</p>
        pub fn data_source_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_source_id = Some(input.into());
            self
        }
        /// <p>The ID that is assigned to the <code>DataSource</code> during creation.</p>
        pub fn set_data_source_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_source_id = input;
            self
        }
        /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a <code>DataSource</code>.</p>
        pub fn data_location_s3(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_location_s3 = Some(input.into());
            self
        }
        /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a <code>DataSource</code>.</p>
        pub fn set_data_location_s3(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_location_s3 = input;
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement requirement used when this <code>DataSource</code> was created.</p>
        pub fn data_rearrangement(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_rearrangement = Some(input.into());
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement requirement used when this <code>DataSource</code> was created.</p>
        pub fn set_data_rearrangement(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_rearrangement = input;
            self
        }
        /// <p>The AWS user account from which the <code>DataSource</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn created_by_iam_user(mut self, input: impl Into<std::string::String>) -> Self {
            self.created_by_iam_user = Some(input.into());
            self
        }
        /// <p>The AWS user account from which the <code>DataSource</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn set_created_by_iam_user(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.created_by_iam_user = input;
            self
        }
        /// <p>The time that the <code>DataSource</code> was created. The time is expressed in epoch time.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The time that the <code>DataSource</code> was created. The time is expressed in epoch time.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The time of the most recent edit to the <code>DataSource</code>. The time is expressed in epoch time.</p>
        pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.last_updated_at = Some(input);
            self
        }
        /// <p>The time of the most recent edit to the <code>DataSource</code>. The time is expressed in epoch time.</p>
        pub fn set_last_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.last_updated_at = input;
            self
        }
        /// <p>The total number of observations contained in the data files that the <code>DataSource</code> references.</p>
        pub fn data_size_in_bytes(mut self, input: i64) -> Self {
            self.data_size_in_bytes = Some(input);
            self
        }
        /// <p>The total number of observations contained in the data files that the <code>DataSource</code> references.</p>
        pub fn set_data_size_in_bytes(mut self, input: std::option::Option<i64>) -> Self {
            self.data_size_in_bytes = input;
            self
        }
        /// <p>The number of data files referenced by the <code>DataSource</code>.</p>
        pub fn number_of_files(mut self, input: i64) -> Self {
            self.number_of_files = Some(input);
            self
        }
        /// <p>The number of data files referenced by the <code>DataSource</code>.</p>
        pub fn set_number_of_files(mut self, input: std::option::Option<i64>) -> Self {
            self.number_of_files = input;
            self
        }
        /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>The current status of the <code>DataSource</code>. This element can have one of the following values: </p>
        /// <ul>
        /// <li> <p>PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a <code>DataSource</code>.</p> </li>
        /// <li> <p>INPROGRESS - The creation process is underway.</p> </li>
        /// <li> <p>FAILED - The request to create a <code>DataSource</code> did not run to completion. It is not usable.</p> </li>
        /// <li> <p>COMPLETED - The creation process completed successfully.</p> </li>
        /// <li> <p>DELETED - The <code>DataSource</code> is marked as deleted. It is not usable.</p> </li>
        /// </ul>
        pub fn status(mut self, input: crate::model::EntityStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// <p>The current status of the <code>DataSource</code>. This element can have one of the following values: </p>
        /// <ul>
        /// <li> <p>PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a <code>DataSource</code>.</p> </li>
        /// <li> <p>INPROGRESS - The creation process is underway.</p> </li>
        /// <li> <p>FAILED - The request to create a <code>DataSource</code> did not run to completion. It is not usable.</p> </li>
        /// <li> <p>COMPLETED - The creation process completed successfully.</p> </li>
        /// <li> <p>DELETED - The <code>DataSource</code> is marked as deleted. It is not usable.</p> </li>
        /// </ul>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::EntityStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>A description of the most recent details about creating the <code>DataSource</code>.</p>
        pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
            self.message = Some(input.into());
            self
        }
        /// <p>A description of the most recent details about creating the <code>DataSource</code>.</p>
        pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.message = input;
            self
        }
        /// <p>Describes the <code>DataSource</code> details specific to Amazon Redshift.</p>
        pub fn redshift_metadata(mut self, input: crate::model::RedshiftMetadata) -> Self {
            self.redshift_metadata = Some(input);
            self
        }
        /// <p>Describes the <code>DataSource</code> details specific to Amazon Redshift.</p>
        pub fn set_redshift_metadata(
            mut self,
            input: std::option::Option<crate::model::RedshiftMetadata>,
        ) -> Self {
            self.redshift_metadata = input;
            self
        }
        /// <p>The datasource details that are specific to Amazon RDS.</p>
        pub fn rds_metadata(mut self, input: crate::model::RdsMetadata) -> Self {
            self.rds_metadata = Some(input);
            self
        }
        /// <p>The datasource details that are specific to Amazon RDS.</p>
        pub fn set_rds_metadata(
            mut self,
            input: std::option::Option<crate::model::RdsMetadata>,
        ) -> Self {
            self.rds_metadata = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) of an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts">AWS IAM Role</a>, such as the following: arn:aws:iam::account:role/rolename. </p>
        pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.role_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) of an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts">AWS IAM Role</a>, such as the following: arn:aws:iam::account:role/rolename. </p>
        pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.role_arn = input;
            self
        }
        /// <p> The parameter is <code>true</code> if statistics need to be generated from the observation data. </p>
        pub fn compute_statistics(mut self, input: bool) -> Self {
            self.compute_statistics = Some(input);
            self
        }
        /// <p> The parameter is <code>true</code> if statistics need to be generated from the observation data. </p>
        pub fn set_compute_statistics(mut self, input: std::option::Option<bool>) -> Self {
            self.compute_statistics = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn compute_time(mut self, input: i64) -> Self {
            self.compute_time = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_compute_time(mut self, input: std::option::Option<i64>) -> Self {
            self.compute_time = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn finished_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.finished_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_finished_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.finished_at = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn started_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.started_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_started_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.started_at = input;
            self
        }
        /// Consumes the builder and constructs a [`DataSource`](crate::model::DataSource).
        pub fn build(self) -> crate::model::DataSource {
            crate::model::DataSource {
                data_source_id: self.data_source_id,
                data_location_s3: self.data_location_s3,
                data_rearrangement: self.data_rearrangement,
                created_by_iam_user: self.created_by_iam_user,
                created_at: self.created_at,
                last_updated_at: self.last_updated_at,
                data_size_in_bytes: self.data_size_in_bytes,
                number_of_files: self.number_of_files,
                name: self.name,
                status: self.status,
                message: self.message,
                redshift_metadata: self.redshift_metadata,
                rds_metadata: self.rds_metadata,
                role_arn: self.role_arn,
                compute_statistics: self.compute_statistics.unwrap_or_default(),
                compute_time: self.compute_time,
                finished_at: self.finished_at,
                started_at: self.started_at,
            }
        }
    }
}
impl DataSource {
    /// Creates a new builder-style object to manufacture [`DataSource`](crate::model::DataSource).
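    ///
    /// A minimal usage sketch (the ID and S3 location below are hypothetical placeholders,
    /// not values defined by this crate):
    ///
    /// ```text
    /// let data_source = DataSource::builder()
    ///     .data_source_id("ds-example")
    ///     .data_location_s3("s3://example-bucket/training-data.csv")
    ///     .compute_statistics(true)
    ///     .build();
    /// ```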
    pub fn builder() -> crate::model::data_source::Builder {
        crate::model::data_source::Builder::default()
    }
}

/// When writing a match expression against `DataSourceFilterVariable`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an
/// enum variant in the current version of the SDK, your code should continue to work
/// when you upgrade the SDK to a future version in which the enum does include a
/// variant for that feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let datasourcefiltervariable = unimplemented!();
/// match datasourcefiltervariable {
///     DataSourceFilterVariable::CreatedAt => { /* ... */ },
///     DataSourceFilterVariable::DataUri => { /* ... */ },
///     DataSourceFilterVariable::IamUser => { /* ... */ },
///     DataSourceFilterVariable::LastUpdatedAt => { /* ... */ },
///     DataSourceFilterVariable::Name => { /* ... */ },
///     DataSourceFilterVariable::Status => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `datasourcefiltervariable` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `DataSourceFilterVariable::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DataSourceFilterVariable::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK in which the variant `DataSourceFilterVariable::NewFeature` is defined.
/// Specifically, when `datasourcefiltervariable` represents `NewFeature`,
/// the execution path will hit the second-to-last match arm, as before, by virtue of
/// calling `as_str` on `DataSourceFilterVariable::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>A list of the variables to use in searching or filtering <code>DataSource</code>.</p>
/// <ul>
/// <li>
/// <p>
/// <code>CreatedAt</code> - Sets the search criteria to <code>DataSource</code> creation date.</p>
/// </li>
/// <li>
/// <p>
/// <code>Status</code> - Sets the search criteria to <code>DataSource</code> status.</p>
/// </li>
/// <li>
/// <p>
/// <code>Name</code> - Sets the search criteria to the contents of <code>DataSource</code>
/// <code>Name</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>DataUri</code> - Sets the search criteria to the URI of data files used to create the <code>DataSource</code>. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.</p>
/// </li>
/// <li>
/// <p>
/// <code>IAMUser</code> - Sets the search criteria to the user account that invoked the <code>DataSource</code> creation.</p>
/// </li>
/// </ul>
///
/// <p>
/// <b>Note:</b>
/// The variable names should match the variable names in the <code>DataSource</code>.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DataSourceFilterVariable {
    #[allow(missing_docs)] // documentation missing in model
    CreatedAt,
    #[allow(missing_docs)] // documentation missing in model
    DataUri,
    #[allow(missing_docs)] // documentation missing in model
    IamUser,
    #[allow(missing_docs)] // documentation missing in model
    LastUpdatedAt,
    #[allow(missing_docs)] // documentation missing in model
    Name,
    #[allow(missing_docs)] // documentation missing in model
    Status,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DataSourceFilterVariable {
    fn from(s: &str) -> Self {
        match s {
            "CreatedAt" => DataSourceFilterVariable::CreatedAt,
            "DataLocationS3" => DataSourceFilterVariable::DataUri,
            "IAMUser" => DataSourceFilterVariable::IamUser,
            "LastUpdatedAt" => DataSourceFilterVariable::LastUpdatedAt,
            "Name" => DataSourceFilterVariable::Name,
            "Status" => DataSourceFilterVariable::Status,
            other => DataSourceFilterVariable::Unknown(crate::types::UnknownVariantValue(
                other.to_owned(),
            )),
        }
    }
}
impl std::str::FromStr for DataSourceFilterVariable {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(DataSourceFilterVariable::from(s))
    }
}
impl DataSourceFilterVariable {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            DataSourceFilterVariable::CreatedAt => "CreatedAt",
            DataSourceFilterVariable::DataUri => "DataLocationS3",
            DataSourceFilterVariable::IamUser => "IAMUser",
            DataSourceFilterVariable::LastUpdatedAt => "LastUpdatedAt",
            DataSourceFilterVariable::Name => "Name",
            DataSourceFilterVariable::Status => "Status",
            DataSourceFilterVariable::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "CreatedAt",
            "DataLocationS3",
            "IAMUser",
            "LastUpdatedAt",
            "Name",
            "Status",
        ]
    }
}
impl AsRef<str> for DataSourceFilterVariable {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
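
// Illustrative usage sketch (not part of the generated model code): shows how the
// `From<&str>` / `as_str` pair defined above round-trips both known and unknown
// values, which is what makes the forward-compatible match pattern in the docs work.
// The string "SomethingNew" is a hypothetical placeholder for a value this SDK
// version does not model.
#[cfg(test)]
mod data_source_filter_variable_usage {
    use super::DataSourceFilterVariable;

    #[test]
    fn round_trips_known_and_unknown_values() {
        // A known wire value maps to a concrete variant and back to the same string.
        let known = DataSourceFilterVariable::from("DataLocationS3");
        assert_eq!(known, DataSourceFilterVariable::DataUri);
        assert_eq!(known.as_str(), "DataLocationS3");

        // An unrecognized wire value is preserved inside `Unknown`, so `as_str`
        // still yields the original string for string-based matching.
        let unknown = DataSourceFilterVariable::from("SomethingNew");
        assert_eq!(unknown.as_str(), "SomethingNew");

        // `values()` lists only the wire values known to this version of the SDK.
        assert!(DataSourceFilterVariable::values().contains(&"CreatedAt"));
    }
}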

/// <p> Represents the output of a <code>GetBatchPrediction</code> operation.</p>
/// <p> The content consists of the detailed metadata, the status, and the data file information of a <code>Batch Prediction</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct BatchPrediction {
    /// <p>The ID assigned to the <code>BatchPrediction</code> at creation. This value should be identical to the value of the <code>BatchPredictionID</code> in the request. </p>
    #[doc(hidden)]
    pub batch_prediction_id: std::option::Option<std::string::String>,
    /// <p>The ID of the <code>MLModel</code> that generated predictions for the <code>BatchPrediction</code> request.</p>
    #[doc(hidden)]
    pub ml_model_id: std::option::Option<std::string::String>,
    /// <p>The ID of the <code>DataSource</code> that points to the group of observations to predict.</p>
    #[doc(hidden)]
    pub batch_prediction_data_source_id: std::option::Option<std::string::String>,
    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
    #[doc(hidden)]
    pub input_data_location_s3: std::option::Option<std::string::String>,
    /// <p>The AWS user account that invoked the <code>BatchPrediction</code>. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    #[doc(hidden)]
    pub created_by_iam_user: std::option::Option<std::string::String>,
    /// <p>The time that the <code>BatchPrediction</code> was created. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The time of the most recent edit to the <code>BatchPrediction</code>. The time is expressed in epoch time.</p>
    #[doc(hidden)]
    pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A user-supplied name or description of the <code>BatchPrediction</code>.</p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>The status of the <code>BatchPrediction</code>. This element can have one of the following values:</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.</p> </li>
    /// <li> <p> <code>INPROGRESS</code> - The process is underway.</p> </li>
    /// <li> <p> <code>FAILED</code> - The request to perform a batch prediction did not run to completion. It is not usable.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - The batch prediction process completed successfully.</p> </li>
    /// <li> <p> <code>DELETED</code> - The <code>BatchPrediction</code> is marked as deleted. It is not usable.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub status: std::option::Option<crate::model::EntityStatus>,
    /// <p>The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the <code>s3 key</code> portion of the <code>outputURI</code> field: ':', '//', '/./', '/../'.</p>
    #[doc(hidden)]
    pub output_uri: std::option::Option<std::string::String>,
    /// <p>A description of the most recent details about processing the batch prediction request.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub compute_time: std::option::Option<i64>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub finished_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A timestamp represented in epoch time.</p>
    #[doc(hidden)]
    pub started_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub total_record_count: std::option::Option<i64>,
    /// <p>Long integer type that is a 64-bit signed number.</p>
    #[doc(hidden)]
    pub invalid_record_count: std::option::Option<i64>,
}
impl BatchPrediction {
    /// <p>The ID assigned to the <code>BatchPrediction</code> at creation. This value should be identical to the value of the <code>BatchPredictionID</code> in the request. </p>
    pub fn batch_prediction_id(&self) -> std::option::Option<&str> {
        self.batch_prediction_id.as_deref()
    }
    /// <p>The ID of the <code>MLModel</code> that generated predictions for the <code>BatchPrediction</code> request.</p>
    pub fn ml_model_id(&self) -> std::option::Option<&str> {
        self.ml_model_id.as_deref()
    }
    /// <p>The ID of the <code>DataSource</code> that points to the group of observations to predict.</p>
    pub fn batch_prediction_data_source_id(&self) -> std::option::Option<&str> {
        self.batch_prediction_data_source_id.as_deref()
    }
    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
    pub fn input_data_location_s3(&self) -> std::option::Option<&str> {
        self.input_data_location_s3.as_deref()
    }
    /// <p>The AWS user account that invoked the <code>BatchPrediction</code>. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    pub fn created_by_iam_user(&self) -> std::option::Option<&str> {
        self.created_by_iam_user.as_deref()
    }
    /// <p>The time that the <code>BatchPrediction</code> was created. The time is expressed in epoch time.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The time of the most recent edit to the <code>BatchPrediction</code>. The time is expressed in epoch time.</p>
    pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_updated_at.as_ref()
    }
    /// <p>A user-supplied name or description of the <code>BatchPrediction</code>.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_deref()
    }
    /// <p>The status of the <code>BatchPrediction</code>. This element can have one of the following values:</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.</p> </li>
    /// <li> <p> <code>INPROGRESS</code> - The process is underway.</p> </li>
    /// <li> <p> <code>FAILED</code> - The request to perform a batch prediction did not run to completion. It is not usable.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - The batch prediction process completed successfully.</p> </li>
    /// <li> <p> <code>DELETED</code> - The <code>BatchPrediction</code> is marked as deleted. It is not usable.</p> </li>
    /// </ul>
    pub fn status(&self) -> std::option::Option<&crate::model::EntityStatus> {
        self.status.as_ref()
    }
    /// <p>The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the <code>s3 key</code> portion of the <code>outputURI</code> field: ':', '//', '/./', '/../'.</p>
    pub fn output_uri(&self) -> std::option::Option<&str> {
        self.output_uri.as_deref()
    }
    /// <p>A description of the most recent details about processing the batch prediction request.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_deref()
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn compute_time(&self) -> std::option::Option<i64> {
        self.compute_time
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn finished_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.finished_at.as_ref()
    }
    /// <p>A timestamp represented in epoch time.</p>
    pub fn started_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.started_at.as_ref()
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn total_record_count(&self) -> std::option::Option<i64> {
        self.total_record_count
    }
    /// <p>Long integer type that is a 64-bit signed number.</p>
    pub fn invalid_record_count(&self) -> std::option::Option<i64> {
        self.invalid_record_count
    }
}
/// See [`BatchPrediction`](crate::model::BatchPrediction).
pub mod batch_prediction {

    /// A builder for [`BatchPrediction`](crate::model::BatchPrediction).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) batch_prediction_id: std::option::Option<std::string::String>,
        pub(crate) ml_model_id: std::option::Option<std::string::String>,
        pub(crate) batch_prediction_data_source_id: std::option::Option<std::string::String>,
        pub(crate) input_data_location_s3: std::option::Option<std::string::String>,
        pub(crate) created_by_iam_user: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) status: std::option::Option<crate::model::EntityStatus>,
        pub(crate) output_uri: std::option::Option<std::string::String>,
        pub(crate) message: std::option::Option<std::string::String>,
        pub(crate) compute_time: std::option::Option<i64>,
        pub(crate) finished_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) started_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) total_record_count: std::option::Option<i64>,
        pub(crate) invalid_record_count: std::option::Option<i64>,
    }
    impl Builder {
        /// <p>The ID assigned to the <code>BatchPrediction</code> at creation. This value should be identical to the value of the <code>BatchPredictionID</code> in the request. </p>
        pub fn batch_prediction_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.batch_prediction_id = Some(input.into());
            self
        }
        /// <p>The ID assigned to the <code>BatchPrediction</code> at creation. This value should be identical to the value of the <code>BatchPredictionID</code> in the request. </p>
        pub fn set_batch_prediction_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.batch_prediction_id = input;
            self
        }
        /// <p>The ID of the <code>MLModel</code> that generated predictions for the <code>BatchPrediction</code> request.</p>
        pub fn ml_model_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.ml_model_id = Some(input.into());
            self
        }
        /// <p>The ID of the <code>MLModel</code> that generated predictions for the <code>BatchPrediction</code> request.</p>
        pub fn set_ml_model_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.ml_model_id = input;
            self
        }
        /// <p>The ID of the <code>DataSource</code> that points to the group of observations to predict.</p>
        pub fn batch_prediction_data_source_id(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.batch_prediction_data_source_id = Some(input.into());
            self
        }
        /// <p>The ID of the <code>DataSource</code> that points to the group of observations to predict.</p>
        pub fn set_batch_prediction_data_source_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.batch_prediction_data_source_id = input;
            self
        }
        /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
        pub fn input_data_location_s3(mut self, input: impl Into<std::string::String>) -> Self {
            self.input_data_location_s3 = Some(input.into());
            self
        }
        /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
        pub fn set_input_data_location_s3(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.input_data_location_s3 = input;
            self
        }
        /// <p>The AWS user account that invoked the <code>BatchPrediction</code>. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn created_by_iam_user(mut self, input: impl Into<std::string::String>) -> Self {
            self.created_by_iam_user = Some(input.into());
            self
        }
        /// <p>The AWS user account that invoked the <code>BatchPrediction</code>. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
        pub fn set_created_by_iam_user(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.created_by_iam_user = input;
            self
        }
        /// <p>The time that the <code>BatchPrediction</code> was created. The time is expressed in epoch time.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The time that the <code>BatchPrediction</code> was created. The time is expressed in epoch time.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The time of the most recent edit to the <code>BatchPrediction</code>. The time is expressed in epoch time.</p>
        pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.last_updated_at = Some(input);
            self
        }
        /// <p>The time of the most recent edit to the <code>BatchPrediction</code>. The time is expressed in epoch time.</p>
        pub fn set_last_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.last_updated_at = input;
            self
        }
        /// <p>A user-supplied name or description of the <code>BatchPrediction</code>.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>A user-supplied name or description of the <code>BatchPrediction</code>.</p>
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>The status of the <code>BatchPrediction</code>. This element can have one of the following values:</p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.</p> </li>
        /// <li> <p> <code>INPROGRESS</code> - The process is underway.</p> </li>
        /// <li> <p> <code>FAILED</code> - The request to perform a batch prediction did not run to completion. It is not usable.</p> </li>
        /// <li> <p> <code>COMPLETED</code> - The batch prediction process completed successfully.</p> </li>
        /// <li> <p> <code>DELETED</code> - The <code>BatchPrediction</code> is marked as deleted. It is not usable.</p> </li>
        /// </ul>
        pub fn status(mut self, input: crate::model::EntityStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// <p>The status of the <code>BatchPrediction</code>. This element can have one of the following values:</p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.</p> </li>
        /// <li> <p> <code>INPROGRESS</code> - The process is underway.</p> </li>
        /// <li> <p> <code>FAILED</code> - The request to perform a batch prediction did not run to completion. It is not usable.</p> </li>
        /// <li> <p> <code>COMPLETED</code> - The batch prediction process completed successfully.</p> </li>
        /// <li> <p> <code>DELETED</code> - The <code>BatchPrediction</code> is marked as deleted. It is not usable.</p> </li>
        /// </ul>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::EntityStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the <code>s3 key</code> portion of the <code>outputURI</code> field: ':', '//', '/./', '/../'.</p>
        pub fn output_uri(mut self, input: impl Into<std::string::String>) -> Self {
            self.output_uri = Some(input.into());
            self
        }
        /// <p>The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the <code>s3 key</code> portion of the <code>outputURI</code> field: ':', '//', '/./', '/../'.</p>
        pub fn set_output_uri(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.output_uri = input;
            self
        }
        /// <p>A description of the most recent details about processing the batch prediction request.</p>
        pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
            self.message = Some(input.into());
            self
        }
        /// <p>A description of the most recent details about processing the batch prediction request.</p>
        pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.message = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn compute_time(mut self, input: i64) -> Self {
            self.compute_time = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_compute_time(mut self, input: std::option::Option<i64>) -> Self {
            self.compute_time = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn finished_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.finished_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_finished_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.finished_at = input;
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn started_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.started_at = Some(input);
            self
        }
        /// <p>A timestamp represented in epoch time.</p>
        pub fn set_started_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.started_at = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn total_record_count(mut self, input: i64) -> Self {
            self.total_record_count = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_total_record_count(mut self, input: std::option::Option<i64>) -> Self {
            self.total_record_count = input;
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn invalid_record_count(mut self, input: i64) -> Self {
            self.invalid_record_count = Some(input);
            self
        }
        /// <p>Long integer type that is a 64-bit signed number.</p>
        pub fn set_invalid_record_count(mut self, input: std::option::Option<i64>) -> Self {
            self.invalid_record_count = input;
            self
        }
        /// Consumes the builder and constructs a [`BatchPrediction`](crate::model::BatchPrediction).
        pub fn build(self) -> crate::model::BatchPrediction {
            crate::model::BatchPrediction {
                batch_prediction_id: self.batch_prediction_id,
                ml_model_id: self.ml_model_id,
                batch_prediction_data_source_id: self.batch_prediction_data_source_id,
                input_data_location_s3: self.input_data_location_s3,
                created_by_iam_user: self.created_by_iam_user,
                created_at: self.created_at,
                last_updated_at: self.last_updated_at,
                name: self.name,
                status: self.status,
                output_uri: self.output_uri,
                message: self.message,
                compute_time: self.compute_time,
                finished_at: self.finished_at,
                started_at: self.started_at,
                total_record_count: self.total_record_count,
                invalid_record_count: self.invalid_record_count,
            }
        }
    }
}
impl BatchPrediction {
    /// Creates a new builder-style object to manufacture [`BatchPrediction`](crate::model::BatchPrediction).
    pub fn builder() -> crate::model::batch_prediction::Builder {
        crate::model::batch_prediction::Builder::default()
    }
}
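
// Illustrative usage sketch (not part of the generated code): shows how the builder
// above assembles a `BatchPrediction` and how the optional accessors read it back.
// The identifiers and S3 path ("bp-example", "ml-example", "s3://example-bucket/...")
// are made-up placeholders, not real resource IDs.
#[cfg(test)]
mod batch_prediction_usage {
    #[test]
    fn builds_a_batch_prediction() {
        let bp = crate::model::BatchPrediction::builder()
            .batch_prediction_id("bp-example")
            .ml_model_id("ml-example")
            .name("nightly scoring run")
            .output_uri("s3://example-bucket/results/")
            .total_record_count(1000)
            .invalid_record_count(0)
            .build();

        // Every field is optional; unset fields come back as `None`.
        assert_eq!(bp.batch_prediction_id(), Some("bp-example"));
        assert_eq!(bp.total_record_count(), Some(1000));
        assert!(bp.status().is_none());
    }
}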

/// When writing a match expression against `BatchPredictionFilterVariable`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not yet been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let batchpredictionfiltervariable = unimplemented!();
/// match batchpredictionfiltervariable {
///     BatchPredictionFilterVariable::CreatedAt => { /* ... */ },
///     BatchPredictionFilterVariable::DatasourceId => { /* ... */ },
///     BatchPredictionFilterVariable::DataUri => { /* ... */ },
///     BatchPredictionFilterVariable::IamUser => { /* ... */ },
///     BatchPredictionFilterVariable::LastUpdatedAt => { /* ... */ },
///     BatchPredictionFilterVariable::MlModelId => { /* ... */ },
///     BatchPredictionFilterVariable::Name => { /* ... */ },
///     BatchPredictionFilterVariable::Status => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `batchpredictionfiltervariable` represents
/// `NewFeature`, the execution path will lead to the second-to-last match arm,
/// even though the enum does not contain a variant `BatchPredictionFilterVariable::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `BatchPredictionFilterVariable::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// and calling `as_str` on it yields `"NewFeature"`.
/// The match expression is also forward-compatible when executed with a newer
/// version of the SDK in which the variant `BatchPredictionFilterVariable::NewFeature` is defined.
/// Specifically, when `batchpredictionfiltervariable` represents `NewFeature`,
/// the execution path still reaches the second-to-last match arm, because
/// calling `as_str` on `BatchPredictionFilterVariable::NewFeature` also yields `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>A list of the variables to use in searching or filtering <code>BatchPrediction</code>.</p>
/// <ul>
/// <li>
/// <p>
/// <code>CreatedAt</code> - Sets the search criteria to <code>BatchPrediction</code> creation date.</p>
/// </li>
/// <li>
/// <p>
/// <code>Status</code> - Sets the search criteria to <code>BatchPrediction</code> status.</p>
/// </li>
/// <li>
/// <p>
/// <code>Name</code> - Sets the search criteria to the contents of <code>BatchPrediction</code>
/// <code>Name</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>IAMUser</code> - Sets the search criteria to the user account that invoked the <code>BatchPrediction</code> creation.</p>
/// </li>
/// <li>
/// <p>
/// <code>MLModelId</code> - Sets the search criteria to the <code>MLModel</code> used in the <code>BatchPrediction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>DataSourceId</code> - Sets the search criteria to the <code>DataSource</code> used in the <code>BatchPrediction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <code>DataURI</code> - Sets the search criteria to the data file(s) used in the <code>BatchPrediction</code>. The URL can
/// identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum BatchPredictionFilterVariable {
    #[allow(missing_docs)] // documentation missing in model
    CreatedAt,
    #[allow(missing_docs)] // documentation missing in model
    DatasourceId,
    #[allow(missing_docs)] // documentation missing in model
    DataUri,
    #[allow(missing_docs)] // documentation missing in model
    IamUser,
    #[allow(missing_docs)] // documentation missing in model
    LastUpdatedAt,
    #[allow(missing_docs)] // documentation missing in model
    MlModelId,
    #[allow(missing_docs)] // documentation missing in model
    Name,
    #[allow(missing_docs)] // documentation missing in model
    Status,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for BatchPredictionFilterVariable {
    fn from(s: &str) -> Self {
        match s {
            "CreatedAt" => BatchPredictionFilterVariable::CreatedAt,
            "DataSourceId" => BatchPredictionFilterVariable::DatasourceId,
            "DataURI" => BatchPredictionFilterVariable::DataUri,
            "IAMUser" => BatchPredictionFilterVariable::IamUser,
            "LastUpdatedAt" => BatchPredictionFilterVariable::LastUpdatedAt,
            "MLModelId" => BatchPredictionFilterVariable::MlModelId,
            "Name" => BatchPredictionFilterVariable::Name,
            "Status" => BatchPredictionFilterVariable::Status,
            other => BatchPredictionFilterVariable::Unknown(crate::types::UnknownVariantValue(
                other.to_owned(),
            )),
        }
    }
}
impl std::str::FromStr for BatchPredictionFilterVariable {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(BatchPredictionFilterVariable::from(s))
    }
}
impl BatchPredictionFilterVariable {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            BatchPredictionFilterVariable::CreatedAt => "CreatedAt",
            BatchPredictionFilterVariable::DatasourceId => "DataSourceId",
            BatchPredictionFilterVariable::DataUri => "DataURI",
            BatchPredictionFilterVariable::IamUser => "IAMUser",
            BatchPredictionFilterVariable::LastUpdatedAt => "LastUpdatedAt",
            BatchPredictionFilterVariable::MlModelId => "MLModelId",
            BatchPredictionFilterVariable::Name => "Name",
            BatchPredictionFilterVariable::Status => "Status",
            BatchPredictionFilterVariable::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "CreatedAt",
            "DataSourceId",
            "DataURI",
            "IAMUser",
            "LastUpdatedAt",
            "MLModelId",
            "Name",
            "Status",
        ]
    }
}
impl AsRef<str> for BatchPredictionFilterVariable {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
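
// Illustrative usage sketch (not part of the generated code): an executable version
// of the forward-compatible match pattern documented above. The "NewFeature" string
// is the same hypothetical future value used in the doc comment; the descriptions
// returned here are arbitrary example text.
#[cfg(test)]
mod batch_prediction_filter_variable_usage {
    use super::BatchPredictionFilterVariable;

    fn describe(filter: &BatchPredictionFilterVariable) -> &'static str {
        match filter {
            BatchPredictionFilterVariable::CreatedAt => "filter by creation date",
            BatchPredictionFilterVariable::Status => "filter by status",
            // The guard also catches values this SDK version does not model yet,
            // because `as_str` on `Unknown` returns the original wire string.
            other if other.as_str() == "NewFeature" => "filter by the new feature",
            _ => "some other filter",
        }
    }

    #[test]
    fn handles_known_and_unknown_variants() {
        assert_eq!(
            describe(&BatchPredictionFilterVariable::from("CreatedAt")),
            "filter by creation date"
        );
        // An unmodeled value is carried in `Unknown` but still matched by string.
        assert_eq!(
            describe(&BatchPredictionFilterVariable::from("NewFeature")),
            "filter by the new feature"
        );
    }
}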

/// <p> Describes the data specification of a <code>DataSource</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct S3DataSpec {
    /// <p>The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.</p>
    #[doc(hidden)]
    pub data_location_s3: std::option::Option<std::string::String>,
    /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
    /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
    /// <ul>
    /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
    /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_rearrangement: std::option::Option<std::string::String>,
    /// <p> A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
    /// <p>You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
    /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
    /// <p>{ "version": "1.0",</p>
    /// <p>"recordAnnotationFieldName": "F1",</p>
    /// <p>"recordWeightFieldName": "F2",</p>
    /// <p>"targetFieldName": "F3",</p>
    /// <p>"dataFormat": "CSV",</p>
    /// <p>"dataFileContainsHeader": true,</p>
    /// <p>"attributes": [</p>
    /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
    /// <p>"excludedVariableNames": [ "F6" ] }</p>
    #[doc(hidden)]
    pub data_schema: std::option::Option<std::string::String>,
    /// <p>Describes the schema location in Amazon S3. You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
    #[doc(hidden)]
    pub data_schema_location_s3: std::option::Option<std::string::String>,
}
impl S3DataSpec {
    /// <p>The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.</p>
    pub fn data_location_s3(&self) -> std::option::Option<&str> {
        self.data_location_s3.as_deref()
    }
    /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
    /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
    /// <ul>
    /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
    /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
    /// </ul>
    pub fn data_rearrangement(&self) -> std::option::Option<&str> {
        self.data_rearrangement.as_deref()
    }
    /// <p> A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
    /// <p>You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
    /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
    /// <p>{ "version": "1.0",</p>
    /// <p>"recordAnnotationFieldName": "F1",</p>
    /// <p>"recordWeightFieldName": "F2",</p>
    /// <p>"targetFieldName": "F3",</p>
    /// <p>"dataFormat": "CSV",</p>
    /// <p>"dataFileContainsHeader": true,</p>
    /// <p>"attributes": [</p>
    /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
    /// <p>"excludedVariableNames": [ "F6" ] }</p>
    pub fn data_schema(&self) -> std::option::Option<&str> {
        self.data_schema.as_deref()
    }
    /// <p>Describes the schema location in Amazon S3. You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
    pub fn data_schema_location_s3(&self) -> std::option::Option<&str> {
        self.data_schema_location_s3.as_deref()
    }
}
/// See [`S3DataSpec`](crate::model::S3DataSpec).
pub mod s3_data_spec {

    /// A builder for [`S3DataSpec`](crate::model::S3DataSpec).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) data_location_s3: std::option::Option<std::string::String>,
        pub(crate) data_rearrangement: std::option::Option<std::string::String>,
        pub(crate) data_schema: std::option::Option<std::string::String>,
        pub(crate) data_schema_location_s3: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.</p>
        pub fn data_location_s3(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_location_s3 = Some(input.into());
            self
        }
        /// <p>The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.</p>
        pub fn set_data_location_s3(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_location_s3 = input;
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
        /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
        /// <ul>
        /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
        /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
        /// </ul>
        pub fn data_rearrangement(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_rearrangement = Some(input.into());
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
        /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
        /// <ul>
        /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
        /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
        /// </ul>
        pub fn set_data_rearrangement(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_rearrangement = input;
            self
        }
        /// <p> A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
        /// <p>You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
        /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
        /// <p>{ "version": "1.0",</p>
        /// <p>"recordAnnotationFieldName": "F1",</p>
        /// <p>"recordWeightFieldName": "F2",</p>
        /// <p>"targetFieldName": "F3",</p>
        /// <p>"dataFormat": "CSV",</p>
        /// <p>"dataFileContainsHeader": true,</p>
        /// <p>"attributes": [</p>
        /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
        /// <p>"excludedVariableNames": [ "F6" ] }</p>
        pub fn data_schema(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_schema = Some(input.into());
            self
        }
        /// <p> A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
        /// <p>You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
        /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
        /// <p>{ "version": "1.0",</p>
        /// <p>"recordAnnotationFieldName": "F1",</p>
        /// <p>"recordWeightFieldName": "F2",</p>
        /// <p>"targetFieldName": "F3",</p>
        /// <p>"dataFormat": "CSV",</p>
        /// <p>"dataFileContainsHeader": true,</p>
        /// <p>"attributes": [</p>
        /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
        /// <p>"excludedVariableNames": [ "F6" ] }</p>
        pub fn set_data_schema(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_schema = input;
            self
        }
        /// <p>Describes the schema location in Amazon S3. You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
        pub fn data_schema_location_s3(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_schema_location_s3 = Some(input.into());
            self
        }
        /// <p>Describes the schema location in Amazon S3. You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
        pub fn set_data_schema_location_s3(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_schema_location_s3 = input;
            self
        }
        /// Consumes the builder and constructs a [`S3DataSpec`](crate::model::S3DataSpec).
        pub fn build(self) -> crate::model::S3DataSpec {
            crate::model::S3DataSpec {
                data_location_s3: self.data_location_s3,
                data_rearrangement: self.data_rearrangement,
                data_schema: self.data_schema,
                data_schema_location_s3: self.data_schema_location_s3,
            }
        }
    }
}
impl S3DataSpec {
    /// Creates a new builder-style object to manufacture [`S3DataSpec`](crate::model::S3DataSpec).
    pub fn builder() -> crate::model::s3_data_spec::Builder {
        crate::model::s3_data_spec::Builder::default()
    }
}
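
// A minimal sketch (not part of the generated code) of the complementary sequential
// split described in the `DataRearrangement` documentation above, expressed through the
// `S3DataSpec` builder. The S3 path is a placeholder, and the `data_location_s3` and
// `data_rearrangement` fluent setters are assumed from the rest of this builder.
#[allow(dead_code)]
fn example_sequential_split_specs() -> (crate::model::S3DataSpec, crate::model::S3DataSpec) {
    // First 70 percent of the records, in input order, for training.
    let training = crate::model::S3DataSpec::builder()
        .data_location_s3("s3://example-bucket/observations.csv")
        .data_rearrangement(
            r#"{"splitting":{"percentBegin":0, "percentEnd":70, "strategy":"sequential"}}"#,
        )
        .build();
    // The complementary 30 percent, for evaluation: same range, with "complement" set.
    let evaluation = crate::model::S3DataSpec::builder()
        .data_location_s3("s3://example-bucket/observations.csv")
        .data_rearrangement(
            r#"{"splitting":{"percentBegin":0, "percentEnd":70, "strategy":"sequential", "complement":"true"}}"#,
        )
        .build();
    (training, evaluation)
}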

/// <p>Describes the data specification of an Amazon Redshift <code>DataSource</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RedshiftDataSpec {
    /// <p>Describes the <code>DatabaseName</code> and <code>ClusterIdentifier</code> for an Amazon Redshift <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub database_information: std::option::Option<crate::model::RedshiftDatabase>,
    /// <p>Describes the SQL query to execute on an Amazon Redshift database for an Amazon Redshift <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub select_sql_query: std::option::Option<std::string::String>,
    /// <p>Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p>
    #[doc(hidden)]
    pub database_credentials: std::option::Option<crate::model::RedshiftDatabaseCredentials>,
    /// <p>Describes an Amazon S3 location to store the result set of the <code>SelectSqlQuery</code> query.</p>
    #[doc(hidden)]
    pub s3_staging_location: std::option::Option<std::string::String>,
    /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
    /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
    /// <ul>
    /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
    /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_rearrangement: std::option::Option<std::string::String>,
    /// <p>A JSON string that represents the schema for an Amazon Redshift <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
    /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
    /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
    /// <p>{ "version": "1.0",</p>
    /// <p>"recordAnnotationFieldName": "F1",</p>
    /// <p>"recordWeightFieldName": "F2",</p>
    /// <p>"targetFieldName": "F3",</p>
    /// <p>"dataFormat": "CSV",</p>
    /// <p>"dataFileContainsHeader": true,</p>
    /// <p>"attributes": [</p>
    /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
    /// <p>"excludedVariableNames": [ "F6" ] }</p>
    #[doc(hidden)]
    pub data_schema: std::option::Option<std::string::String>,
    /// <p>Describes the schema location for an Amazon Redshift <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub data_schema_uri: std::option::Option<std::string::String>,
}
impl RedshiftDataSpec {
    /// <p>Describes the <code>DatabaseName</code> and <code>ClusterIdentifier</code> for an Amazon Redshift <code>DataSource</code>.</p>
    pub fn database_information(&self) -> std::option::Option<&crate::model::RedshiftDatabase> {
        self.database_information.as_ref()
    }
    /// <p>Describes the SQL query to execute on an Amazon Redshift database for an Amazon Redshift <code>DataSource</code>.</p>
    pub fn select_sql_query(&self) -> std::option::Option<&str> {
        self.select_sql_query.as_deref()
    }
    /// <p>Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p>
    pub fn database_credentials(
        &self,
    ) -> std::option::Option<&crate::model::RedshiftDatabaseCredentials> {
        self.database_credentials.as_ref()
    }
    /// <p>Describes an Amazon S3 location to store the result set of the <code>SelectSqlQuery</code> query.</p>
    pub fn s3_staging_location(&self) -> std::option::Option<&str> {
        self.s3_staging_location.as_deref()
    }
    /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
    /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
    /// <ul>
    /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
    /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
    /// </ul>
    pub fn data_rearrangement(&self) -> std::option::Option<&str> {
        self.data_rearrangement.as_deref()
    }
    /// <p>A JSON string that represents the schema for an Amazon Redshift <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
    /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
    /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
    /// <p>{ "version": "1.0",</p>
    /// <p>"recordAnnotationFieldName": "F1",</p>
    /// <p>"recordWeightFieldName": "F2",</p>
    /// <p>"targetFieldName": "F3",</p>
    /// <p>"dataFormat": "CSV",</p>
    /// <p>"dataFileContainsHeader": true,</p>
    /// <p>"attributes": [</p>
    /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
    /// <p>"excludedVariableNames": [ "F6" ] }</p>
    pub fn data_schema(&self) -> std::option::Option<&str> {
        self.data_schema.as_deref()
    }
    /// <p>Describes the schema location for an Amazon Redshift <code>DataSource</code>.</p>
    pub fn data_schema_uri(&self) -> std::option::Option<&str> {
        self.data_schema_uri.as_deref()
    }
}
/// See [`RedshiftDataSpec`](crate::model::RedshiftDataSpec).
pub mod redshift_data_spec {

    /// A builder for [`RedshiftDataSpec`](crate::model::RedshiftDataSpec).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) database_information: std::option::Option<crate::model::RedshiftDatabase>,
        pub(crate) select_sql_query: std::option::Option<std::string::String>,
        pub(crate) database_credentials:
            std::option::Option<crate::model::RedshiftDatabaseCredentials>,
        pub(crate) s3_staging_location: std::option::Option<std::string::String>,
        pub(crate) data_rearrangement: std::option::Option<std::string::String>,
        pub(crate) data_schema: std::option::Option<std::string::String>,
        pub(crate) data_schema_uri: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Describes the <code>DatabaseName</code> and <code>ClusterIdentifier</code> for an Amazon Redshift <code>DataSource</code>.</p>
        pub fn database_information(mut self, input: crate::model::RedshiftDatabase) -> Self {
            self.database_information = Some(input);
            self
        }
        /// <p>Describes the <code>DatabaseName</code> and <code>ClusterIdentifier</code> for an Amazon Redshift <code>DataSource</code>.</p>
        pub fn set_database_information(
            mut self,
            input: std::option::Option<crate::model::RedshiftDatabase>,
        ) -> Self {
            self.database_information = input;
            self
        }
        /// <p>Describes the SQL query to execute on an Amazon Redshift database for an Amazon Redshift <code>DataSource</code>.</p>
        pub fn select_sql_query(mut self, input: impl Into<std::string::String>) -> Self {
            self.select_sql_query = Some(input.into());
            self
        }
        /// <p>Describes the SQL query to execute on an Amazon Redshift database for an Amazon Redshift <code>DataSource</code>.</p>
        pub fn set_select_sql_query(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.select_sql_query = input;
            self
        }
        /// <p>Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p>
        pub fn database_credentials(
            mut self,
            input: crate::model::RedshiftDatabaseCredentials,
        ) -> Self {
            self.database_credentials = Some(input);
            self
        }
        /// <p>Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p>
        pub fn set_database_credentials(
            mut self,
            input: std::option::Option<crate::model::RedshiftDatabaseCredentials>,
        ) -> Self {
            self.database_credentials = input;
            self
        }
        /// <p>Describes an Amazon S3 location to store the result set of the <code>SelectSqlQuery</code> query.</p>
        pub fn s3_staging_location(mut self, input: impl Into<std::string::String>) -> Self {
            self.s3_staging_location = Some(input.into());
            self
        }
        /// <p>Describes an Amazon S3 location to store the result set of the <code>SelectSqlQuery</code> query.</p>
        pub fn set_s3_staging_location(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.s3_staging_location = input;
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
        /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
        /// <ul>
        /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
        /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
        /// </ul>
        pub fn data_rearrangement(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_rearrangement = Some(input.into());
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
        /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
        /// <ul>
        /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
        /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
        /// </ul>
        pub fn set_data_rearrangement(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_rearrangement = input;
            self
        }
        /// <p>A JSON string that represents the schema for an Amazon Redshift <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
        /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
        /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
        /// <p>{ "version": "1.0",</p>
        /// <p>"recordAnnotationFieldName": "F1",</p>
        /// <p>"recordWeightFieldName": "F2",</p>
        /// <p>"targetFieldName": "F3",</p>
        /// <p>"dataFormat": "CSV",</p>
        /// <p>"dataFileContainsHeader": true,</p>
        /// <p>"attributes": [</p>
        /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
        /// <p>"excludedVariableNames": [ "F6" ] }</p>
        pub fn data_schema(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_schema = Some(input.into());
            self
        }
        /// <p>A JSON string that represents the schema for an Amazon Redshift <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
        /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
        /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
        /// <p>{ "version": "1.0",</p>
        /// <p>"recordAnnotationFieldName": "F1",</p>
        /// <p>"recordWeightFieldName": "F2",</p>
        /// <p>"targetFieldName": "F3",</p>
        /// <p>"dataFormat": "CSV",</p>
        /// <p>"dataFileContainsHeader": true,</p>
        /// <p>"attributes": [</p>
        /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
        /// <p>"excludedVariableNames": [ "F6" ] }</p>
        pub fn set_data_schema(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_schema = input;
            self
        }
        /// <p>Describes the schema location for an Amazon Redshift <code>DataSource</code>.</p>
        pub fn data_schema_uri(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_schema_uri = Some(input.into());
            self
        }
        /// <p>Describes the schema location for an Amazon Redshift <code>DataSource</code>.</p>
        pub fn set_data_schema_uri(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_schema_uri = input;
            self
        }
        /// Consumes the builder and constructs a [`RedshiftDataSpec`](crate::model::RedshiftDataSpec).
        pub fn build(self) -> crate::model::RedshiftDataSpec {
            crate::model::RedshiftDataSpec {
                database_information: self.database_information,
                select_sql_query: self.select_sql_query,
                database_credentials: self.database_credentials,
                s3_staging_location: self.s3_staging_location,
                data_rearrangement: self.data_rearrangement,
                data_schema: self.data_schema,
                data_schema_uri: self.data_schema_uri,
            }
        }
    }
}
impl RedshiftDataSpec {
    /// Creates a new builder-style object to manufacture [`RedshiftDataSpec`](crate::model::RedshiftDataSpec).
    pub fn builder() -> crate::model::redshift_data_spec::Builder {
        crate::model::redshift_data_spec::Builder::default()
    }
}
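
// A minimal sketch (not part of the generated code) showing how the builders above
// could assemble a Redshift-backed datasource specification. The query, staging
// location, schema URI, and credential values are placeholders; a real request would
// also set `database_information` with the cluster identifier and database name.
#[allow(dead_code)]
fn example_redshift_data_spec() -> crate::model::RedshiftDataSpec {
    let credentials = crate::model::RedshiftDatabaseCredentials::builder()
        .username("amazon_ml_reader")
        .password("example-password")
        .build();
    crate::model::RedshiftDataSpec::builder()
        .select_sql_query("SELECT * FROM observations")
        .database_credentials(credentials)
        .s3_staging_location("s3://example-bucket/redshift-staging/")
        // Random split seeded with the staging path, following the `DataRearrangement`
        // documentation above; the complementary datasource would add "complement":"true".
        .data_rearrangement(
            r#"{"splitting":{"percentBegin":0, "percentEnd":70, "strategy":"random", "randomSeed":"s3://example-bucket/redshift-staging/"}}"#,
        )
        .data_schema_uri("s3://example-bucket/schemas/observations.schema")
        .build()
}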

/// <p>Describes the database credentials for connecting to a database on an Amazon Redshift cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RedshiftDatabaseCredentials {
    /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
    #[doc(hidden)]
    pub username: std::option::Option<std::string::String>,
    /// <p>A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a <code>RedshiftSelectSqlQuery</code> query. The password should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
    #[doc(hidden)]
    pub password: std::option::Option<std::string::String>,
}
impl RedshiftDatabaseCredentials {
    /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
    pub fn username(&self) -> std::option::Option<&str> {
        self.username.as_deref()
    }
    /// <p>A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a <code>RedshiftSelectSqlQuery</code> query. The password should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
    pub fn password(&self) -> std::option::Option<&str> {
        self.password.as_deref()
    }
}
/// See [`RedshiftDatabaseCredentials`](crate::model::RedshiftDatabaseCredentials).
pub mod redshift_database_credentials {

    /// A builder for [`RedshiftDatabaseCredentials`](crate::model::RedshiftDatabaseCredentials).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) username: std::option::Option<std::string::String>,
        pub(crate) password: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
        pub fn username(mut self, input: impl Into<std::string::String>) -> Self {
            self.username = Some(input.into());
            self
        }
        /// <p>A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the <code>RedshiftSelectSqlQuery</code> query. The username should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
        pub fn set_username(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.username = input;
            self
        }
        /// <p>A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a <code>RedshiftSelectSqlQuery</code> query. The password should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
        pub fn password(mut self, input: impl Into<std::string::String>) -> Self {
            self.password = Some(input.into());
            self
        }
        /// <p>A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a <code>RedshiftSelectSqlQuery</code> query. The password should be valid for an Amazon Redshift <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html">USER</a>.</p>
        pub fn set_password(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.password = input;
            self
        }
        /// Consumes the builder and constructs a [`RedshiftDatabaseCredentials`](crate::model::RedshiftDatabaseCredentials).
        pub fn build(self) -> crate::model::RedshiftDatabaseCredentials {
            crate::model::RedshiftDatabaseCredentials {
                username: self.username,
                password: self.password,
            }
        }
    }
}
impl RedshiftDatabaseCredentials {
    /// Creates a new builder-style object to manufacture [`RedshiftDatabaseCredentials`](crate::model::RedshiftDatabaseCredentials).
    pub fn builder() -> crate::model::redshift_database_credentials::Builder {
        crate::model::redshift_database_credentials::Builder::default()
    }
}

/// <p>The data specification of an Amazon Relational Database Service (Amazon RDS) <code>DataSource</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RdsDataSpec {
    /// <p>Describes the <code>DatabaseName</code> and <code>InstanceIdentifier</code> of an Amazon RDS database.</p>
    #[doc(hidden)]
    pub database_information: std::option::Option<crate::model::RdsDatabase>,
    /// <p>The query that is used to retrieve the observation data for the <code>DataSource</code>.</p>
    #[doc(hidden)]
    pub select_sql_query: std::option::Option<std::string::String>,
    /// <p>The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.</p>
    #[doc(hidden)]
    pub database_credentials: std::option::Option<crate::model::RdsDatabaseCredentials>,
    /// <p>The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using <code>SelectSqlQuery</code> is stored in this location.</p>
    #[doc(hidden)]
    pub s3_staging_location: std::option::Option<std::string::String>,
    /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
    /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
    /// <ul>
    /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
    /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_rearrangement: std::option::Option<std::string::String>,
    /// <p>A JSON string that represents the schema for an Amazon RDS <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
    /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
    /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
    /// <p>{ "version": "1.0",</p>
    /// <p>"recordAnnotationFieldName": "F1",</p>
    /// <p>"recordWeightFieldName": "F2",</p>
    /// <p>"targetFieldName": "F3",</p>
    /// <p>"dataFormat": "CSV",</p>
    /// <p>"dataFileContainsHeader": true,</p>
    /// <p>"attributes": [</p>
    /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
    /// <p>"excludedVariableNames": [ "F6" ] }</p>
    #[doc(hidden)]
    pub data_schema: std::option::Option<std::string::String>,
    /// <p>The Amazon S3 location of the <code>DataSchema</code>. </p>
    #[doc(hidden)]
    pub data_schema_uri: std::option::Option<std::string::String>,
    /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    #[doc(hidden)]
    pub resource_role: std::option::Option<std::string::String>,
    /// <p>The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    #[doc(hidden)]
    pub service_role: std::option::Option<std::string::String>,
    /// <p>The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.</p>
    #[doc(hidden)]
    pub subnet_id: std::option::Option<std::string::String>,
    /// <p>The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to Amazon S3.</p>
    #[doc(hidden)]
    pub security_group_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl RdsDataSpec {
    /// <p>Describes the <code>DatabaseName</code> and <code>InstanceIdentifier</code> of an Amazon RDS database.</p>
    pub fn database_information(&self) -> std::option::Option<&crate::model::RdsDatabase> {
        self.database_information.as_ref()
    }
    /// <p>The query that is used to retrieve the observation data for the <code>DataSource</code>.</p>
    pub fn select_sql_query(&self) -> std::option::Option<&str> {
        self.select_sql_query.as_deref()
    }
    /// <p>The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.</p>
    pub fn database_credentials(
        &self,
    ) -> std::option::Option<&crate::model::RdsDatabaseCredentials> {
        self.database_credentials.as_ref()
    }
    /// <p>The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using <code>SelectSqlQuery</code> is stored in this location.</p>
    pub fn s3_staging_location(&self) -> std::option::Option<&str> {
        self.s3_staging_location.as_deref()
    }
    /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
    /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
    /// <ul>
    /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
    /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
    /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
    /// </ul>
    pub fn data_rearrangement(&self) -> std::option::Option<&str> {
        self.data_rearrangement.as_deref()
    }
    /// <p>A JSON string that represents the schema for an Amazon RDS <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
    /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
    /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
    /// <p>{ "version": "1.0",</p>
    /// <p>"recordAnnotationFieldName": "F1",</p>
    /// <p>"recordWeightFieldName": "F2",</p>
    /// <p>"targetFieldName": "F3",</p>
    /// <p>"dataFormat": "CSV",</p>
    /// <p>"dataFileContainsHeader": true,</p>
    /// <p>"attributes": [</p>
    /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
    /// <p>"excludedVariableNames": [ "F6" ] }</p>
    pub fn data_schema(&self) -> std::option::Option<&str> {
        self.data_schema.as_deref()
    }
    /// <p>The Amazon S3 location of the <code>DataSchema</code>. </p>
    pub fn data_schema_uri(&self) -> std::option::Option<&str> {
        self.data_schema_uri.as_deref()
    }
    /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    pub fn resource_role(&self) -> std::option::Option<&str> {
        self.resource_role.as_deref()
    }
    /// <p>The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
    pub fn service_role(&self) -> std::option::Option<&str> {
        self.service_role.as_deref()
    }
    /// <p>The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.</p>
    pub fn subnet_id(&self) -> std::option::Option<&str> {
        self.subnet_id.as_deref()
    }
    /// <p>The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to Amazon S3.</p>
    pub fn security_group_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.security_group_ids.as_deref()
    }
}
/// See [`RdsDataSpec`](crate::model::RdsDataSpec).
pub mod rds_data_spec {

    /// A builder for [`RdsDataSpec`](crate::model::RdsDataSpec).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) database_information: std::option::Option<crate::model::RdsDatabase>,
        pub(crate) select_sql_query: std::option::Option<std::string::String>,
        pub(crate) database_credentials: std::option::Option<crate::model::RdsDatabaseCredentials>,
        pub(crate) s3_staging_location: std::option::Option<std::string::String>,
        pub(crate) data_rearrangement: std::option::Option<std::string::String>,
        pub(crate) data_schema: std::option::Option<std::string::String>,
        pub(crate) data_schema_uri: std::option::Option<std::string::String>,
        pub(crate) resource_role: std::option::Option<std::string::String>,
        pub(crate) service_role: std::option::Option<std::string::String>,
        pub(crate) subnet_id: std::option::Option<std::string::String>,
        pub(crate) security_group_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>Describes the <code>DatabaseName</code> and <code>InstanceIdentifier</code> of an Amazon RDS database.</p>
        pub fn database_information(mut self, input: crate::model::RdsDatabase) -> Self {
            self.database_information = Some(input);
            self
        }
        /// <p>Describes the <code>DatabaseName</code> and <code>InstanceIdentifier</code> of an Amazon RDS database.</p>
        pub fn set_database_information(
            mut self,
            input: std::option::Option<crate::model::RdsDatabase>,
        ) -> Self {
            self.database_information = input;
            self
        }
        /// <p>The query that is used to retrieve the observation data for the <code>DataSource</code>.</p>
        pub fn select_sql_query(mut self, input: impl Into<std::string::String>) -> Self {
            self.select_sql_query = Some(input.into());
            self
        }
        /// <p>The query that is used to retrieve the observation data for the <code>DataSource</code>.</p>
        pub fn set_select_sql_query(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.select_sql_query = input;
            self
        }
        /// <p>The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.</p>
        pub fn database_credentials(mut self, input: crate::model::RdsDatabaseCredentials) -> Self {
            self.database_credentials = Some(input);
            self
        }
        /// <p>The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.</p>
        pub fn set_database_credentials(
            mut self,
            input: std::option::Option<crate::model::RdsDatabaseCredentials>,
        ) -> Self {
            self.database_credentials = input;
            self
        }
        /// <p>The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using <code>SelectSqlQuery</code> is stored in this location.</p>
        pub fn s3_staging_location(mut self, input: impl Into<std::string::String>) -> Self {
            self.s3_staging_location = Some(input.into());
            self
        }
        /// <p>The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using <code>SelectSqlQuery</code> is stored in this location.</p>
        pub fn set_s3_staging_location(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.s3_staging_location = input;
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
        /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
        /// <ul>
        /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
        /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
        /// </ul>
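        ///
        /// A minimal, hypothetical sketch of supplying such a string to this builder method;
        /// the split percentages and the seed path mirror the examples above and should be
        /// adjusted for your own data:
        ///
        /// ```no_run
        /// // A random split of the 70-100 percent range, seeded with an S3 path, as described above.
        /// let rearrangement = r#"{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}"#;
        /// let builder = aws_sdk_machinelearning::model::RdsDataSpec::builder()
        ///     .data_rearrangement(rearrangement);
        /// ```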
        pub fn data_rearrangement(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_rearrangement = Some(input.into());
            self
        }
        /// <p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p>
        /// <p>There are multiple parameters that control what data is used to create a datasource:</p>
        /// <ul>
        /// <li> <p> <b> <code>percentBegin</code> </b> </p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>percentEnd</code> </b> </p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p> </li>
        /// <li> <p> <b> <code>complement</code> </b> </p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code> </p> </li>
        /// <li> <p> <b> <code>strategy</code> </b> </p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code> </p> <p>To randomly split the input data into the proportions indicated by the <code>percentBegin</code> and <code>percentEnd</code> parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}</code> </p> <p>Datasource for training: <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code> </p> </li>
        /// </ul>
        pub fn set_data_rearrangement(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_rearrangement = input;
            self
        }
        /// <p>A JSON string that represents the schema for an Amazon RDS <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
        /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
        /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
        /// <p>{ "version": "1.0",</p>
        /// <p>"recordAnnotationFieldName": "F1",</p>
        /// <p>"recordWeightFieldName": "F2",</p>
        /// <p>"targetFieldName": "F3",</p>
        /// <p>"dataFormat": "CSV",</p>
        /// <p>"dataFileContainsHeader": true,</p>
        /// <p>"attributes": [</p>
        /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
        /// <p>"excludedVariableNames": [ "F6" ] }</p>
        pub fn data_schema(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_schema = Some(input.into());
            self
        }
        /// <p>A JSON string that represents the schema for an Amazon RDS <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p>
        /// <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p>
        /// <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p>
        /// <p>{ "version": "1.0",</p>
        /// <p>"recordAnnotationFieldName": "F1",</p>
        /// <p>"recordWeightFieldName": "F2",</p>
        /// <p>"targetFieldName": "F3",</p>
        /// <p>"dataFormat": "CSV",</p>
        /// <p>"dataFileContainsHeader": true,</p>
        /// <p>"attributes": [</p>
        /// <p>{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p>
        /// <p>"excludedVariableNames": [ "F6" ] }</p>
        pub fn set_data_schema(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_schema = input;
            self
        }
        /// <p>The Amazon S3 location of the <code>DataSchema</code>. </p>
        pub fn data_schema_uri(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_schema_uri = Some(input.into());
            self
        }
        /// <p>The Amazon S3 location of the <code>DataSchema</code>. </p>
        pub fn set_data_schema_uri(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_schema_uri = input;
            self
        }
        /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn resource_role(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_role = Some(input.into());
            self
        }
        /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn set_resource_role(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.resource_role = input;
            self
        }
        /// <p>The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn service_role(mut self, input: impl Into<std::string::String>) -> Self {
            self.service_role = Some(input.into());
            self
        }
        /// <p>The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
        pub fn set_service_role(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.service_role = input;
            self
        }
        /// <p>The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.</p>
        pub fn subnet_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.subnet_id = Some(input.into());
            self
        }
        /// <p>The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.</p>
        pub fn set_subnet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.subnet_id = input;
            self
        }
        /// Appends an item to `security_group_ids`.
        ///
        /// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids).
        ///
        /// <p>The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to Amazon S3.</p>
        pub fn security_group_ids(mut self, input: impl Into<std::string::String>) -> Self {
            let mut v = self.security_group_ids.unwrap_or_default();
            v.push(input.into());
            self.security_group_ids = Some(v);
            self
        }
        /// <p>The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to Amazon S3.</p>
        pub fn set_security_group_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.security_group_ids = input;
            self
        }
        /// Consumes the builder and constructs a [`RdsDataSpec`](crate::model::RdsDataSpec).
        pub fn build(self) -> crate::model::RdsDataSpec {
            crate::model::RdsDataSpec {
                database_information: self.database_information,
                select_sql_query: self.select_sql_query,
                database_credentials: self.database_credentials,
                s3_staging_location: self.s3_staging_location,
                data_rearrangement: self.data_rearrangement,
                data_schema: self.data_schema,
                data_schema_uri: self.data_schema_uri,
                resource_role: self.resource_role,
                service_role: self.service_role,
                subnet_id: self.subnet_id,
                security_group_ids: self.security_group_ids,
            }
        }
    }
}
impl RdsDataSpec {
    /// Creates a new builder-style object to manufacture [`RdsDataSpec`](crate::model::RdsDataSpec).
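    ///
    /// A minimal, hypothetical sketch of assembling a spec with this builder; every value below
    /// (instance identifier, database name, query, staging bucket, roles, subnet, and security
    /// group) is a placeholder, not a value defined by this crate:
    ///
    /// ```no_run
    /// use aws_sdk_machinelearning::model::{RdsDatabase, RdsDatabaseCredentials, RdsDataSpec};
    ///
    /// let spec = RdsDataSpec::builder()
    ///     .database_information(
    ///         RdsDatabase::builder()
    ///             .instance_identifier("my-rds-instance") // placeholder DB instance identifier
    ///             .database_name("mydb")                  // placeholder database name
    ///             .build(),
    ///     )
    ///     .select_sql_query("SELECT * FROM observations") // placeholder query
    ///     .database_credentials(
    ///         RdsDatabaseCredentials::builder()
    ///             .username("ml_user")          // placeholder database user
    ///             .password("example-password") // placeholder; never hard-code real secrets
    ///             .build(),
    ///     )
    ///     .s3_staging_location("s3://my_s3_path/bucket/")
    ///     .resource_role("DataPipelineDefaultResourceRole")
    ///     .service_role("DataPipelineDefaultRole")
    ///     .subnet_id("subnet-00000000")
    ///     .security_group_ids("sg-00000000") // appends one ID; call again to add more
    ///     .build();
    /// ```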
    pub fn builder() -> crate::model::rds_data_spec::Builder {
        crate::model::rds_data_spec::Builder::default()
    }
}

/// <p>The database credentials to connect to a database on an RDS DB instance.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct RdsDatabaseCredentials {
    /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
    #[doc(hidden)]
    pub username: std::option::Option<std::string::String>,
    /// <p>The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the <code>RDSSelectSqlQuery</code> query.</p>
    #[doc(hidden)]
    pub password: std::option::Option<std::string::String>,
}
impl RdsDatabaseCredentials {
    /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
    pub fn username(&self) -> std::option::Option<&str> {
        self.username.as_deref()
    }
    /// <p>The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the <code>RDSSelectSqlQuery</code> query.</p>
    pub fn password(&self) -> std::option::Option<&str> {
        self.password.as_deref()
    }
}
/// See [`RdsDatabaseCredentials`](crate::model::RdsDatabaseCredentials).
pub mod rds_database_credentials {

    /// A builder for [`RdsDatabaseCredentials`](crate::model::RdsDatabaseCredentials).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) username: std::option::Option<std::string::String>,
        pub(crate) password: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
        pub fn username(mut self, input: impl Into<std::string::String>) -> Self {
            self.username = Some(input.into());
            self
        }
        /// <p>The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an <code>RDSSelectSqlQuery</code> query.</p>
        pub fn set_username(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.username = input;
            self
        }
        /// <p>The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the <code>RDSSelectSqlQuery</code> query.</p>
        pub fn password(mut self, input: impl Into<std::string::String>) -> Self {
            self.password = Some(input.into());
            self
        }
        /// <p>The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the <code>RDSSelectSqlQuery</code> query.</p>
        pub fn set_password(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.password = input;
            self
        }
        /// Consumes the builder and constructs a [`RdsDatabaseCredentials`](crate::model::RdsDatabaseCredentials).
        pub fn build(self) -> crate::model::RdsDatabaseCredentials {
            crate::model::RdsDatabaseCredentials {
                username: self.username,
                password: self.password,
            }
        }
    }
}
impl RdsDatabaseCredentials {
    /// Creates a new builder-style object to manufacture [`RdsDatabaseCredentials`](crate::model::RdsDatabaseCredentials).
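    ///
    /// A minimal sketch of the builder in isolation; the username and password shown are
    /// placeholders, not real credentials:
    ///
    /// ```no_run
    /// let credentials = aws_sdk_machinelearning::model::RdsDatabaseCredentials::builder()
    ///     .username("ml_user")          // placeholder database user
    ///     .password("example-password") // placeholder; avoid hard-coding real secrets
    ///     .build();
    /// ```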
    pub fn builder() -> crate::model::rds_database_credentials::Builder {
        crate::model::rds_database_credentials::Builder::default()
    }
}