// rusoto_machinelearning/generated.rs

// =================================================================
//
//                           * WARNING *
//
//                    This file is generated!
//
//  Changes made to this file will be overwritten. If changes are
//  required to the generated code, the service_crategen project
//  must be updated to generate the changes.
//
// =================================================================

use std::error::Error;
use std::fmt;

use async_trait::async_trait;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError};

use rusoto_core::proto;
use rusoto_core::request::HttpResponse;
use rusoto_core::signature::SignedRequest;
#[allow(unused_imports)]
use serde::{Deserialize, Serialize};

impl MachineLearningClient {
    fn new_signed_request(&self, http_method: &str, request_uri: &str) -> SignedRequest {
        let mut request =
            SignedRequest::new(http_method, "machinelearning", &self.region, request_uri);

        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request
    }

    async fn sign_and_dispatch<E>(
        &self,
        request: SignedRequest,
        from_response: fn(BufferedHttpResponse) -> RusotoError<E>,
    ) -> Result<HttpResponse, RusotoError<E>> {
        let mut response = self.client.sign_and_dispatch(request).await?;
        if !response.status.is_success() {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            return Err(from_response(response));
        }

        Ok(response)
    }
}
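
// The operation implementations that appear later in this file are expected to funnel
// every API call through the two helpers above: build a signed POST request, set the
// JSON-RPC target header, attach the serialized input, and map non-success responses
// through the per-operation error type. A hedged sketch of that shape (identifiers are
// illustrative; the exact generated bodies may differ):
//
//     let mut request = self.new_signed_request("POST", "/");
//     request.add_header("x-amz-target", "AmazonML_20141212.AddTags");
//     request.set_payload(Some(serde_json::to_vec(&input).unwrap()));
//
//     let response = self
//         .sign_and_dispatch(request, AddTagsError::from_response)
//         .await?;
//     proto::json::ResponsePayload::new(&response).deserialize::<AddTagsOutput, _>()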

use serde_json;
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AddTagsInput {
    /// <p>The ID of the ML object to tag. For example, <code>exampleModelId</code>.</p>
    #[serde(rename = "ResourceId")]
    pub resource_id: String,
    /// <p>The type of the ML object to tag. </p>
    #[serde(rename = "ResourceType")]
    pub resource_type: String,
    /// <p>The key-value pairs to use to create tags. If you specify a key without specifying a value, Amazon ML creates a tag with the specified key and a value of null.</p>
    #[serde(rename = "Tags")]
    pub tags: Vec<Tag>,
}
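
// A minimal sketch of building an `AddTagsInput`. It assumes the `Tag` struct defined
// later in this file exposes optional `key`/`value` fields; the resource ID and type
// values are placeholders.
#[allow(dead_code)]
fn example_add_tags_input() -> AddTagsInput {
    AddTagsInput {
        // The ML object to tag, e.g. an `MLModel` ID.
        resource_id: "exampleModelId".to_owned(),
        resource_type: "MLModel".to_owned(),
        tags: vec![Tag {
            key: Some("Department".to_owned()),
            value: Some("Analytics".to_owned()),
        }],
    }
}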

/// <p>Amazon ML returns the following elements. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AddTagsOutput {
    /// <p>The ID of the ML object that was tagged.</p>
    #[serde(rename = "ResourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
    /// <p>The type of the ML object that was tagged.</p>
    #[serde(rename = "ResourceType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub resource_type: Option<String>,
}

/// <p> Represents the output of a <code>GetBatchPrediction</code> operation.</p> <p> The content consists of the detailed metadata, the status, and the data file information of a <code>Batch Prediction</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct BatchPrediction {
    /// <p>The ID of the <code>DataSource</code> that points to the group of observations to predict.</p>
    #[serde(rename = "BatchPredictionDataSourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub batch_prediction_data_source_id: Option<String>,
    /// <p>The ID assigned to the <code>BatchPrediction</code> at creation. This value should be identical to the value of the <code>BatchPredictionID</code> in the request. </p>
    #[serde(rename = "BatchPredictionId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub batch_prediction_id: Option<String>,
    #[serde(rename = "ComputeTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compute_time: Option<i64>,
    /// <p>The time that the <code>BatchPrediction</code> was created. The time is expressed in epoch time.</p>
    #[serde(rename = "CreatedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_at: Option<f64>,
    /// <p>The AWS user account that invoked the <code>BatchPrediction</code>. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    #[serde(rename = "CreatedByIamUser")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by_iam_user: Option<String>,
    #[serde(rename = "FinishedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finished_at: Option<f64>,
    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
    #[serde(rename = "InputDataLocationS3")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_data_location_s3: Option<String>,
    #[serde(rename = "InvalidRecordCount")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub invalid_record_count: Option<i64>,
    /// <p>The time of the most recent edit to the <code>BatchPrediction</code>. The time is expressed in epoch time.</p>
    #[serde(rename = "LastUpdatedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_at: Option<f64>,
    /// <p>The ID of the <code>MLModel</code> that generated predictions for the <code>BatchPrediction</code> request.</p>
    #[serde(rename = "MLModelId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ml_model_id: Option<String>,
    /// <p>A description of the most recent details about processing the batch prediction request.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>A user-supplied name or description of the <code>BatchPrediction</code>.</p>
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// <p>The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the <code>s3 key</code> portion of the <code>outputURI</code> field: ':', '//', '/./', '/../'.</p>
    #[serde(rename = "OutputUri")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_uri: Option<String>,
    #[serde(rename = "StartedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub started_at: Option<f64>,
    /// <p><p>The status of the <code>BatchPrediction</code>. This element can have one of the following values:</p> <ul> <li> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.</li> <li> <code>INPROGRESS</code> - The process is underway.</li> <li> <code>FAILED</code> - The request to perform a batch prediction did not run to completion. It is not usable.</li> <li> <code>COMPLETED</code> - The batch prediction process completed successfully.</li> <li> <code>DELETED</code> - The <code>BatchPrediction</code> is marked as deleted. It is not usable.</li> </ul></p>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "TotalRecordCount")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub total_record_count: Option<i64>,
}
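
// A hedged sketch of inspecting a returned `BatchPrediction`: the `Status` values listed
// in the doc comment above arrive as plain strings, so callers typically match on them.
#[allow(dead_code)]
fn batch_prediction_is_terminal(prediction: &BatchPrediction) -> bool {
    // COMPLETED, FAILED and DELETED are terminal; PENDING and INPROGRESS are not.
    matches!(
        prediction.status.as_deref(),
        Some("COMPLETED") | Some("FAILED") | Some("DELETED")
    )
}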

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateBatchPredictionInput {
    /// <p>The ID of the <code>DataSource</code> that points to the group of observations to predict.</p>
    #[serde(rename = "BatchPredictionDataSourceId")]
    pub batch_prediction_data_source_id: String,
    /// <p>A user-supplied ID that uniquely identifies the <code>BatchPrediction</code>.</p>
    #[serde(rename = "BatchPredictionId")]
    pub batch_prediction_id: String,
    /// <p>A user-supplied name or description of the <code>BatchPrediction</code>. <code>BatchPredictionName</code> can only use the UTF-8 character set.</p>
    #[serde(rename = "BatchPredictionName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub batch_prediction_name: Option<String>,
    /// <p>The ID of the <code>MLModel</code> that will generate predictions for the group of observations. </p>
    #[serde(rename = "MLModelId")]
    pub ml_model_id: String,
    /// <p>The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the <code>s3 key</code> portion of the <code>outputURI</code> field: ':', '//', '/./', '/../'.</p> <p>Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the <a href="http://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>.</p>
    #[serde(rename = "OutputUri")]
    pub output_uri: String,
}
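
// A minimal sketch of a `CreateBatchPredictionInput`; the IDs and the S3 output URI are
// placeholders, and `batch_prediction_name` is left at its default (`None`) via
// `..Default::default()`.
#[allow(dead_code)]
fn example_create_batch_prediction_input() -> CreateBatchPredictionInput {
    CreateBatchPredictionInput {
        batch_prediction_id: "example-batch-prediction".to_owned(),
        batch_prediction_data_source_id: "example-datasource".to_owned(),
        ml_model_id: "example-model".to_owned(),
        output_uri: "s3://example-bucket/batch-output/".to_owned(),
        ..Default::default()
    }
}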

/// <p> Represents the output of a <code>CreateBatchPrediction</code> operation, and is an acknowledgement that Amazon ML received the request.</p> <p>The <code>CreateBatchPrediction</code> operation is asynchronous. You can poll for status updates by using the <code>GetBatchPrediction</code> operation and checking the <code>Status</code> parameter of the result. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateBatchPredictionOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>BatchPrediction</code>. This value is identical to the value of the <code>BatchPredictionId</code> in the request.</p>
    #[serde(rename = "BatchPredictionId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub batch_prediction_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDataSourceFromRDSInput {
    /// <p>The compute statistics for a <code>DataSource</code>. The statistics are generated from the observation data referenced by a <code>DataSource</code>. Amazon ML uses the statistics internally during <code>MLModel</code> training. This parameter must be set to <code>true</code> if the <code>DataSource</code> needs to be used for <code>MLModel</code> training. </p>
    #[serde(rename = "ComputeStatistics")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compute_statistics: Option<bool>,
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>. Typically, an Amazon Resource Number (ARN) becomes the ID for a <code>DataSource</code>.</p>
    #[serde(rename = "DataSourceId")]
    pub data_source_id: String,
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    #[serde(rename = "DataSourceName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_name: Option<String>,
    /// <p><p>The data specification of an Amazon RDS <code>DataSource</code>:</p> <ul> <li><p>DatabaseInformation - <ul> <li> <code>DatabaseName</code> - The name of the Amazon RDS database.</li> <li> <code>InstanceIdentifier </code> - A unique identifier for the Amazon RDS database instance.</li> </ul> </p></li> <li><p>DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.</p></li> <li><p>ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more information, see <a href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p></li> <li><p>ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p></li> <li><p>SecurityInfo - The security information to use to access an RDS DB instance. You need to set up appropriate ingress rules for the security entity IDs provided to allow access to the Amazon RDS instance. Specify a [<code>SubnetId</code>, <code>SecurityGroupIds</code>] pair for a VPC-based RDS DB instance.</p></li> <li><p>SelectSqlQuery - A query that is used to retrieve the observation data for the <code>Datasource</code>.</p></li> <li><p>S3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using <code>SelectSqlQuery</code> is stored in this location.</p></li> <li><p>DataSchemaUri - The Amazon S3 location of the <code>DataSchema</code>.</p></li> <li><p>DataSchema - A JSON string representing the schema. This is not required if <code>DataSchemaUri</code> is specified. </p></li> <li> <p>DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the <code>Datasource</code>. </p> <p> Sample - <code> &quot;{&quot;splitting&quot;:{&quot;percentBegin&quot;:10,&quot;percentEnd&quot;:60}}&quot;</code> </p> </li> </ul></p>
    #[serde(rename = "RDSData")]
    pub rds_data: RDSDataSpec,
    /// <p>The role that Amazon ML assumes on behalf of the user to create and activate a data pipeline in the user's account and copy data using the <code>SelectSqlQuery</code> query from Amazon RDS to Amazon S3.</p>
    #[serde(rename = "RoleARN")]
    pub role_arn: String,
}

/// <p> Represents the output of a <code>CreateDataSourceFromRDS</code> operation, and is an acknowledgement that Amazon ML received the request.</p> <p>The <code>CreateDataSourceFromRDS</code> operation is asynchronous. You can poll for updates by using the <code>GetDataSource</code> operation and checking the <code>Status</code> parameter. You can inspect the <code>Message</code> when <code>Status</code> shows up as <code>FAILED</code>. You can also check the progress of the copy operation by going to the <code>DataPipeline</code> console and looking up the pipeline using the <code>pipelineId </code> from the describe call.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDataSourceFromRDSOutput {
    /// <p>A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the <code>DataSourceID</code> in the request. </p>
    #[serde(rename = "DataSourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDataSourceFromRedshiftInput {
    /// <p>The compute statistics for a <code>DataSource</code>. The statistics are generated from the observation data referenced by a <code>DataSource</code>. Amazon ML uses the statistics internally during <code>MLModel</code> training. This parameter must be set to <code>true</code> if the <code>DataSource</code> needs to be used for <code>MLModel</code> training.</p>
    #[serde(rename = "ComputeStatistics")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compute_statistics: Option<bool>,
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>.</p>
    #[serde(rename = "DataSourceId")]
    pub data_source_id: String,
    /// <p>A user-supplied name or description of the <code>DataSource</code>. </p>
    #[serde(rename = "DataSourceName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_name: Option<String>,
    /// <p><p>The data specification of an Amazon Redshift <code>DataSource</code>:</p> <ul> <li><p>DatabaseInformation - <ul> <li> <code>DatabaseName</code> - The name of the Amazon Redshift database. </li> <li> <code> ClusterIdentifier</code> - The unique ID for the Amazon Redshift cluster.</li> </ul></p></li> <li><p>DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p></li> <li><p>SelectSqlQuery - The query that is used to retrieve the observation data for the <code>Datasource</code>.</p></li> <li><p>S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the <code>SelectSqlQuery</code> query is stored in this location.</p></li> <li><p>DataSchemaUri - The Amazon S3 location of the <code>DataSchema</code>.</p></li> <li><p>DataSchema - A JSON string representing the schema. This is not required if <code>DataSchemaUri</code> is specified. </p></li> <li> <p>DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the <code>DataSource</code>.</p> <p> Sample - <code> &quot;{&quot;splitting&quot;:{&quot;percentBegin&quot;:10,&quot;percentEnd&quot;:60}}&quot;</code> </p> </li> </ul></p>
    #[serde(rename = "DataSpec")]
    pub data_spec: RedshiftDataSpec,
    /// <p>A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following: </p> <p> <ul> <li><p>A security group to allow Amazon ML to execute the <code>SelectSqlQuery</code> query on an Amazon Redshift cluster</p></li> <li><p>An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the <code>S3StagingLocation</code></p></li> </ul> </p>
    #[serde(rename = "RoleARN")]
    pub role_arn: String,
}

/// <p> Represents the output of a <code>CreateDataSourceFromRedshift</code> operation, and is an acknowledgement that Amazon ML received the request.</p> <p>The <code>CreateDataSourceFromRedshift</code> operation is asynchronous. You can poll for updates by using the <code>GetDataSource</code> operation and checking the <code>Status</code> parameter. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDataSourceFromRedshiftOutput {
    /// <p>A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the <code>DataSourceID</code> in the request. </p>
    #[serde(rename = "DataSourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDataSourceFromS3Input {
    /// <p>The compute statistics for a <code>DataSource</code>. The statistics are generated from the observation data referenced by a <code>DataSource</code>. Amazon ML uses the statistics internally during <code>MLModel</code> training. This parameter must be set to <code>true</code> if the <code>DataSource</code> needs to be used for <code>MLModel</code> training.</p>
    #[serde(rename = "ComputeStatistics")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compute_statistics: Option<bool>,
    /// <p>A user-supplied identifier that uniquely identifies the <code>DataSource</code>. </p>
    #[serde(rename = "DataSourceId")]
    pub data_source_id: String,
    /// <p>A user-supplied name or description of the <code>DataSource</code>. </p>
    #[serde(rename = "DataSourceName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_name: Option<String>,
    /// <p><p>The data specification of a <code>DataSource</code>:</p> <ul> <li><p>DataLocationS3 - The Amazon S3 location of the observation data.</p></li> <li><p>DataSchemaLocationS3 - The Amazon S3 location of the <code>DataSchema</code>.</p></li> <li><p>DataSchema - A JSON string representing the schema. This is not required if <code>DataSchemaUri</code> is specified. </p></li> <li> <p>DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the <code>Datasource</code>. </p> <p> Sample - <code> &quot;{&quot;splitting&quot;:{&quot;percentBegin&quot;:10,&quot;percentEnd&quot;:60}}&quot;</code> </p> </li> </ul></p>
    #[serde(rename = "DataSpec")]
    pub data_spec: S3DataSpec,
}
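
// A hedged sketch of a `CreateDataSourceFromS3Input`. It assumes the `S3DataSpec` struct
// defined later in this file has a required `data_location_s3` field and derives
// `Default` for its optional schema/rearrangement fields; the bucket paths are placeholders.
#[allow(dead_code)]
fn example_create_data_source_from_s3_input() -> CreateDataSourceFromS3Input {
    CreateDataSourceFromS3Input {
        data_source_id: "example-s3-datasource".to_owned(),
        data_source_name: Some("Example S3 data source".to_owned()),
        // Compute statistics so the DataSource can be used for MLModel training.
        compute_statistics: Some(true),
        data_spec: S3DataSpec {
            data_location_s3: "s3://example-bucket/training-data.csv".to_owned(),
            ..Default::default()
        },
    }
}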

/// <p> Represents the output of a <code>CreateDataSourceFromS3</code> operation, and is an acknowledgement that Amazon ML received the request.</p> <p>The <code>CreateDataSourceFromS3</code> operation is asynchronous. You can poll for updates by using the <code>GetDataSource</code> operation and checking the <code>Status</code> parameter. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDataSourceFromS3Output {
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>. This value should be identical to the value of the <code>DataSourceID</code> in the request. </p>
    #[serde(rename = "DataSourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateEvaluationInput {
    /// <p>The ID of the <code>DataSource</code> for the evaluation. The schema of the <code>DataSource</code> must match the schema used to create the <code>MLModel</code>.</p>
    #[serde(rename = "EvaluationDataSourceId")]
    pub evaluation_data_source_id: String,
    /// <p>A user-supplied ID that uniquely identifies the <code>Evaluation</code>.</p>
    #[serde(rename = "EvaluationId")]
    pub evaluation_id: String,
    /// <p>A user-supplied name or description of the <code>Evaluation</code>.</p>
    #[serde(rename = "EvaluationName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub evaluation_name: Option<String>,
    /// <p>The ID of the <code>MLModel</code> to evaluate.</p> <p>The schema used in creating the <code>MLModel</code> must match the schema of the <code>DataSource</code> used in the <code>Evaluation</code>.</p>
    #[serde(rename = "MLModelId")]
    pub ml_model_id: String,
}

/// <p> Represents the output of a <code>CreateEvaluation</code> operation, and is an acknowledgement that Amazon ML received the request.</p> <p>The <code>CreateEvaluation</code> operation is asynchronous. You can poll for status updates by using the <code>GetEvaluation</code> operation and checking the <code>Status</code> parameter. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateEvaluationOutput {
    /// <p>The user-supplied ID that uniquely identifies the <code>Evaluation</code>. This value should be identical to the value of the <code>EvaluationId</code> in the request.</p>
    #[serde(rename = "EvaluationId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub evaluation_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateMLModelInput {
    /// <p>A user-supplied ID that uniquely identifies the <code>MLModel</code>.</p>
    #[serde(rename = "MLModelId")]
    pub ml_model_id: String,
    /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
    #[serde(rename = "MLModelName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ml_model_name: Option<String>,
    /// <p>The category of supervised learning that this <code>MLModel</code> will address. Choose from the following types:</p> <ul> <li>Choose <code>REGRESSION</code> if the <code>MLModel</code> will be used to predict a numeric value.</li> <li>Choose <code>BINARY</code> if the <code>MLModel</code> result has two possible values.</li> <li>Choose <code>MULTICLASS</code> if the <code>MLModel</code> result has a limited number of values. </li> </ul> <p> For more information, see the <a href="http://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>.</p>
    #[serde(rename = "MLModelType")]
    pub ml_model_type: String,
    /// <p><p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p> <p>The following is the current set of training parameters: </p> <ul> <li> <p><code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li> <li><p><code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p></li> <li> <p><code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling the data improves a model&#39;s ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>. We strongly recommend that you shuffle your data.</p> </li> <li> <p><code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 normalization. This parameter can&#39;t be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li> <li> <p><code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 normalization. This parameter can&#39;t be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li> </ul></p>
    #[serde(rename = "Parameters")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<::std::collections::HashMap<String, String>>,
    /// <p>The data recipe for creating the <code>MLModel</code>. You must specify either the recipe or its URI. If you don't specify a recipe or its URI, Amazon ML creates a default.</p>
    #[serde(rename = "Recipe")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub recipe: Option<String>,
    /// <p>The Amazon Simple Storage Service (Amazon S3) location and file name that contains the <code>MLModel</code> recipe. You must specify either the recipe or its URI. If you don't specify a recipe or its URI, Amazon ML creates a default.</p>
    #[serde(rename = "RecipeUri")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub recipe_uri: Option<String>,
    /// <p>The <code>DataSource</code> that points to the training data.</p>
    #[serde(rename = "TrainingDataSourceId")]
    pub training_data_source_id: String,
}
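
// A minimal sketch of a `CreateMLModelInput` using the training parameters described in
// the doc comment above; the IDs are placeholders and the recipe is left unset so Amazon
// ML would fall back to its default.
#[allow(dead_code)]
fn example_create_ml_model_input() -> CreateMLModelInput {
    let mut parameters = ::std::collections::HashMap::new();
    parameters.insert("sgd.maxPasses".to_owned(), "10".to_owned());
    parameters.insert("sgd.shuffleType".to_owned(), "auto".to_owned());
    parameters.insert("sgd.l2RegularizationAmount".to_owned(), "1.0E-08".to_owned());

    CreateMLModelInput {
        ml_model_id: "example-model".to_owned(),
        ml_model_name: Some("Example binary model".to_owned()),
        // REGRESSION, BINARY or MULTICLASS, as listed above.
        ml_model_type: "BINARY".to_owned(),
        parameters: Some(parameters),
        training_data_source_id: "example-training-datasource".to_owned(),
        ..Default::default()
    }
}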

/// <p> Represents the output of a <code>CreateMLModel</code> operation, and is an acknowledgement that Amazon ML received the request.</p> <p>The <code>CreateMLModel</code> operation is asynchronous. You can poll for status updates by using the <code>GetMLModel</code> operation and checking the <code>Status</code> parameter. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateMLModelOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>MLModel</code>. This value should be identical to the value of the <code>MLModelId</code> in the request. </p>
    #[serde(rename = "MLModelId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ml_model_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateRealtimeEndpointInput {
    /// <p>The ID assigned to the <code>MLModel</code> during creation.</p>
    #[serde(rename = "MLModelId")]
    pub ml_model_id: String,
}

/// <p><p>Represents the output of a <code>CreateRealtimeEndpoint</code> operation.</p> <p>The result contains the <code>MLModelId</code> and the endpoint information for the <code>MLModel</code>.</p> <note> <p>The endpoint information includes the URI of the <code>MLModel</code>; that is, the location to send online prediction requests for the specified <code>MLModel</code>.</p> </note></p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateRealtimeEndpointOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>MLModel</code>. This value should be identical to the value of the <code>MLModelId</code> in the request.</p>
    #[serde(rename = "MLModelId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ml_model_id: Option<String>,
    /// <p>The endpoint information of the <code>MLModel</code> </p>
    #[serde(rename = "RealtimeEndpointInfo")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub realtime_endpoint_info: Option<RealtimeEndpointInfo>,
}

/// <p> Represents the output of the <code>GetDataSource</code> operation. </p> <p> The content consists of the detailed metadata and data file information and the current status of the <code>DataSource</code>. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DataSource {
    /// <p> The parameter is <code>true</code> if statistics need to be generated from the observation data. </p>
    #[serde(rename = "ComputeStatistics")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compute_statistics: Option<bool>,
    #[serde(rename = "ComputeTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compute_time: Option<i64>,
    /// <p>The time that the <code>DataSource</code> was created. The time is expressed in epoch time.</p>
    #[serde(rename = "CreatedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_at: Option<f64>,
    /// <p>The AWS user account from which the <code>DataSource</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
    #[serde(rename = "CreatedByIamUser")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by_iam_user: Option<String>,
    /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a <code>DataSource</code>.</p>
    #[serde(rename = "DataLocationS3")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_location_s3: Option<String>,
    /// <p>A JSON string that represents the splitting and rearrangement requirement used when this <code>DataSource</code> was created.</p>
    #[serde(rename = "DataRearrangement")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_rearrangement: Option<String>,
    /// <p>The total number of observations contained in the data files that the <code>DataSource</code> references.</p>
    #[serde(rename = "DataSizeInBytes")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_size_in_bytes: Option<i64>,
    /// <p>The ID that is assigned to the <code>DataSource</code> during creation.</p>
    #[serde(rename = "DataSourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_id: Option<String>,
    #[serde(rename = "FinishedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finished_at: Option<f64>,
    /// <p>The time of the most recent edit to the <code>DataSource</code>. The time is expressed in epoch time.</p>
    #[serde(rename = "LastUpdatedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated_at: Option<f64>,
    /// <p>A description of the most recent details about creating the <code>DataSource</code>.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    #[serde(rename = "Name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// <p>The number of data files referenced by the <code>DataSource</code>.</p>
    #[serde(rename = "NumberOfFiles")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub number_of_files: Option<i64>,
    #[serde(rename = "RDSMetadata")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rds_metadata: Option<RDSMetadata>,
    #[serde(rename = "RedshiftMetadata")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub redshift_metadata: Option<RedshiftMetadata>,
    #[serde(rename = "RoleARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role_arn: Option<String>,
    #[serde(rename = "StartedAt")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub started_at: Option<f64>,
    /// <p><p>The current status of the <code>DataSource</code>. This element can have one of the following values: </p> <ul> <li>PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a <code>DataSource</code>.</li> <li>INPROGRESS - The creation process is underway.</li> <li>FAILED - The request to create a <code>DataSource</code> did not run to completion. It is not usable.</li> <li>COMPLETED - The creation process completed successfully.</li> <li>DELETED - The <code>DataSource</code> is marked as deleted. It is not usable.</li> </ul></p>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteBatchPredictionInput {
    /// <p>A user-supplied ID that uniquely identifies the <code>BatchPrediction</code>.</p>
    #[serde(rename = "BatchPredictionId")]
    pub batch_prediction_id: String,
}

/// <p> Represents the output of a <code>DeleteBatchPrediction</code> operation.</p> <p>You can use the <code>GetBatchPrediction</code> operation and check the value of the <code>Status</code> parameter to see whether a <code>BatchPrediction</code> is marked as <code>DELETED</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteBatchPredictionOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>BatchPrediction</code>. This value should be identical to the value of the <code>BatchPredictionID</code> in the request.</p>
    #[serde(rename = "BatchPredictionId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub batch_prediction_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteDataSourceInput {
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>.</p>
    #[serde(rename = "DataSourceId")]
    pub data_source_id: String,
}

/// <p> Represents the output of a <code>DeleteDataSource</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteDataSourceOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>. This value should be identical to the value of the <code>DataSourceID</code> in the request.</p>
    #[serde(rename = "DataSourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteEvaluationInput {
    /// <p>A user-supplied ID that uniquely identifies the <code>Evaluation</code> to delete.</p>
    #[serde(rename = "EvaluationId")]
    pub evaluation_id: String,
}

/// <p> Represents the output of a <code>DeleteEvaluation</code> operation. The output indicates that Amazon Machine Learning (Amazon ML) received the request.</p> <p>You can use the <code>GetEvaluation</code> operation and check the value of the <code>Status</code> parameter to see whether an <code>Evaluation</code> is marked as <code>DELETED</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteEvaluationOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>Evaluation</code>. This value should be identical to the value of the <code>EvaluationId</code> in the request.</p>
    #[serde(rename = "EvaluationId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub evaluation_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteMLModelInput {
    /// <p>A user-supplied ID that uniquely identifies the <code>MLModel</code>.</p>
    #[serde(rename = "MLModelId")]
    pub ml_model_id: String,
}

/// <p>Represents the output of a <code>DeleteMLModel</code> operation.</p> <p>You can use the <code>GetMLModel</code> operation and check the value of the <code>Status</code> parameter to see whether an <code>MLModel</code> is marked as <code>DELETED</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteMLModelOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>MLModel</code>. This value should be identical to the value of the <code>MLModelID</code> in the request.</p>
    #[serde(rename = "MLModelId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ml_model_id: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteRealtimeEndpointInput {
    /// <p>The ID assigned to the <code>MLModel</code> during creation.</p>
    #[serde(rename = "MLModelId")]
    pub ml_model_id: String,
}

/// <p>Represents the output of a <code>DeleteRealtimeEndpoint</code> operation.</p> <p>The result contains the <code>MLModelId</code> and the endpoint information for the <code>MLModel</code>. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteRealtimeEndpointOutput {
    /// <p>A user-supplied ID that uniquely identifies the <code>MLModel</code>. This value should be identical to the value of the <code>MLModelId</code> in the request.</p>
    #[serde(rename = "MLModelId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ml_model_id: Option<String>,
    /// <p>The endpoint information of the <code>MLModel</code> </p>
    #[serde(rename = "RealtimeEndpointInfo")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub realtime_endpoint_info: Option<RealtimeEndpointInfo>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteTagsInput {
    /// <p>The ID of the tagged ML object. For example, <code>exampleModelId</code>.</p>
    #[serde(rename = "ResourceId")]
    pub resource_id: String,
    /// <p>The type of the tagged ML object.</p>
    #[serde(rename = "ResourceType")]
    pub resource_type: String,
    /// <p>One or more tags to delete.</p>
    #[serde(rename = "TagKeys")]
    pub tag_keys: Vec<String>,
}

/// <p>Amazon ML returns the following elements. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteTagsOutput {
    /// <p>The ID of the ML object from which tags were deleted.</p>
    #[serde(rename = "ResourceId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
    /// <p>The type of the ML object from which tags were deleted.</p>
    #[serde(rename = "ResourceType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub resource_type: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeBatchPredictionsInput {
    /// <p>The equal to operator. The <code>BatchPrediction</code> results will have <code>FilterVariable</code> values that exactly match the value specified with <code>EQ</code>.</p>
    #[serde(rename = "EQ")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eq: Option<String>,
    /// <p><p>Use one of the following variables to filter a list of <code>BatchPrediction</code>:</p> <ul> <li> <code>CreatedAt</code> - Sets the search criteria to the <code>BatchPrediction</code> creation date.</li> <li> <code>Status</code> - Sets the search criteria to the <code>BatchPrediction</code> status.</li> <li> <code>Name</code> - Sets the search criteria to the contents of the <code>BatchPrediction</code><b> </b> <code>Name</code>.</li> <li> <code>IAMUser</code> - Sets the search criteria to the user account that invoked the <code>BatchPrediction</code> creation.</li> <li> <code>MLModelId</code> - Sets the search criteria to the <code>MLModel</code> used in the <code>BatchPrediction</code>.</li> <li> <code>DataSourceId</code> - Sets the search criteria to the <code>DataSource</code> used in the <code>BatchPrediction</code>.</li> <li> <code>DataURI</code> - Sets the search criteria to the data file(s) used in the <code>BatchPrediction</code>. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.</li> </ul></p>
    #[serde(rename = "FilterVariable")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter_variable: Option<String>,
    /// <p>The greater than or equal to operator. The <code>BatchPrediction</code> results will have <code>FilterVariable</code> values that are greater than or equal to the value specified with <code>GE</code>. </p>
    #[serde(rename = "GE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ge: Option<String>,
    /// <p>The greater than operator. The <code>BatchPrediction</code> results will have <code>FilterVariable</code> values that are greater than the value specified with <code>GT</code>.</p>
    #[serde(rename = "GT")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gt: Option<String>,
    /// <p>The less than or equal to operator. The <code>BatchPrediction</code> results will have <code>FilterVariable</code> values that are less than or equal to the value specified with <code>LE</code>.</p>
    #[serde(rename = "LE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub le: Option<String>,
    /// <p>The less than operator. The <code>BatchPrediction</code> results will have <code>FilterVariable</code> values that are less than the value specified with <code>LT</code>.</p>
    #[serde(rename = "LT")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lt: Option<String>,
    /// <p>The number of pages of information to include in the result. The range of acceptable values is <code>1</code> through <code>100</code>. The default value is <code>100</code>.</p>
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    /// <p>The not equal to operator. The <code>BatchPrediction</code> results will have <code>FilterVariable</code> values not equal to the value specified with <code>NE</code>.</p>
    #[serde(rename = "NE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ne: Option<String>,
    /// <p>An ID of the page in the paginated results.</p>
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
    /// <p><p>A string that is found at the beginning of a variable, such as <code>Name</code> or <code>Id</code>.</p> <p>For example, a <code>Batch Prediction</code> operation could have the <code>Name</code> <code>2014-09-09-HolidayGiftMailer</code>. To search for this <code>BatchPrediction</code>, select <code>Name</code> for the <code>FilterVariable</code> and any of the following strings for the <code>Prefix</code>: </p> <ul> <li><p>2014-09</p></li> <li><p>2014-09-09</p></li> <li><p>2014-09-09-Holiday</p></li> </ul></p>
    #[serde(rename = "Prefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    /// <p>A two-value parameter that determines the sequence of the resulting list of <code>BatchPrediction</code>s.</p> <ul> <li> <code>asc</code> - Arranges the list in ascending order (A-Z, 0-9).</li> <li> <code>dsc</code> - Arranges the list in descending order (Z-A, 9-0).</li> </ul> <p>Results are sorted by <code>FilterVariable</code>.</p>
    #[serde(rename = "SortOrder")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sort_order: Option<String>,
}
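
// A minimal sketch of a filtered `DescribeBatchPredictionsInput`: list batch predictions
// created after a given date, newest first, 20 per page. The date string is a placeholder;
// only one `FilterVariable` can be used per request.
#[allow(dead_code)]
fn example_describe_batch_predictions_input() -> DescribeBatchPredictionsInput {
    DescribeBatchPredictionsInput {
        filter_variable: Some("CreatedAt".to_owned()),
        gt: Some("2015-01-01T00:00:00Z".to_owned()),
        sort_order: Some("dsc".to_owned()),
        limit: Some(20),
        ..Default::default()
    }
}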

/// <p>Represents the output of a <code>DescribeBatchPredictions</code> operation. The content is essentially a list of <code>BatchPrediction</code>s.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeBatchPredictionsOutput {
    /// <p>The ID of the next page in the paginated results that indicates at least one more page follows.</p>
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
    /// <p>A list of <code>BatchPrediction</code> objects that meet the search criteria. </p>
    #[serde(rename = "Results")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<BatchPrediction>>,
}
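
// A hedged sketch of draining the paginated results. It assumes the `MachineLearning`
// trait implemented later in this file exposes `describe_batch_predictions` with the
// usual `Result<_, RusotoError<DescribeBatchPredictionsError>>` signature.
#[allow(dead_code)]
async fn collect_batch_predictions(
    client: &MachineLearningClient,
) -> Result<Vec<BatchPrediction>, RusotoError<DescribeBatchPredictionsError>> {
    let mut all = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let output = client
            .describe_batch_predictions(DescribeBatchPredictionsInput {
                next_token: next_token.clone(),
                ..Default::default()
            })
            .await?;
        all.extend(output.results.unwrap_or_default());
        // A present NextToken indicates at least one more page follows.
        next_token = output.next_token;
        if next_token.is_none() {
            break;
        }
    }
    Ok(all)
}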

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeDataSourcesInput {
    /// <p>The equal to operator. The <code>DataSource</code> results will have <code>FilterVariable</code> values that exactly match the value specified with <code>EQ</code>.</p>
    #[serde(rename = "EQ")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eq: Option<String>,
    /// <p><p>Use one of the following variables to filter a list of <code>DataSource</code>:</p> <ul> <li> <code>CreatedAt</code> - Sets the search criteria to <code>DataSource</code> creation dates.</li> <li> <code>Status</code> - Sets the search criteria to <code>DataSource</code> statuses.</li> <li> <code>Name</code> - Sets the search criteria to the contents of <code>DataSource</code> <b> </b> <code>Name</code>.</li> <li> <code>DataUri</code> - Sets the search criteria to the URI of data files used to create the <code>DataSource</code>. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.</li> <li> <code>IAMUser</code> - Sets the search criteria to the user account that invoked the <code>DataSource</code> creation.</li> </ul></p>
    #[serde(rename = "FilterVariable")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter_variable: Option<String>,
    /// <p>The greater than or equal to operator. The <code>DataSource</code> results will have <code>FilterVariable</code> values that are greater than or equal to the value specified with <code>GE</code>. </p>
    #[serde(rename = "GE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ge: Option<String>,
    /// <p>The greater than operator. The <code>DataSource</code> results will have <code>FilterVariable</code> values that are greater than the value specified with <code>GT</code>.</p>
    #[serde(rename = "GT")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gt: Option<String>,
    /// <p>The less than or equal to operator. The <code>DataSource</code> results will have <code>FilterVariable</code> values that are less than or equal to the value specified with <code>LE</code>.</p>
    #[serde(rename = "LE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub le: Option<String>,
    /// <p>The less than operator. The <code>DataSource</code> results will have <code>FilterVariable</code> values that are less than the value specified with <code>LT</code>.</p>
    #[serde(rename = "LT")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lt: Option<String>,
    /// <p> The maximum number of <code>DataSource</code> to include in the result.</p>
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    /// <p>The not equal to operator. The <code>DataSource</code> results will have <code>FilterVariable</code> values not equal to the value specified with <code>NE</code>.</p>
    #[serde(rename = "NE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ne: Option<String>,
    /// <p>The ID of the page in the paginated results.</p>
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
    /// <p><p>A string that is found at the beginning of a variable, such as <code>Name</code> or <code>Id</code>.</p> <p>For example, a <code>DataSource</code> could have the <code>Name</code> <code>2014-09-09-HolidayGiftMailer</code>. To search for this <code>DataSource</code>, select <code>Name</code> for the <code>FilterVariable</code> and any of the following strings for the <code>Prefix</code>: </p> <ul> <li><p>2014-09</p></li> <li><p>2014-09-09</p></li> <li><p>2014-09-09-Holiday</p></li> </ul></p>
    #[serde(rename = "Prefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    /// <p>A two-value parameter that determines the sequence of the resulting list of <code>DataSource</code>.</p> <ul> <li> <code>asc</code> - Arranges the list in ascending order (A-Z, 0-9).</li> <li> <code>dsc</code> - Arranges the list in descending order (Z-A, 9-0).</li> </ul> <p>Results are sorted by <code>FilterVariable</code>.</p>
    #[serde(rename = "SortOrder")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sort_order: Option<String>,
}

/// <p>Represents the query results from a <a>DescribeDataSources</a> operation. The content is essentially a list of <code>DataSource</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDataSourcesOutput {
    /// <p>An ID of the next page in the paginated results that indicates at least one more page follows.</p>
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
    /// <p>A list of <code>DataSource</code> that meet the search criteria. </p>
    #[serde(rename = "Results")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub results: Option<Vec<DataSource>>,
}
680
681#[derive(Clone, Debug, Default, PartialEq, Serialize)]
682#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
683pub struct DescribeEvaluationsInput {
684    /// <p>The equal to operator. The <code>Evaluation</code> results will have <code>FilterVariable</code> values that exactly match the value specified with <code>EQ</code>.</p>
685    #[serde(rename = "EQ")]
686    #[serde(skip_serializing_if = "Option::is_none")]
687    pub eq: Option<String>,
688    /// <p><p>Use one of the following variable to filter a list of <code>Evaluation</code> objects:</p> <ul> <li> <code>CreatedAt</code> - Sets the search criteria to the <code>Evaluation</code> creation date.</li> <li> <code>Status</code> - Sets the search criteria to the <code>Evaluation</code> status.</li> <li> <code>Name</code> - Sets the search criteria to the contents of <code>Evaluation</code> <b> </b> <code>Name</code>.</li> <li> <code>IAMUser</code> - Sets the search criteria to the user account that invoked an <code>Evaluation</code>.</li> <li> <code>MLModelId</code> - Sets the search criteria to the <code>MLModel</code> that was evaluated.</li> <li> <code>DataSourceId</code> - Sets the search criteria to the <code>DataSource</code> used in <code>Evaluation</code>.</li> <li> <code>DataUri</code> - Sets the search criteria to the data file(s) used in <code>Evaluation</code>. The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory.</li> </ul></p>
689    #[serde(rename = "FilterVariable")]
690    #[serde(skip_serializing_if = "Option::is_none")]
691    pub filter_variable: Option<String>,
692    /// <p>The greater than or equal to operator. The <code>Evaluation</code> results will have <code>FilterVariable</code> values that are greater than or equal to the value specified with <code>GE</code>. </p>
693    #[serde(rename = "GE")]
694    #[serde(skip_serializing_if = "Option::is_none")]
695    pub ge: Option<String>,
696    /// <p>The greater than operator. The <code>Evaluation</code> results will have <code>FilterVariable</code> values that are greater than the value specified with <code>GT</code>.</p>
697    #[serde(rename = "GT")]
698    #[serde(skip_serializing_if = "Option::is_none")]
699    pub gt: Option<String>,
700    /// <p>The less than or equal to operator. The <code>Evaluation</code> results will have <code>FilterVariable</code> values that are less than or equal to the value specified with <code>LE</code>.</p>
701    #[serde(rename = "LE")]
702    #[serde(skip_serializing_if = "Option::is_none")]
703    pub le: Option<String>,
704    /// <p>The less than operator. The <code>Evaluation</code> results will have <code>FilterVariable</code> values that are less than the value specified with <code>LT</code>.</p>
705    #[serde(rename = "LT")]
706    #[serde(skip_serializing_if = "Option::is_none")]
707    pub lt: Option<String>,
708    /// <p> The maximum number of <code>Evaluation</code> to include in the result.</p>
709    #[serde(rename = "Limit")]
710    #[serde(skip_serializing_if = "Option::is_none")]
711    pub limit: Option<i64>,
712    /// <p>The not equal to operator. The <code>Evaluation</code> results will have <code>FilterVariable</code> values not equal to the value specified with <code>NE</code>.</p>
713    #[serde(rename = "NE")]
714    #[serde(skip_serializing_if = "Option::is_none")]
715    pub ne: Option<String>,
716    /// <p>The ID of the page in the paginated results.</p>
717    #[serde(rename = "NextToken")]
718    #[serde(skip_serializing_if = "Option::is_none")]
719    pub next_token: Option<String>,
720    /// <p><p>A string that is found at the beginning of a variable, such as <code>Name</code> or <code>Id</code>.</p> <p>For example, an <code>Evaluation</code> could have the <code>Name</code> <code>2014-09-09-HolidayGiftMailer</code>. To search for this <code>Evaluation</code>, select <code>Name</code> for the <code>FilterVariable</code> and any of the following strings for the <code>Prefix</code>: </p> <ul> <li><p>2014-09</p></li> <li><p>2014-09-09</p></li> <li><p>2014-09-09-Holiday</p></li> </ul></p>
721    #[serde(rename = "Prefix")]
722    #[serde(skip_serializing_if = "Option::is_none")]
723    pub prefix: Option<String>,
724    /// <p>A two-value parameter that determines the sequence of the resulting list of <code>Evaluation</code>.</p> <ul> <li> <code>asc</code> - Arranges the list in ascending order (A-Z, 0-9).</li> <li> <code>dsc</code> - Arranges the list in descending order (Z-A, 9-0).</li> </ul> <p>Results are sorted by <code>FilterVariable</code>.</p>
725    #[serde(rename = "SortOrder")]
726    #[serde(skip_serializing_if = "Option::is_none")]
727    pub sort_order: Option<String>,
728}
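
// The example below is a minimal sketch (not part of the generated API) showing how the
// serde attributes on DescribeEvaluationsInput shape the request body: fields left as
// `None` are skipped, and the rest are renamed to their AWS wire names. The filter values
// ("Name", "2014-09", "asc", 25) are illustrative only.
#[allow(dead_code)]
fn example_describe_evaluations_request_body() -> String {
    let input = DescribeEvaluationsInput {
        filter_variable: Some("Name".to_owned()),
        prefix: Some("2014-09".to_owned()),
        sort_order: Some("asc".to_owned()),
        limit: Some(25),
        ..Default::default()
    };
    // Produces: {"FilterVariable":"Name","Limit":25,"Prefix":"2014-09","SortOrder":"asc"}
    serde_json::to_string(&input).expect("DescribeEvaluationsInput is serializable")
}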
729
730/// <p>Represents the query results from a <code>DescribeEvaluations</code> operation. The content is essentially a list of <code>Evaluation</code>.</p>
731#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
732#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
733pub struct DescribeEvaluationsOutput {
734    /// <p>The ID of the next page in the paginated results that indicates at least one more page follows.</p>
735    #[serde(rename = "NextToken")]
736    #[serde(skip_serializing_if = "Option::is_none")]
737    pub next_token: Option<String>,
738    /// <p>A list of <code>Evaluation</code> that meet the search criteria. </p>
739    #[serde(rename = "Results")]
740    #[serde(skip_serializing_if = "Option::is_none")]
741    pub results: Option<Vec<Evaluation>>,
742}
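
// A hedged pagination sketch: it assumes the async `describe_evaluations` method of the
// `MachineLearning` trait that this generated file implements for `MachineLearningClient`
// further down, and the `DescribeEvaluationsError` type generated alongside it. The
// NextToken handling follows the field documentation above: an absent NextToken means
// the last page has been reached.
#[allow(dead_code)]
async fn example_collect_all_evaluations(
    client: &MachineLearningClient,
) -> Result<Vec<Evaluation>, RusotoError<DescribeEvaluationsError>> {
    let mut all = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        let page = client
            .describe_evaluations(DescribeEvaluationsInput {
                next_token: next_token.take(),
                ..Default::default()
            })
            .await?;
        all.extend(page.results.unwrap_or_default());
        next_token = page.next_token;
        if next_token.is_none() {
            break;
        }
    }
    Ok(all)
}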
743
744#[derive(Clone, Debug, Default, PartialEq, Serialize)]
745#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
746pub struct DescribeMLModelsInput {
747    /// <p>The equal to operator. The <code>MLModel</code> results will have <code>FilterVariable</code> values that exactly match the value specified with <code>EQ</code>.</p>
748    #[serde(rename = "EQ")]
749    #[serde(skip_serializing_if = "Option::is_none")]
750    pub eq: Option<String>,
751    /// <p><p>Use one of the following variables to filter a list of <code>MLModel</code>:</p> <ul> <li> <code>CreatedAt</code> - Sets the search criteria to <code>MLModel</code> creation date.</li> <li> <code>Status</code> - Sets the search criteria to <code>MLModel</code> status.</li> <li> <code>Name</code> - Sets the search criteria to the contents of <code>MLModel</code><b> </b> <code>Name</code>.</li> <li> <code>IAMUser</code> - Sets the search criteria to the user account that invoked the <code>MLModel</code> creation.</li> <li> <code>TrainingDataSourceId</code> - Sets the search criteria to the <code>DataSource</code> used to train one or more <code>MLModel</code>.</li> <li> <code>RealtimeEndpointStatus</code> - Sets the search criteria to the <code>MLModel</code> real-time endpoint status.</li> <li> <code>MLModelType</code> - Sets the search criteria to <code>MLModel</code> type: binary, regression, or multi-class.</li> <li> <code>Algorithm</code> - Sets the search criteria to the algorithm that the <code>MLModel</code> uses.</li> <li> <code>TrainingDataURI</code> - Sets the search criteria to the data file(s) used in training a <code>MLModel</code>. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.</li> </ul></p>
752    #[serde(rename = "FilterVariable")]
753    #[serde(skip_serializing_if = "Option::is_none")]
754    pub filter_variable: Option<String>,
755    /// <p>The greater than or equal to operator. The <code>MLModel</code> results will have <code>FilterVariable</code> values that are greater than or equal to the value specified with <code>GE</code>. </p>
756    #[serde(rename = "GE")]
757    #[serde(skip_serializing_if = "Option::is_none")]
758    pub ge: Option<String>,
759    /// <p>The greater than operator. The <code>MLModel</code> results will have <code>FilterVariable</code> values that are greater than the value specified with <code>GT</code>.</p>
760    #[serde(rename = "GT")]
761    #[serde(skip_serializing_if = "Option::is_none")]
762    pub gt: Option<String>,
763    /// <p>The less than or equal to operator. The <code>MLModel</code> results will have <code>FilterVariable</code> values that are less than or equal to the value specified with <code>LE</code>.</p>
764    #[serde(rename = "LE")]
765    #[serde(skip_serializing_if = "Option::is_none")]
766    pub le: Option<String>,
767    /// <p>The less than operator. The <code>MLModel</code> results will have <code>FilterVariable</code> values that are less than the value specified with <code>LT</code>.</p>
768    #[serde(rename = "LT")]
769    #[serde(skip_serializing_if = "Option::is_none")]
770    pub lt: Option<String>,
771    /// <p>The number of pages of information to include in the result. The range of acceptable values is <code>1</code> through <code>100</code>. The default value is <code>100</code>.</p>
772    #[serde(rename = "Limit")]
773    #[serde(skip_serializing_if = "Option::is_none")]
774    pub limit: Option<i64>,
775    /// <p>The not equal to operator. The <code>MLModel</code> results will have <code>FilterVariable</code> values not equal to the value specified with <code>NE</code>.</p>
776    #[serde(rename = "NE")]
777    #[serde(skip_serializing_if = "Option::is_none")]
778    pub ne: Option<String>,
779    /// <p>The ID of the page in the paginated results.</p>
780    #[serde(rename = "NextToken")]
781    #[serde(skip_serializing_if = "Option::is_none")]
782    pub next_token: Option<String>,
783    /// <p><p>A string that is found at the beginning of a variable, such as <code>Name</code> or <code>Id</code>.</p> <p>For example, an <code>MLModel</code> could have the <code>Name</code> <code>2014-09-09-HolidayGiftMailer</code>. To search for this <code>MLModel</code>, select <code>Name</code> for the <code>FilterVariable</code> and any of the following strings for the <code>Prefix</code>: </p> <ul> <li><p>2014-09</p></li> <li><p>2014-09-09</p></li> <li><p>2014-09-09-Holiday</p></li> </ul></p>
784    #[serde(rename = "Prefix")]
785    #[serde(skip_serializing_if = "Option::is_none")]
786    pub prefix: Option<String>,
787    /// <p>A two-value parameter that determines the sequence of the resulting list of <code>MLModel</code>.</p> <ul> <li> <code>asc</code> - Arranges the list in ascending order (A-Z, 0-9).</li> <li> <code>dsc</code> - Arranges the list in descending order (Z-A, 9-0).</li> </ul> <p>Results are sorted by <code>FilterVariable</code>.</p>
788    #[serde(rename = "SortOrder")]
789    #[serde(skip_serializing_if = "Option::is_none")]
790    pub sort_order: Option<String>,
791}
792
793/// <p>Represents the output of a <code>DescribeMLModels</code> operation. The content is essentially a list of <code>MLModel</code>.</p>
794#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
795#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
796pub struct DescribeMLModelsOutput {
797    /// <p>The ID of the next page in the paginated results that indicates at least one more page follows.</p>
798    #[serde(rename = "NextToken")]
799    #[serde(skip_serializing_if = "Option::is_none")]
800    pub next_token: Option<String>,
801    /// <p>A list of <code>MLModel</code> that meet the search criteria.</p>
802    #[serde(rename = "Results")]
803    #[serde(skip_serializing_if = "Option::is_none")]
804    pub results: Option<Vec<MLModel>>,
805}
806
807#[derive(Clone, Debug, Default, PartialEq, Serialize)]
808#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
809pub struct DescribeTagsInput {
810    /// <p>The ID of the ML object. For example, <code>exampleModelId</code>. </p>
811    #[serde(rename = "ResourceId")]
812    pub resource_id: String,
813    /// <p>The type of the ML object.</p>
814    #[serde(rename = "ResourceType")]
815    pub resource_type: String,
816}
817
818/// <p>Amazon ML returns the following elements. </p>
819#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
820#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
821pub struct DescribeTagsOutput {
822    /// <p>The ID of the tagged ML object.</p>
823    #[serde(rename = "ResourceId")]
824    #[serde(skip_serializing_if = "Option::is_none")]
825    pub resource_id: Option<String>,
826    /// <p>The type of the tagged ML object.</p>
827    #[serde(rename = "ResourceType")]
828    #[serde(skip_serializing_if = "Option::is_none")]
829    pub resource_type: Option<String>,
830    /// <p>A list of tags associated with the ML object.</p>
831    #[serde(rename = "Tags")]
832    #[serde(skip_serializing_if = "Option::is_none")]
833    pub tags: Option<Vec<Tag>>,
834}
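
// A hedged sketch of listing the tags on an ML object. It assumes the async `describe_tags`
// method of the `MachineLearning` trait implemented for `MachineLearningClient` later in
// this file, and the `DescribeTagsError` type generated with it. The resource values are
// illustrative; "MLModel" stands in for any taggable resource type.
#[allow(dead_code)]
async fn example_describe_tags(
    client: &MachineLearningClient,
) -> Result<Vec<Tag>, RusotoError<DescribeTagsError>> {
    let output = client
        .describe_tags(DescribeTagsInput {
            resource_id: "exampleModelId".to_owned(),
            resource_type: "MLModel".to_owned(),
        })
        .await?;
    Ok(output.tags.unwrap_or_default())
}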
835
836    /// <p> Represents the output of a <code>GetEvaluation</code> operation. </p> <p>The content consists of the detailed metadata and data file information and the current status of the <code>Evaluation</code>.</p>
837#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
838#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
839pub struct Evaluation {
840    #[serde(rename = "ComputeTime")]
841    #[serde(skip_serializing_if = "Option::is_none")]
842    pub compute_time: Option<i64>,
843    /// <p>The time that the <code>Evaluation</code> was created. The time is expressed in epoch time.</p>
844    #[serde(rename = "CreatedAt")]
845    #[serde(skip_serializing_if = "Option::is_none")]
846    pub created_at: Option<f64>,
847    /// <p>The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
848    #[serde(rename = "CreatedByIamUser")]
849    #[serde(skip_serializing_if = "Option::is_none")]
850    pub created_by_iam_user: Option<String>,
851    /// <p>The ID of the <code>DataSource</code> that is used to evaluate the <code>MLModel</code>.</p>
852    #[serde(rename = "EvaluationDataSourceId")]
853    #[serde(skip_serializing_if = "Option::is_none")]
854    pub evaluation_data_source_id: Option<String>,
855    /// <p>The ID that is assigned to the <code>Evaluation</code> at creation.</p>
856    #[serde(rename = "EvaluationId")]
857    #[serde(skip_serializing_if = "Option::is_none")]
858    pub evaluation_id: Option<String>,
859    #[serde(rename = "FinishedAt")]
860    #[serde(skip_serializing_if = "Option::is_none")]
861    pub finished_at: Option<f64>,
862    /// <p>The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.</p>
863    #[serde(rename = "InputDataLocationS3")]
864    #[serde(skip_serializing_if = "Option::is_none")]
865    pub input_data_location_s3: Option<String>,
866    /// <p>The time of the most recent edit to the <code>Evaluation</code>. The time is expressed in epoch time.</p>
867    #[serde(rename = "LastUpdatedAt")]
868    #[serde(skip_serializing_if = "Option::is_none")]
869    pub last_updated_at: Option<f64>,
870    /// <p>The ID of the <code>MLModel</code> that is the focus of the evaluation.</p>
871    #[serde(rename = "MLModelId")]
872    #[serde(skip_serializing_if = "Option::is_none")]
873    pub ml_model_id: Option<String>,
874    /// <p>A description of the most recent details about evaluating the <code>MLModel</code>.</p>
875    #[serde(rename = "Message")]
876    #[serde(skip_serializing_if = "Option::is_none")]
877    pub message: Option<String>,
878    /// <p>A user-supplied name or description of the <code>Evaluation</code>. </p>
879    #[serde(rename = "Name")]
880    #[serde(skip_serializing_if = "Option::is_none")]
881    pub name: Option<String>,
882    /// <p>Measurements of how well the <code>MLModel</code> performed, using observations referenced by the <code>DataSource</code>. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p> <ul> <li> <p>BinaryAUC: A binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li> <li> <p>RegressionRMSE: A regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li> <li> <p>MulticlassAvgFScore: A multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li> </ul> <p> For more information about performance metrics, please see the <a href="http://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
883    #[serde(rename = "PerformanceMetrics")]
884    #[serde(skip_serializing_if = "Option::is_none")]
885    pub performance_metrics: Option<PerformanceMetrics>,
886    #[serde(rename = "StartedAt")]
887    #[serde(skip_serializing_if = "Option::is_none")]
888    pub started_at: Option<f64>,
889    /// <p><p>The status of the evaluation. This element can have one of the following values:</p> <ul> <li> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an <code>MLModel</code>.</li> <li> <code>INPROGRESS</code> - The evaluation is underway.</li> <li> <code>FAILED</code> - The request to evaluate an <code>MLModel</code> did not run to completion. It is not usable.</li> <li> <code>COMPLETED</code> - The evaluation process completed successfully.</li> <li> <code>DELETED</code> - The <code>Evaluation</code> is marked as deleted. It is not usable.</li> </ul></p>
890    #[serde(rename = "Status")]
891    #[serde(skip_serializing_if = "Option::is_none")]
892    pub status: Option<String>,
893}
894
895#[derive(Clone, Debug, Default, PartialEq, Serialize)]
896#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
897pub struct GetBatchPredictionInput {
898    /// <p>An ID assigned to the <code>BatchPrediction</code> at creation.</p>
899    #[serde(rename = "BatchPredictionId")]
900    pub batch_prediction_id: String,
901}
902
903/// <p>Represents the output of a <code>GetBatchPrediction</code> operation and describes a <code>BatchPrediction</code>.</p>
904#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
905#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
906pub struct GetBatchPredictionOutput {
907    /// <p>The ID of the <code>DataSource</code> that was used to create the <code>BatchPrediction</code>. </p>
908    #[serde(rename = "BatchPredictionDataSourceId")]
909    #[serde(skip_serializing_if = "Option::is_none")]
910    pub batch_prediction_data_source_id: Option<String>,
911    /// <p>An ID assigned to the <code>BatchPrediction</code> at creation. This value should be identical to the value of the <code>BatchPredictionId</code> in the request.</p>
912    #[serde(rename = "BatchPredictionId")]
913    #[serde(skip_serializing_if = "Option::is_none")]
914    pub batch_prediction_id: Option<String>,
915    /// <p>The approximate CPU time in milliseconds that Amazon Machine Learning spent processing the <code>BatchPrediction</code>, normalized and scaled on computation resources. <code>ComputeTime</code> is only available if the <code>BatchPrediction</code> is in the <code>COMPLETED</code> state.</p>
916    #[serde(rename = "ComputeTime")]
917    #[serde(skip_serializing_if = "Option::is_none")]
918    pub compute_time: Option<i64>,
919    /// <p>The time when the <code>BatchPrediction</code> was created. The time is expressed in epoch time.</p>
920    #[serde(rename = "CreatedAt")]
921    #[serde(skip_serializing_if = "Option::is_none")]
922    pub created_at: Option<f64>,
923    /// <p>The AWS user account that invoked the <code>BatchPrediction</code>. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
924    #[serde(rename = "CreatedByIamUser")]
925    #[serde(skip_serializing_if = "Option::is_none")]
926    pub created_by_iam_user: Option<String>,
927    /// <p>The epoch time when Amazon Machine Learning marked the <code>BatchPrediction</code> as <code>COMPLETED</code> or <code>FAILED</code>. <code>FinishedAt</code> is only available when the <code>BatchPrediction</code> is in the <code>COMPLETED</code> or <code>FAILED</code> state.</p>
928    #[serde(rename = "FinishedAt")]
929    #[serde(skip_serializing_if = "Option::is_none")]
930    pub finished_at: Option<f64>,
931    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
932    #[serde(rename = "InputDataLocationS3")]
933    #[serde(skip_serializing_if = "Option::is_none")]
934    pub input_data_location_s3: Option<String>,
935    /// <p>The number of invalid records that Amazon Machine Learning saw while processing the <code>BatchPrediction</code>.</p>
936    #[serde(rename = "InvalidRecordCount")]
937    #[serde(skip_serializing_if = "Option::is_none")]
938    pub invalid_record_count: Option<i64>,
939    /// <p>The time of the most recent edit to <code>BatchPrediction</code>. The time is expressed in epoch time.</p>
940    #[serde(rename = "LastUpdatedAt")]
941    #[serde(skip_serializing_if = "Option::is_none")]
942    pub last_updated_at: Option<f64>,
943    /// <p>A link to the file that contains logs of the <code>CreateBatchPrediction</code> operation.</p>
944    #[serde(rename = "LogUri")]
945    #[serde(skip_serializing_if = "Option::is_none")]
946    pub log_uri: Option<String>,
947    /// <p>The ID of the <code>MLModel</code> that generated predictions for the <code>BatchPrediction</code> request.</p>
948    #[serde(rename = "MLModelId")]
949    #[serde(skip_serializing_if = "Option::is_none")]
950    pub ml_model_id: Option<String>,
951    /// <p>A description of the most recent details about processing the batch prediction request.</p>
952    #[serde(rename = "Message")]
953    #[serde(skip_serializing_if = "Option::is_none")]
954    pub message: Option<String>,
955    /// <p>A user-supplied name or description of the <code>BatchPrediction</code>.</p>
956    #[serde(rename = "Name")]
957    #[serde(skip_serializing_if = "Option::is_none")]
958    pub name: Option<String>,
959    /// <p>The location of an Amazon S3 bucket or directory to receive the operation results.</p>
960    #[serde(rename = "OutputUri")]
961    #[serde(skip_serializing_if = "Option::is_none")]
962    pub output_uri: Option<String>,
963    /// <p>The epoch time when Amazon Machine Learning marked the <code>BatchPrediction</code> as <code>INPROGRESS</code>. <code>StartedAt</code> isn't available if the <code>BatchPrediction</code> is in the <code>PENDING</code> state.</p>
964    #[serde(rename = "StartedAt")]
965    #[serde(skip_serializing_if = "Option::is_none")]
966    pub started_at: Option<f64>,
967    /// <p><p>The status of the <code>BatchPrediction</code>, which can be one of the following values:</p> <ul> <li> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to generate batch predictions.</li> <li> <code>INPROGRESS</code> - The batch predictions are in progress.</li> <li> <code>FAILED</code> - The request to perform a batch prediction did not run to completion. It is not usable.</li> <li> <code>COMPLETED</code> - The batch prediction process completed successfully.</li> <li> <code>DELETED</code> - The <code>BatchPrediction</code> is marked as deleted. It is not usable.</li> </ul></p>
968    #[serde(rename = "Status")]
969    #[serde(skip_serializing_if = "Option::is_none")]
970    pub status: Option<String>,
971    /// <p>The number of total records that Amazon Machine Learning saw while processing the <code>BatchPrediction</code>.</p>
972    #[serde(rename = "TotalRecordCount")]
973    #[serde(skip_serializing_if = "Option::is_none")]
974    pub total_record_count: Option<i64>,
975}
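
// A small, fully local helper sketch: the fraction of invalid records in a finished
// BatchPrediction, computed from the two record counts documented above. Returns None
// when either count is unavailable (for example, the job has not completed) or the
// total is zero.
#[allow(dead_code)]
fn example_invalid_record_ratio(output: &GetBatchPredictionOutput) -> Option<f64> {
    match (output.invalid_record_count, output.total_record_count) {
        (Some(invalid), Some(total)) if total > 0 => Some(invalid as f64 / total as f64),
        _ => None,
    }
}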
976
977#[derive(Clone, Debug, Default, PartialEq, Serialize)]
978#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
979pub struct GetDataSourceInput {
980    /// <p>The ID assigned to the <code>DataSource</code> at creation.</p>
981    #[serde(rename = "DataSourceId")]
982    pub data_source_id: String,
983    /// <p>Specifies whether the <code>GetDataSource</code> operation should return <code>DataSourceSchema</code>.</p> <p>If true, <code>DataSourceSchema</code> is returned.</p> <p>If false, <code>DataSourceSchema</code> is not returned.</p>
984    #[serde(rename = "Verbose")]
985    #[serde(skip_serializing_if = "Option::is_none")]
986    pub verbose: Option<bool>,
987}
988
989/// <p>Represents the output of a <code>GetDataSource</code> operation and describes a <code>DataSource</code>.</p>
990#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
991#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
992pub struct GetDataSourceOutput {
993    /// <p> The parameter is <code>true</code> if statistics need to be generated from the observation data. </p>
994    #[serde(rename = "ComputeStatistics")]
995    #[serde(skip_serializing_if = "Option::is_none")]
996    pub compute_statistics: Option<bool>,
997    /// <p>The approximate CPU time in milliseconds that Amazon Machine Learning spent processing the <code>DataSource</code>, normalized and scaled on computation resources. <code>ComputeTime</code> is only available if the <code>DataSource</code> is in the <code>COMPLETED</code> state and the <code>ComputeStatistics</code> is set to true.</p>
998    #[serde(rename = "ComputeTime")]
999    #[serde(skip_serializing_if = "Option::is_none")]
1000    pub compute_time: Option<i64>,
1001    /// <p>The time that the <code>DataSource</code> was created. The time is expressed in epoch time.</p>
1002    #[serde(rename = "CreatedAt")]
1003    #[serde(skip_serializing_if = "Option::is_none")]
1004    pub created_at: Option<f64>,
1005    /// <p>The AWS user account from which the <code>DataSource</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
1006    #[serde(rename = "CreatedByIamUser")]
1007    #[serde(skip_serializing_if = "Option::is_none")]
1008    pub created_by_iam_user: Option<String>,
1009    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
1010    #[serde(rename = "DataLocationS3")]
1011    #[serde(skip_serializing_if = "Option::is_none")]
1012    pub data_location_s3: Option<String>,
1013    /// <p>A JSON string that represents the splitting and rearrangement requirement used when this <code>DataSource</code> was created.</p>
1014    #[serde(rename = "DataRearrangement")]
1015    #[serde(skip_serializing_if = "Option::is_none")]
1016    pub data_rearrangement: Option<String>,
1017    /// <p>The total size of observations in the data files.</p>
1018    #[serde(rename = "DataSizeInBytes")]
1019    #[serde(skip_serializing_if = "Option::is_none")]
1020    pub data_size_in_bytes: Option<i64>,
1021    /// <p>The ID assigned to the <code>DataSource</code> at creation. This value should be identical to the value of the <code>DataSourceId</code> in the request.</p>
1022    #[serde(rename = "DataSourceId")]
1023    #[serde(skip_serializing_if = "Option::is_none")]
1024    pub data_source_id: Option<String>,
1025    /// <p><p>The schema used by all of the data files of this <code>DataSource</code>.</p> <note><title>Note</title> <p>This parameter is provided as part of the verbose format.</p></note></p>
1026    #[serde(rename = "DataSourceSchema")]
1027    #[serde(skip_serializing_if = "Option::is_none")]
1028    pub data_source_schema: Option<String>,
1029    /// <p>The epoch time when Amazon Machine Learning marked the <code>DataSource</code> as <code>COMPLETED</code> or <code>FAILED</code>. <code>FinishedAt</code> is only available when the <code>DataSource</code> is in the <code>COMPLETED</code> or <code>FAILED</code> state.</p>
1030    #[serde(rename = "FinishedAt")]
1031    #[serde(skip_serializing_if = "Option::is_none")]
1032    pub finished_at: Option<f64>,
1033    /// <p>The time of the most recent edit to the <code>DataSource</code>. The time is expressed in epoch time.</p>
1034    #[serde(rename = "LastUpdatedAt")]
1035    #[serde(skip_serializing_if = "Option::is_none")]
1036    pub last_updated_at: Option<f64>,
1037    /// <p>A link to the file containing logs of <code>CreateDataSourceFrom*</code> operations.</p>
1038    #[serde(rename = "LogUri")]
1039    #[serde(skip_serializing_if = "Option::is_none")]
1040    pub log_uri: Option<String>,
1041    /// <p>The user-supplied description of the most recent details about creating the <code>DataSource</code>.</p>
1042    #[serde(rename = "Message")]
1043    #[serde(skip_serializing_if = "Option::is_none")]
1044    pub message: Option<String>,
1045    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
1046    #[serde(rename = "Name")]
1047    #[serde(skip_serializing_if = "Option::is_none")]
1048    pub name: Option<String>,
1049    /// <p>The number of data files referenced by the <code>DataSource</code>.</p>
1050    #[serde(rename = "NumberOfFiles")]
1051    #[serde(skip_serializing_if = "Option::is_none")]
1052    pub number_of_files: Option<i64>,
1053    #[serde(rename = "RDSMetadata")]
1054    #[serde(skip_serializing_if = "Option::is_none")]
1055    pub rds_metadata: Option<RDSMetadata>,
1056    #[serde(rename = "RedshiftMetadata")]
1057    #[serde(skip_serializing_if = "Option::is_none")]
1058    pub redshift_metadata: Option<RedshiftMetadata>,
1059    #[serde(rename = "RoleARN")]
1060    #[serde(skip_serializing_if = "Option::is_none")]
1061    pub role_arn: Option<String>,
1062    /// <p>The epoch time when Amazon Machine Learning marked the <code>DataSource</code> as <code>INPROGRESS</code>. <code>StartedAt</code> isn't available if the <code>DataSource</code> is in the <code>PENDING</code> state.</p>
1063    #[serde(rename = "StartedAt")]
1064    #[serde(skip_serializing_if = "Option::is_none")]
1065    pub started_at: Option<f64>,
1066    /// <p><p>The current status of the <code>DataSource</code>. This element can have one of the following values:</p> <ul> <li> <code>PENDING</code> - Amazon ML submitted a request to create a <code>DataSource</code>.</li> <li> <code>INPROGRESS</code> - The creation process is underway.</li> <li> <code>FAILED</code> - The request to create a <code>DataSource</code> did not run to completion. It is not usable.</li> <li> <code>COMPLETED</code> - The creation process completed successfully.</li> <li> <code>DELETED</code> - The <code>DataSource</code> is marked as deleted. It is not usable.</li> </ul></p>
1067    #[serde(rename = "Status")]
1068    #[serde(skip_serializing_if = "Option::is_none")]
1069    pub status: Option<String>,
1070}
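
// A hedged sketch of fetching a DataSource together with its schema. It assumes the async
// `get_data_source` method of the `MachineLearning` trait implemented for
// `MachineLearningClient` later in this file, and the `GetDataSourceError` type generated
// with it. Setting Verbose to true asks the service to include DataSourceSchema, as
// documented on GetDataSourceInput above.
#[allow(dead_code)]
async fn example_get_data_source_schema(
    client: &MachineLearningClient,
    data_source_id: String,
) -> Result<Option<String>, RusotoError<GetDataSourceError>> {
    let output = client
        .get_data_source(GetDataSourceInput {
            data_source_id,
            verbose: Some(true),
        })
        .await?;
    Ok(output.data_source_schema)
}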
1071
1072#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1073#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1074pub struct GetEvaluationInput {
1075    /// <p>The ID of the <code>Evaluation</code> to retrieve. The evaluation of each <code>MLModel</code> is recorded and cataloged. The ID provides the means to access the information. </p>
1076    #[serde(rename = "EvaluationId")]
1077    pub evaluation_id: String,
1078}
1079
1080/// <p>Represents the output of a <code>GetEvaluation</code> operation and describes an <code>Evaluation</code>.</p>
1081#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1082#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1083pub struct GetEvaluationOutput {
1084    /// <p>The approximate CPU time in milliseconds that Amazon Machine Learning spent processing the <code>Evaluation</code>, normalized and scaled on computation resources. <code>ComputeTime</code> is only available if the <code>Evaluation</code> is in the <code>COMPLETED</code> state.</p>
1085    #[serde(rename = "ComputeTime")]
1086    #[serde(skip_serializing_if = "Option::is_none")]
1087    pub compute_time: Option<i64>,
1088    /// <p>The time that the <code>Evaluation</code> was created. The time is expressed in epoch time.</p>
1089    #[serde(rename = "CreatedAt")]
1090    #[serde(skip_serializing_if = "Option::is_none")]
1091    pub created_at: Option<f64>,
1092    /// <p>The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
1093    #[serde(rename = "CreatedByIamUser")]
1094    #[serde(skip_serializing_if = "Option::is_none")]
1095    pub created_by_iam_user: Option<String>,
1096    /// <p>The <code>DataSource</code> used for this evaluation.</p>
1097    #[serde(rename = "EvaluationDataSourceId")]
1098    #[serde(skip_serializing_if = "Option::is_none")]
1099    pub evaluation_data_source_id: Option<String>,
1100    /// <p>The evaluation ID, which is the same as the <code>EvaluationId</code> in the request.</p>
1101    #[serde(rename = "EvaluationId")]
1102    #[serde(skip_serializing_if = "Option::is_none")]
1103    pub evaluation_id: Option<String>,
1104    /// <p>The epoch time when Amazon Machine Learning marked the <code>Evaluation</code> as <code>COMPLETED</code> or <code>FAILED</code>. <code>FinishedAt</code> is only available when the <code>Evaluation</code> is in the <code>COMPLETED</code> or <code>FAILED</code> state.</p>
1105    #[serde(rename = "FinishedAt")]
1106    #[serde(skip_serializing_if = "Option::is_none")]
1107    pub finished_at: Option<f64>,
1108    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
1109    #[serde(rename = "InputDataLocationS3")]
1110    #[serde(skip_serializing_if = "Option::is_none")]
1111    pub input_data_location_s3: Option<String>,
1112    /// <p>The time of the most recent edit to the <code>Evaluation</code>. The time is expressed in epoch time.</p>
1113    #[serde(rename = "LastUpdatedAt")]
1114    #[serde(skip_serializing_if = "Option::is_none")]
1115    pub last_updated_at: Option<f64>,
1116    /// <p>A link to the file that contains logs of the <code>CreateEvaluation</code> operation.</p>
1117    #[serde(rename = "LogUri")]
1118    #[serde(skip_serializing_if = "Option::is_none")]
1119    pub log_uri: Option<String>,
1120    /// <p>The ID of the <code>MLModel</code> that was the focus of the evaluation.</p>
1121    #[serde(rename = "MLModelId")]
1122    #[serde(skip_serializing_if = "Option::is_none")]
1123    pub ml_model_id: Option<String>,
1124    /// <p>A description of the most recent details about evaluating the <code>MLModel</code>.</p>
1125    #[serde(rename = "Message")]
1126    #[serde(skip_serializing_if = "Option::is_none")]
1127    pub message: Option<String>,
1128    /// <p>A user-supplied name or description of the <code>Evaluation</code>. </p>
1129    #[serde(rename = "Name")]
1130    #[serde(skip_serializing_if = "Option::is_none")]
1131    pub name: Option<String>,
1132    /// <p>Measurements of how well the <code>MLModel</code> performed, using observations referenced by the <code>DataSource</code>. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p> <ul> <li> <p>BinaryAUC: A binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li> <li> <p>RegressionRMSE: A regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li> <li> <p>MulticlassAvgFScore: A multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li> </ul> <p> For more information about performance metrics, please see the <a href="http://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
1133    #[serde(rename = "PerformanceMetrics")]
1134    #[serde(skip_serializing_if = "Option::is_none")]
1135    pub performance_metrics: Option<PerformanceMetrics>,
1136    /// <p>The epoch time when Amazon Machine Learning marked the <code>Evaluation</code> as <code>INPROGRESS</code>. <code>StartedAt</code> isn't available if the <code>Evaluation</code> is in the <code>PENDING</code> state.</p>
1137    #[serde(rename = "StartedAt")]
1138    #[serde(skip_serializing_if = "Option::is_none")]
1139    pub started_at: Option<f64>,
1140    /// <p><p>The status of the evaluation. This element can have one of the following values:</p> <ul> <li> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an <code>MLModel</code>.</li> <li> <code>INPROGRESS</code> - The evaluation is underway.</li> <li> <code>FAILED</code> - The request to evaluate an <code>MLModel</code> did not run to completion. It is not usable.</li> <li> <code>COMPLETED</code> - The evaluation process completed successfully.</li> <li> <code>DELETED</code> - The <code>Evaluation</code> is marked as deleted. It is not usable.</li> </ul></p>
1141    #[serde(rename = "Status")]
1142    #[serde(skip_serializing_if = "Option::is_none")]
1143    pub status: Option<String>,
1144}
1145
1146#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1147#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1148pub struct GetMLModelInput {
1149    /// <p>The ID assigned to the <code>MLModel</code> at creation.</p>
1150    #[serde(rename = "MLModelId")]
1151    pub ml_model_id: String,
1152    /// <p>Specifies whether the <code>GetMLModel</code> operation should return <code>Recipe</code>.</p> <p>If true, <code>Recipe</code> is returned.</p> <p>If false, <code>Recipe</code> is not returned.</p>
1153    #[serde(rename = "Verbose")]
1154    #[serde(skip_serializing_if = "Option::is_none")]
1155    pub verbose: Option<bool>,
1156}
1157
1158/// <p>Represents the output of a <code>GetMLModel</code> operation, and provides detailed information about a <code>MLModel</code>.</p>
1159#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1160#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1161pub struct GetMLModelOutput {
1162    /// <p>The approximate CPU time in milliseconds that Amazon Machine Learning spent processing the <code>MLModel</code>, normalized and scaled on computation resources. <code>ComputeTime</code> is only available if the <code>MLModel</code> is in the <code>COMPLETED</code> state.</p>
1163    #[serde(rename = "ComputeTime")]
1164    #[serde(skip_serializing_if = "Option::is_none")]
1165    pub compute_time: Option<i64>,
1166    /// <p>The time that the <code>MLModel</code> was created. The time is expressed in epoch time.</p>
1167    #[serde(rename = "CreatedAt")]
1168    #[serde(skip_serializing_if = "Option::is_none")]
1169    pub created_at: Option<f64>,
1170    /// <p>The AWS user account from which the <code>MLModel</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
1171    #[serde(rename = "CreatedByIamUser")]
1172    #[serde(skip_serializing_if = "Option::is_none")]
1173    pub created_by_iam_user: Option<String>,
1174    /// <p>The current endpoint of the <code>MLModel</code>.</p>
1175    #[serde(rename = "EndpointInfo")]
1176    #[serde(skip_serializing_if = "Option::is_none")]
1177    pub endpoint_info: Option<RealtimeEndpointInfo>,
1178    /// <p>The epoch time when Amazon Machine Learning marked the <code>MLModel</code> as <code>COMPLETED</code> or <code>FAILED</code>. <code>FinishedAt</code> is only available when the <code>MLModel</code> is in the <code>COMPLETED</code> or <code>FAILED</code> state.</p>
1179    #[serde(rename = "FinishedAt")]
1180    #[serde(skip_serializing_if = "Option::is_none")]
1181    pub finished_at: Option<f64>,
1182    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
1183    #[serde(rename = "InputDataLocationS3")]
1184    #[serde(skip_serializing_if = "Option::is_none")]
1185    pub input_data_location_s3: Option<String>,
1186    /// <p>The time of the most recent edit to the <code>MLModel</code>. The time is expressed in epoch time.</p>
1187    #[serde(rename = "LastUpdatedAt")]
1188    #[serde(skip_serializing_if = "Option::is_none")]
1189    pub last_updated_at: Option<f64>,
1190    /// <p>A link to the file that contains logs of the <code>CreateMLModel</code> operation.</p>
1191    #[serde(rename = "LogUri")]
1192    #[serde(skip_serializing_if = "Option::is_none")]
1193    pub log_uri: Option<String>,
1194    /// <p>The MLModel ID, which is the same as the <code>MLModelId</code> in the request.</p>
1195    #[serde(rename = "MLModelId")]
1196    #[serde(skip_serializing_if = "Option::is_none")]
1197    pub ml_model_id: Option<String>,
1198    /// <p><p>Identifies the <code>MLModel</code> category. The following are the available types: </p> <ul> <li>REGRESSION -- Produces a numeric result. For example, &quot;What price should a house be listed at?&quot;</li> <li>BINARY -- Produces one of two possible results. For example, &quot;Is this an e-commerce website?&quot;</li> <li>MULTICLASS -- Produces one of several possible results. For example, &quot;Is this a HIGH, LOW or MEDIUM risk trade?&quot;</li> </ul></p>
1199    #[serde(rename = "MLModelType")]
1200    #[serde(skip_serializing_if = "Option::is_none")]
1201    pub ml_model_type: Option<String>,
1202    /// <p>A description of the most recent details about accessing the <code>MLModel</code>.</p>
1203    #[serde(rename = "Message")]
1204    #[serde(skip_serializing_if = "Option::is_none")]
1205    pub message: Option<String>,
1206    /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
1207    #[serde(rename = "Name")]
1208    #[serde(skip_serializing_if = "Option::is_none")]
1209    pub name: Option<String>,
1210    /// <p><p>The recipe to use when training the <code>MLModel</code>. The <code>Recipe</code> provides detailed information about the observation data to use during training, and manipulations to perform on the observation data during training.</p> <note><title>Note</title> <p>This parameter is provided as part of the verbose format.</p></note></p>
1211    #[serde(rename = "Recipe")]
1212    #[serde(skip_serializing_if = "Option::is_none")]
1213    pub recipe: Option<String>,
1214    /// <p><p>The schema used by all of the data files referenced by the <code>DataSource</code>.</p> <note><title>Note</title> <p>This parameter is provided as part of the verbose format.</p></note></p>
1215    #[serde(rename = "Schema")]
1216    #[serde(skip_serializing_if = "Option::is_none")]
1217    pub schema: Option<String>,
1218    /// <p>The scoring threshold is used in binary classification <code>MLModel</code> models. It marks the boundary between a positive prediction and a negative prediction.</p> <p>Output values greater than or equal to the threshold receive a positive result from the MLModel, such as <code>true</code>. Output values less than the threshold receive a negative response from the MLModel, such as <code>false</code>.</p>
1219    #[serde(rename = "ScoreThreshold")]
1220    #[serde(skip_serializing_if = "Option::is_none")]
1221    pub score_threshold: Option<f32>,
1222    /// <p>The time of the most recent edit to the <code>ScoreThreshold</code>. The time is expressed in epoch time.</p>
1223    #[serde(rename = "ScoreThresholdLastUpdatedAt")]
1224    #[serde(skip_serializing_if = "Option::is_none")]
1225    pub score_threshold_last_updated_at: Option<f64>,
1226    #[serde(rename = "SizeInBytes")]
1227    #[serde(skip_serializing_if = "Option::is_none")]
1228    pub size_in_bytes: Option<i64>,
1229    /// <p>The epoch time when Amazon Machine Learning marked the <code>MLModel</code> as <code>INPROGRESS</code>. <code>StartedAt</code> isn't available if the <code>MLModel</code> is in the <code>PENDING</code> state.</p>
1230    #[serde(rename = "StartedAt")]
1231    #[serde(skip_serializing_if = "Option::is_none")]
1232    pub started_at: Option<f64>,
1233    /// <p><p>The current status of the <code>MLModel</code>. This element can have one of the following values:</p> <ul> <li> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to describe a <code>MLModel</code>.</li> <li> <code>INPROGRESS</code> - The request is processing.</li> <li> <code>FAILED</code> - The request did not run to completion. The ML model isn&#39;t usable.</li> <li> <code>COMPLETED</code> - The request completed successfully.</li> <li> <code>DELETED</code> - The <code>MLModel</code> is marked as deleted. It isn&#39;t usable.</li> </ul></p>
1234    #[serde(rename = "Status")]
1235    #[serde(skip_serializing_if = "Option::is_none")]
1236    pub status: Option<String>,
1237    /// <p>The ID of the training <code>DataSource</code>.</p>
1238    #[serde(rename = "TrainingDataSourceId")]
1239    #[serde(skip_serializing_if = "Option::is_none")]
1240    pub training_data_source_id: Option<String>,
1241    /// <p><p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p> <p>The following is the current set of training parameters: </p> <ul> <li> <p><code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li> <li><p><code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p></li> <li><p><code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling data improves a model&#39;s ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>. We strongly recommend that you shuffle your data.</p></li> <li> <p><code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 normalization. This parameter can&#39;t be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li> <li> <p><code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 normalization. This parameter can&#39;t be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li> </ul></p>
1242    #[serde(rename = "TrainingParameters")]
1243    #[serde(skip_serializing_if = "Option::is_none")]
1244    pub training_parameters: Option<::std::collections::HashMap<String, String>>,
1245}
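
// Illustrative only: the shape of the TrainingParameters map described in the field
// documentation above, using the documented `sgd.*` keys. The concrete values shown
// here are either the documented defaults or values the documentation suggests.
#[allow(dead_code)]
fn example_training_parameters() -> ::std::collections::HashMap<String, String> {
    let mut params = ::std::collections::HashMap::new();
    // Documented default model size limit (bytes).
    params.insert("sgd.maxMLModelSizeInBytes".to_owned(), "33554432".to_owned());
    // Documented default number of passes over the training data.
    params.insert("sgd.maxPasses".to_owned(), "10".to_owned());
    // The documentation recommends shuffling; "auto" is one of the documented values.
    params.insert("sgd.shuffleType".to_owned(), "auto".to_owned());
    // Documented suggested starting point for L2 regularization.
    params.insert("sgd.l2RegularizationAmount".to_owned(), "1.0E-08".to_owned());
    params
}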
1246
1247/// <p> Represents the output of a <code>GetMLModel</code> operation. </p> <p>The content consists of the detailed metadata and the current status of the <code>MLModel</code>.</p>
1248#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1249#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1250pub struct MLModel {
1251    /// <p><p>The algorithm used to train the <code>MLModel</code>. The following algorithm is supported:</p> <ul> <li> <code>SGD</code> -- Stochastic gradient descent. The goal of <code>SGD</code> is to minimize the gradient of the loss function. </li> </ul></p>
1252    #[serde(rename = "Algorithm")]
1253    #[serde(skip_serializing_if = "Option::is_none")]
1254    pub algorithm: Option<String>,
1255    #[serde(rename = "ComputeTime")]
1256    #[serde(skip_serializing_if = "Option::is_none")]
1257    pub compute_time: Option<i64>,
1258    /// <p>The time that the <code>MLModel</code> was created. The time is expressed in epoch time.</p>
1259    #[serde(rename = "CreatedAt")]
1260    #[serde(skip_serializing_if = "Option::is_none")]
1261    pub created_at: Option<f64>,
1262    /// <p>The AWS user account from which the <code>MLModel</code> was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.</p>
1263    #[serde(rename = "CreatedByIamUser")]
1264    #[serde(skip_serializing_if = "Option::is_none")]
1265    pub created_by_iam_user: Option<String>,
1266    /// <p>The current endpoint of the <code>MLModel</code>.</p>
1267    #[serde(rename = "EndpointInfo")]
1268    #[serde(skip_serializing_if = "Option::is_none")]
1269    pub endpoint_info: Option<RealtimeEndpointInfo>,
1270    #[serde(rename = "FinishedAt")]
1271    #[serde(skip_serializing_if = "Option::is_none")]
1272    pub finished_at: Option<f64>,
1273    /// <p>The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).</p>
1274    #[serde(rename = "InputDataLocationS3")]
1275    #[serde(skip_serializing_if = "Option::is_none")]
1276    pub input_data_location_s3: Option<String>,
1277    /// <p>The time of the most recent edit to the <code>MLModel</code>. The time is expressed in epoch time.</p>
1278    #[serde(rename = "LastUpdatedAt")]
1279    #[serde(skip_serializing_if = "Option::is_none")]
1280    pub last_updated_at: Option<f64>,
1281    /// <p>The ID assigned to the <code>MLModel</code> at creation.</p>
1282    #[serde(rename = "MLModelId")]
1283    #[serde(skip_serializing_if = "Option::is_none")]
1284    pub ml_model_id: Option<String>,
1285    /// <p><p>Identifies the <code>MLModel</code> category. The following are the available types:</p> <ul> <li> <code>REGRESSION</code> - Produces a numeric result. For example, &quot;What price should a house be listed at?&quot;</li> <li> <code>BINARY</code> - Produces one of two possible results. For example, &quot;Is this a child-friendly web site?&quot;.</li> <li> <code>MULTICLASS</code> - Produces one of several possible results. For example, &quot;Is this a HIGH-, LOW-, or MEDIUM-risk trade?&quot;.</li> </ul></p>
1286    #[serde(rename = "MLModelType")]
1287    #[serde(skip_serializing_if = "Option::is_none")]
1288    pub ml_model_type: Option<String>,
1289    /// <p>A description of the most recent details about accessing the <code>MLModel</code>.</p>
1290    #[serde(rename = "Message")]
1291    #[serde(skip_serializing_if = "Option::is_none")]
1292    pub message: Option<String>,
1293    /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
1294    #[serde(rename = "Name")]
1295    #[serde(skip_serializing_if = "Option::is_none")]
1296    pub name: Option<String>,
1297    #[serde(rename = "ScoreThreshold")]
1298    #[serde(skip_serializing_if = "Option::is_none")]
1299    pub score_threshold: Option<f32>,
1300    /// <p>The time of the most recent edit to the <code>ScoreThreshold</code>. The time is expressed in epoch time.</p>
1301    #[serde(rename = "ScoreThresholdLastUpdatedAt")]
1302    #[serde(skip_serializing_if = "Option::is_none")]
1303    pub score_threshold_last_updated_at: Option<f64>,
1304    #[serde(rename = "SizeInBytes")]
1305    #[serde(skip_serializing_if = "Option::is_none")]
1306    pub size_in_bytes: Option<i64>,
1307    #[serde(rename = "StartedAt")]
1308    #[serde(skip_serializing_if = "Option::is_none")]
1309    pub started_at: Option<f64>,
1310    /// <p><p>The current status of an <code>MLModel</code>. This element can have one of the following values: </p> <ul> <li> <code>PENDING</code> - Amazon Machine Learning (Amazon ML) submitted a request to create an <code>MLModel</code>.</li> <li> <code>INPROGRESS</code> - The creation process is underway.</li> <li> <code>FAILED</code> - The request to create an <code>MLModel</code> didn&#39;t run to completion. The model isn&#39;t usable.</li> <li> <code>COMPLETED</code> - The creation process completed successfully.</li> <li> <code>DELETED</code> - The <code>MLModel</code> is marked as deleted. It isn&#39;t usable.</li> </ul></p>
1311    #[serde(rename = "Status")]
1312    #[serde(skip_serializing_if = "Option::is_none")]
1313    pub status: Option<String>,
1314    /// <p>The ID of the training <code>DataSource</code>. The <code>CreateMLModel</code> operation uses the <code>TrainingDataSourceId</code>.</p>
1315    #[serde(rename = "TrainingDataSourceId")]
1316    #[serde(skip_serializing_if = "Option::is_none")]
1317    pub training_data_source_id: Option<String>,
1318    /// <p><p>A list of the training parameters in the <code>MLModel</code>. The list is implemented as a map of key-value pairs.</p> <p>The following is the current set of training parameters: </p> <ul> <li> <p><code>sgd.maxMLModelSizeInBytes</code> - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.</p> <p> The value is an integer that ranges from <code>100000</code> to <code>2147483648</code>. The default value is <code>33554432</code>.</p> </li> <li><p><code>sgd.maxPasses</code> - The number of times that the training process traverses the observations to build the <code>MLModel</code>. The value is an integer that ranges from <code>1</code> to <code>10000</code>. The default value is <code>10</code>.</p></li> <li><p><code>sgd.shuffleType</code> - Whether Amazon ML shuffles the training data. Shuffling the data improves a model&#39;s ability to find the optimal solution for a variety of data types. The valid values are <code>auto</code> and <code>none</code>. The default value is <code>none</code>.</p></li> <li> <p><code>sgd.l1RegularizationAmount</code> - The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L1 normalization. This parameter can&#39;t be used when <code>L2</code> is specified. Use this parameter sparingly.</p> </li> <li> <p><code>sgd.l2RegularizationAmount</code> - The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as <code>1.0E-08</code>.</p> <p>The value is a double that ranges from <code>0</code> to <code>MAX_DOUBLE</code>. The default is to not use L2 normalization. This parameter can&#39;t be used when <code>L1</code> is specified. Use this parameter sparingly.</p> </li> </ul></p>
1319    #[serde(rename = "TrainingParameters")]
1320    #[serde(skip_serializing_if = "Option::is_none")]
1321    pub training_parameters: Option<::std::collections::HashMap<String, String>>,
1322}
1323
1324/// <p>Measurements of how well the <code>MLModel</code> performed on known observations. One of the following metrics is returned, based on the type of the <code>MLModel</code>: </p> <ul> <li> <p>BinaryAUC: The binary <code>MLModel</code> uses the Area Under the Curve (AUC) technique to measure performance. </p> </li> <li> <p>RegressionRMSE: The regression <code>MLModel</code> uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.</p> </li> <li> <p>MulticlassAvgFScore: The multiclass <code>MLModel</code> uses the F1 score technique to measure performance. </p> </li> </ul> <p> For more information about performance metrics, please see the <a href="http://docs.aws.amazon.com/machine-learning/latest/dg">Amazon Machine Learning Developer Guide</a>. </p>
1325#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1326#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1327pub struct PerformanceMetrics {
1328    #[serde(rename = "Properties")]
1329    #[serde(skip_serializing_if = "Option::is_none")]
1330    pub properties: Option<::std::collections::HashMap<String, String>>,
1331}
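
// A minimal sketch of reading a metric out of the Properties map described above. The
// values are returned as strings, so they are parsed here. The "BinaryAUC" key follows
// the metric name listed in the PerformanceMetrics documentation and should be treated
// as an assumption for illustration, not a guaranteed map key.
#[allow(dead_code)]
fn example_binary_auc(metrics: &PerformanceMetrics) -> Option<f64> {
    metrics
        .properties
        .as_ref()
        .and_then(|props| props.get("BinaryAUC"))
        .and_then(|value| value.parse::<f64>().ok())
}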
1332
1333#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1334#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1335pub struct PredictInput {
1336    /// <p>A unique identifier of the <code>MLModel</code>.</p>
1337    #[serde(rename = "MLModelId")]
1338    pub ml_model_id: String,
1339    #[serde(rename = "PredictEndpoint")]
1340    pub predict_endpoint: String,
1341    #[serde(rename = "Record")]
1342    pub record: ::std::collections::HashMap<String, String>,
1343}
1344
1345#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1346#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1347pub struct PredictOutput {
1348    #[serde(rename = "Prediction")]
1349    #[serde(skip_serializing_if = "Option::is_none")]
1350    pub prediction: Option<Prediction>,
1351}
1352
1353/// <p><p>The output from a <code>Predict</code> operation: </p> <ul> <li> <p> <code>Details</code> - Contains the following attributes: <code>DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS</code> <code>DetailsAttributes.ALGORITHM - SGD</code> </p> </li> <li> <p> <code>PredictedLabel</code> - Present for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code> request. </p> </li> <li> <p> <code>PredictedScores</code> - Contains the raw classification score corresponding to each label. </p> </li> <li> <p> <code>PredictedValue</code> - Present for a <code>REGRESSION</code> <code>MLModel</code> request. </p> </li> </ul></p>
1354#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1355#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1356pub struct Prediction {
1357    #[serde(rename = "details")]
1358    #[serde(skip_serializing_if = "Option::is_none")]
1359    pub details: Option<::std::collections::HashMap<String, String>>,
1360    /// <p>The prediction label for either a <code>BINARY</code> or <code>MULTICLASS</code> <code>MLModel</code>.</p>
1361    #[serde(rename = "predictedLabel")]
1362    #[serde(skip_serializing_if = "Option::is_none")]
1363    pub predicted_label: Option<String>,
1364    #[serde(rename = "predictedScores")]
1365    #[serde(skip_serializing_if = "Option::is_none")]
1366    pub predicted_scores: Option<::std::collections::HashMap<String, f32>>,
1367    /// <p>The prediction value for <code>REGRESSION</code> <code>MLModel</code>.</p>
1368    #[serde(rename = "predictedValue")]
1369    #[serde(skip_serializing_if = "Option::is_none")]
1370    pub predicted_value: Option<f32>,
1371}
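
// A hedged end-to-end sketch of a real-time prediction. It assumes the async `predict`
// method of the `MachineLearning` trait implemented for `MachineLearningClient` later in
// this file, and the `PredictError` type generated with it. The model ID, endpoint URL,
// and record values are placeholders. Interpretation of the result follows the Prediction
// documentation above: `predicted_label` for BINARY/MULTICLASS models, `predicted_value`
// for REGRESSION models.
#[allow(dead_code)]
async fn example_predict(
    client: &MachineLearningClient,
) -> Result<String, RusotoError<PredictError>> {
    let mut record = ::std::collections::HashMap::new();
    record.insert("F1".to_owned(), "some text".to_owned());
    record.insert("F2".to_owned(), "42".to_owned());

    let output = client
        .predict(PredictInput {
            ml_model_id: "exampleModelId".to_owned(),
            predict_endpoint: "https://realtime.machinelearning.us-east-1.amazonaws.com".to_owned(),
            record,
        })
        .await?;

    Ok(match output.prediction {
        Some(Prediction { predicted_label: Some(label), .. }) => format!("label: {}", label),
        Some(Prediction { predicted_value: Some(value), .. }) => format!("value: {}", value),
        _ => "no prediction returned".to_owned(),
    })
}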
1372
1373/// <p>The data specification of an Amazon Relational Database Service (Amazon RDS) <code>DataSource</code>.</p>
1374#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1375#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1376pub struct RDSDataSpec {
1377    /// <p><p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p> <p>There are multiple parameters that control what data is used to create a datasource:</p> <ul> <li><p><b><code>percentBegin</code></b></p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p></li> <li><p><b><code>percentEnd</code></b></p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p></li> <li><p><b><code>complement</code></b></p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:0, &quot;percentEnd&quot;:25}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:0, &quot;percentEnd&quot;:25, &quot;complement&quot;:&quot;true&quot;}}</code></p> </li> <li><p><b><code>strategy</code></b></p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;sequential&quot;}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;sequential&quot;, &quot;complement&quot;:&quot;true&quot;}}</code></p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). 
    /// If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;random&quot;, &quot;randomSeed&quot;:&quot;s3://my_s3_path/bucket/file.csv&quot;}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;random&quot;, &quot;randomSeed&quot;:&quot;s3://my_s3_path/bucket/file.csv&quot;, &quot;complement&quot;:&quot;true&quot;}}</code></p> </li> </ul></p>
1378    #[serde(rename = "DataRearrangement")]
1379    #[serde(skip_serializing_if = "Option::is_none")]
1380    pub data_rearrangement: Option<String>,
1381    /// <p><p>A JSON string that represents the schema for an Amazon RDS <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p> <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p> <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p> <p>{ &quot;version&quot;: &quot;1.0&quot;,</p> <p> &quot;recordAnnotationFieldName&quot;: &quot;F1&quot;,</p> <p> &quot;recordWeightFieldName&quot;: &quot;F2&quot;,</p> <p> &quot;targetFieldName&quot;: &quot;F3&quot;,</p> <p> &quot;dataFormat&quot;: &quot;CSV&quot;,</p> <p> &quot;dataFileContainsHeader&quot;: true,</p> <p> &quot;attributes&quot;: [</p> <p> { &quot;fieldName&quot;: &quot;F1&quot;, &quot;fieldType&quot;: &quot;TEXT&quot; }, { &quot;fieldName&quot;: &quot;F2&quot;, &quot;fieldType&quot;: &quot;NUMERIC&quot; }, { &quot;fieldName&quot;: &quot;F3&quot;, &quot;fieldType&quot;: &quot;CATEGORICAL&quot; }, { &quot;fieldName&quot;: &quot;F4&quot;, &quot;fieldType&quot;: &quot;NUMERIC&quot; }, { &quot;fieldName&quot;: &quot;F5&quot;, &quot;fieldType&quot;: &quot;CATEGORICAL&quot; }, { &quot;fieldName&quot;: &quot;F6&quot;, &quot;fieldType&quot;: &quot;TEXT&quot; }, { &quot;fieldName&quot;: &quot;F7&quot;, &quot;fieldType&quot;: &quot;WEIGHTED_INT_SEQUENCE&quot; }, { &quot;fieldName&quot;: &quot;F8&quot;, &quot;fieldType&quot;: &quot;WEIGHTED_STRING_SEQUENCE&quot; } ],</p> <p> &quot;excludedVariableNames&quot;: [ &quot;F6&quot; ] } </p></p>
1382    #[serde(rename = "DataSchema")]
1383    #[serde(skip_serializing_if = "Option::is_none")]
1384    pub data_schema: Option<String>,
1385    /// <p>The Amazon S3 location of the <code>DataSchema</code>. </p>
1386    #[serde(rename = "DataSchemaUri")]
1387    #[serde(skip_serializing_if = "Option::is_none")]
1388    pub data_schema_uri: Option<String>,
1389    /// <p>The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.</p>
1390    #[serde(rename = "DatabaseCredentials")]
1391    pub database_credentials: RDSDatabaseCredentials,
1392    /// <p>Describes the <code>DatabaseName</code> and <code>InstanceIdentifier</code> of an Amazon RDS database.</p>
1393    #[serde(rename = "DatabaseInformation")]
1394    pub database_information: RDSDatabase,
1395    /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
1396    #[serde(rename = "ResourceRole")]
1397    pub resource_role: String,
1398    /// <p>The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using <code>SelectSqlQuery</code> is stored in this location.</p>
1399    #[serde(rename = "S3StagingLocation")]
1400    pub s3_staging_location: String,
1401    /// <p>The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.</p>
1402    #[serde(rename = "SecurityGroupIds")]
1403    pub security_group_ids: Vec<String>,
1404    /// <p>The query that is used to retrieve the observation data for the <code>DataSource</code>.</p>
1405    #[serde(rename = "SelectSqlQuery")]
1406    pub select_sql_query: String,
1407    /// <p>The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
1408    #[serde(rename = "ServiceRole")]
1409    pub service_role: String,
1410    /// <p>The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.</p>
1411    #[serde(rename = "SubnetId")]
1412    pub subnet_id: String,
1413}
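
// Illustrative sketch (not part of the generated API): one way the
// `DataRearrangement` splitting JSON documented above might be supplied when
// building an `RDSDataSpec`. Every literal value here (S3 paths, database and
// instance names, roles, security group, subnet, and SQL) is a hypothetical
// placeholder, and the two schema fields are left to their defaults.
#[allow(dead_code)]
fn example_rds_data_spec() -> RDSDataSpec {
    RDSDataSpec {
        // Keep a random 30% slice of the input data for this datasource.
        data_rearrangement: Some(
            r#"{"splitting":{"percentBegin":70,"percentEnd":100,"strategy":"random","randomSeed":"s3://my_s3_path/bucket/file.csv"}}"#
                .to_owned(),
        ),
        database_credentials: RDSDatabaseCredentials {
            username: "ml_reader".to_owned(),
            password: "example-password".to_owned(),
        },
        database_information: RDSDatabase {
            database_name: "sales".to_owned(),
            instance_identifier: "my-rds-instance".to_owned(),
        },
        resource_role: "DataPipelineDefaultResourceRole".to_owned(),
        service_role: "DataPipelineDefaultRole".to_owned(),
        s3_staging_location: "s3://example-bucket/staging/".to_owned(),
        security_group_ids: vec!["sg-0123456789abcdef0".to_owned()],
        select_sql_query: "SELECT * FROM observations".to_owned(),
        subnet_id: "subnet-0123456789abcdef0".to_owned(),
        ..Default::default()
    }
}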
1414
1415/// <p>The database details of an Amazon RDS database.</p>
1416#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1417pub struct RDSDatabase {
1418    #[serde(rename = "DatabaseName")]
1419    pub database_name: String,
1420    /// <p>The ID of an RDS DB instance.</p>
1421    #[serde(rename = "InstanceIdentifier")]
1422    pub instance_identifier: String,
1423}
1424
1425/// <p>The database credentials to connect to a database on an RDS DB instance.</p>
1426#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1427#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1428pub struct RDSDatabaseCredentials {
1429    #[serde(rename = "Password")]
1430    pub password: String,
1431    #[serde(rename = "Username")]
1432    pub username: String,
1433}
1434
1435/// <p>The datasource details that are specific to Amazon RDS.</p>
1436#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1437#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1438pub struct RDSMetadata {
1439    /// <p>The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.</p>
1440    #[serde(rename = "DataPipelineId")]
1441    #[serde(skip_serializing_if = "Option::is_none")]
1442    pub data_pipeline_id: Option<String>,
1443    /// <p>The database details required to connect to an Amazon RDS database.</p>
1444    #[serde(rename = "Database")]
1445    #[serde(skip_serializing_if = "Option::is_none")]
1446    pub database: Option<RDSDatabase>,
1447    #[serde(rename = "DatabaseUserName")]
1448    #[serde(skip_serializing_if = "Option::is_none")]
1449    pub database_user_name: Option<String>,
1450    /// <p>The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see <a href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
1451    #[serde(rename = "ResourceRole")]
1452    #[serde(skip_serializing_if = "Option::is_none")]
1453    pub resource_role: Option<String>,
1454    /// <p>The SQL query that is supplied during <a>CreateDataSourceFromRDS</a>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
1455    #[serde(rename = "SelectSqlQuery")]
1456    #[serde(skip_serializing_if = "Option::is_none")]
1457    pub select_sql_query: Option<String>,
1458    /// <p>The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see <a href="http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html">Role templates</a> for data pipelines.</p>
1459    #[serde(rename = "ServiceRole")]
1460    #[serde(skip_serializing_if = "Option::is_none")]
1461    pub service_role: Option<String>,
1462}
1463
1464/// <p> Describes the real-time endpoint information for an <code>MLModel</code>.</p>
1465#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1466#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1467pub struct RealtimeEndpointInfo {
1468    /// <p>The time that the request to create the real-time endpoint for the <code>MLModel</code> was received. The time is expressed in epoch time.</p>
1469    #[serde(rename = "CreatedAt")]
1470    #[serde(skip_serializing_if = "Option::is_none")]
1471    pub created_at: Option<f64>,
1472    /// <p><p> The current status of the real-time endpoint for the <code>MLModel</code>. This element can have one of the following values: </p> <ul> <li> <code>NONE</code> - Endpoint does not exist or was previously deleted.</li> <li> <code>READY</code> - Endpoint is ready to be used for real-time predictions.</li> <li> <code>UPDATING</code> - Updating/creating the endpoint. </li> </ul></p>
1473    #[serde(rename = "EndpointStatus")]
1474    #[serde(skip_serializing_if = "Option::is_none")]
1475    pub endpoint_status: Option<String>,
1476    /// <p><p>The URI that specifies where to send real-time prediction requests for the <code>MLModel</code>.</p> <p><b>Note:</b> The application must wait until the real-time endpoint is ready before using this URI.</p></p>
1477    #[serde(rename = "EndpointUrl")]
1478    #[serde(skip_serializing_if = "Option::is_none")]
1479    pub endpoint_url: Option<String>,
1480    /// <p> The maximum processing rate for the real-time endpoint for <code>MLModel</code>, measured in incoming requests per second.</p>
1481    #[serde(rename = "PeakRequestsPerSecond")]
1482    #[serde(skip_serializing_if = "Option::is_none")]
1483    pub peak_requests_per_second: Option<i64>,
1484}
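
// Illustrative sketch (not part of the generated API): the note above says a
// caller must wait for the real-time endpoint to be ready before using its
// URL. A caller might gate on `EndpointStatus` like this; the helper name is
// hypothetical.
#[allow(dead_code)]
fn example_ready_endpoint_url(info: &RealtimeEndpointInfo) -> Option<&str> {
    match info.endpoint_status.as_deref() {
        // Only expose the URL once the endpoint reports `READY`.
        Some("READY") => info.endpoint_url.as_deref(),
        // `NONE`, `UPDATING`, or a missing status means the URL is not usable yet.
        _ => None,
    }
}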
1485
1486/// <p>Describes the data specification of an Amazon Redshift <code>DataSource</code>.</p>
1487#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1488#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1489pub struct RedshiftDataSpec {
1490    /// <p><p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p> <p>There are multiple parameters that control what data is used to create a datasource:</p> <ul> <li><p><b><code>percentBegin</code></b></p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p></li> <li><p><b><code>percentEnd</code></b></p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p></li> <li><p><b><code>complement</code></b></p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:0, &quot;percentEnd&quot;:25}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:0, &quot;percentEnd&quot;:25, &quot;complement&quot;:&quot;true&quot;}}</code></p> </li> <li><p><b><code>strategy</code></b></p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;sequential&quot;}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;sequential&quot;, &quot;complement&quot;:&quot;true&quot;}}</code></p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). 
    /// If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;random&quot;, &quot;randomSeed&quot;:&quot;s3://my_s3_path/bucket/file.csv&quot;}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;random&quot;, &quot;randomSeed&quot;:&quot;s3://my_s3_path/bucket/file.csv&quot;, &quot;complement&quot;:&quot;true&quot;}}</code></p> </li> </ul></p>
1491    #[serde(rename = "DataRearrangement")]
1492    #[serde(skip_serializing_if = "Option::is_none")]
1493    pub data_rearrangement: Option<String>,
1494    /// <p>A JSON string that represents the schema for an Amazon Redshift <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p> <p>A <code>DataSchema</code> is not required if you specify a <code>DataSchemaUri</code>.</p> <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p> <p>{ "version": "1.0",</p> <p> "recordAnnotationFieldName": "F1",</p> <p> "recordWeightFieldName": "F2",</p> <p> "targetFieldName": "F3",</p> <p> "dataFormat": "CSV",</p> <p> "dataFileContainsHeader": true,</p> <p> "attributes": [</p> <p> { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],</p> <p> "excludedVariableNames": [ "F6" ] } </p>
1495    #[serde(rename = "DataSchema")]
1496    #[serde(skip_serializing_if = "Option::is_none")]
1497    pub data_schema: Option<String>,
1498    /// <p>Describes the schema location for an Amazon Redshift <code>DataSource</code>.</p>
1499    #[serde(rename = "DataSchemaUri")]
1500    #[serde(skip_serializing_if = "Option::is_none")]
1501    pub data_schema_uri: Option<String>,
1502    /// <p>Describes AWS Identity and Access Management (IAM) credentials that are used connect to the Amazon Redshift database.</p>
1503    #[serde(rename = "DatabaseCredentials")]
1504    pub database_credentials: RedshiftDatabaseCredentials,
1505    /// <p>Describes the <code>DatabaseName</code> and <code>ClusterIdentifier</code> for an Amazon Redshift <code>DataSource</code>.</p>
1506    #[serde(rename = "DatabaseInformation")]
1507    pub database_information: RedshiftDatabase,
1508    /// <p>Describes an Amazon S3 location to store the result set of the <code>SelectSqlQuery</code> query.</p>
1509    #[serde(rename = "S3StagingLocation")]
1510    pub s3_staging_location: String,
1511    /// <p>Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift <code>DataSource</code>.</p>
1512    #[serde(rename = "SelectSqlQuery")]
1513    pub select_sql_query: String,
1514}
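
// Illustrative sketch (not part of the generated API): a minimal
// `RedshiftDataSpec`, assuming the schema is supplied via `DataSchemaUri`
// rather than inline. Cluster, database, credential, query, and S3 values are
// hypothetical placeholders.
#[allow(dead_code)]
fn example_redshift_data_spec() -> RedshiftDataSpec {
    RedshiftDataSpec {
        database_information: RedshiftDatabase {
            cluster_identifier: "ml-cluster".to_owned(),
            database_name: "analytics".to_owned(),
        },
        database_credentials: RedshiftDatabaseCredentials {
            username: "ml_reader".to_owned(),
            password: "example-password".to_owned(),
        },
        select_sql_query: "SELECT * FROM observations".to_owned(),
        s3_staging_location: "s3://example-bucket/redshift-staging/".to_owned(),
        data_schema_uri: Some("s3://example-bucket/schema.json".to_owned()),
        // No `DataRearrangement` or inline `DataSchema` in this sketch.
        ..Default::default()
    }
}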
1515
1516/// <p>Describes the database details required to connect to an Amazon Redshift database.</p>
1517#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1518pub struct RedshiftDatabase {
1519    #[serde(rename = "ClusterIdentifier")]
1520    pub cluster_identifier: String,
1521    #[serde(rename = "DatabaseName")]
1522    pub database_name: String,
1523}
1524
1525/// <p> Describes the database credentials for connecting to a database on an Amazon Redshift cluster.</p>
1526#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1527#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1528pub struct RedshiftDatabaseCredentials {
1529    #[serde(rename = "Password")]
1530    pub password: String,
1531    #[serde(rename = "Username")]
1532    pub username: String,
1533}
1534
1535/// <p>Describes the <code>DataSource</code> details specific to Amazon Redshift.</p>
1536#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1537#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1538pub struct RedshiftMetadata {
1539    #[serde(rename = "DatabaseUserName")]
1540    #[serde(skip_serializing_if = "Option::is_none")]
1541    pub database_user_name: Option<String>,
1542    #[serde(rename = "RedshiftDatabase")]
1543    #[serde(skip_serializing_if = "Option::is_none")]
1544    pub redshift_database: Option<RedshiftDatabase>,
1545    /// <p> The SQL query that is specified during <a>CreateDataSourceFromRedshift</a>. Returns only if <code>Verbose</code> is true in <code>GetDataSourceInput</code>. </p>
1546    #[serde(rename = "SelectSqlQuery")]
1547    #[serde(skip_serializing_if = "Option::is_none")]
1548    pub select_sql_query: Option<String>,
1549}
1550
1551/// <p> Describes the data specification of a <code>DataSource</code>.</p>
1552#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1553#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1554pub struct S3DataSpec {
1555    /// <p>The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.</p>
1556    #[serde(rename = "DataLocationS3")]
1557    pub data_location_s3: String,
1558    /// <p><p>A JSON string that represents the splitting and rearrangement processing to be applied to a <code>DataSource</code>. If the <code>DataRearrangement</code> parameter is not provided, all of the input data is used to create the <code>Datasource</code>.</p> <p>There are multiple parameters that control what data is used to create a datasource:</p> <ul> <li><p><b><code>percentBegin</code></b></p> <p>Use <code>percentBegin</code> to indicate the beginning of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p></li> <li><p><b><code>percentEnd</code></b></p> <p>Use <code>percentEnd</code> to indicate the end of the range of the data used to create the Datasource. If you do not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML includes all of the data when creating the datasource.</p></li> <li><p><b><code>complement</code></b></p> <p>The <code>complement</code> parameter instructs Amazon ML to use the data that is not included in the range of <code>percentBegin</code> to <code>percentEnd</code> to create a datasource. The <code>complement</code> parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for <code>percentBegin</code> and <code>percentEnd</code>, along with the <code>complement</code> parameter.</p> <p>For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:0, &quot;percentEnd&quot;:25}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:0, &quot;percentEnd&quot;:25, &quot;complement&quot;:&quot;true&quot;}}</code></p> </li> <li><p><b><code>strategy</code></b></p> <p>To change how Amazon ML splits the data for a datasource, use the <code>strategy</code> parameter.</p> <p>The default value for the <code>strategy</code> parameter is <code>sequential</code>, meaning that Amazon ML takes all of the data records between the <code>percentBegin</code> and <code>percentEnd</code> parameters for the datasource, in the order that the records appear in the input data.</p> <p>The following two <code>DataRearrangement</code> lines are examples of sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;sequential&quot;}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;sequential&quot;, &quot;complement&quot;:&quot;true&quot;}}</code></p> <p>To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the <code>strategy</code> parameter to <code>random</code> and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). 
    /// If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between <code>percentBegin</code> and <code>percentEnd</code>. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.</p> <p>The following two <code>DataRearrangement</code> lines are examples of non-sequentially ordered training and evaluation datasources:</p> <p>Datasource for evaluation: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;random&quot;, &quot;randomSeed&quot;:&quot;s3://my_s3_path/bucket/file.csv&quot;}}</code></p> <p>Datasource for training: <code>{&quot;splitting&quot;:{&quot;percentBegin&quot;:70, &quot;percentEnd&quot;:100, &quot;strategy&quot;:&quot;random&quot;, &quot;randomSeed&quot;:&quot;s3://my_s3_path/bucket/file.csv&quot;, &quot;complement&quot;:&quot;true&quot;}}</code></p> </li> </ul></p>
1559    #[serde(rename = "DataRearrangement")]
1560    #[serde(skip_serializing_if = "Option::is_none")]
1561    pub data_rearrangement: Option<String>,
1562    /// <p><p> A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced in the <code>DataSource</code>.</p> <p>You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p> <p>Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the following format to define your <code>DataSchema</code>.</p> <p>{ &quot;version&quot;: &quot;1.0&quot;,</p> <p> &quot;recordAnnotationFieldName&quot;: &quot;F1&quot;,</p> <p> &quot;recordWeightFieldName&quot;: &quot;F2&quot;,</p> <p> &quot;targetFieldName&quot;: &quot;F3&quot;,</p> <p> &quot;dataFormat&quot;: &quot;CSV&quot;,</p> <p> &quot;dataFileContainsHeader&quot;: true,</p> <p> &quot;attributes&quot;: [</p> <p> { &quot;fieldName&quot;: &quot;F1&quot;, &quot;fieldType&quot;: &quot;TEXT&quot; }, { &quot;fieldName&quot;: &quot;F2&quot;, &quot;fieldType&quot;: &quot;NUMERIC&quot; }, { &quot;fieldName&quot;: &quot;F3&quot;, &quot;fieldType&quot;: &quot;CATEGORICAL&quot; }, { &quot;fieldName&quot;: &quot;F4&quot;, &quot;fieldType&quot;: &quot;NUMERIC&quot; }, { &quot;fieldName&quot;: &quot;F5&quot;, &quot;fieldType&quot;: &quot;CATEGORICAL&quot; }, { &quot;fieldName&quot;: &quot;F6&quot;, &quot;fieldType&quot;: &quot;TEXT&quot; }, { &quot;fieldName&quot;: &quot;F7&quot;, &quot;fieldType&quot;: &quot;WEIGHTED_INT_SEQUENCE&quot; }, { &quot;fieldName&quot;: &quot;F8&quot;, &quot;fieldType&quot;: &quot;WEIGHTED_STRING_SEQUENCE&quot; } ],</p> <p> &quot;excludedVariableNames&quot;: [ &quot;F6&quot; ] } </p></p>
1563    #[serde(rename = "DataSchema")]
1564    #[serde(skip_serializing_if = "Option::is_none")]
1565    pub data_schema: Option<String>,
1566    /// <p>Describes the schema location in Amazon S3. You must provide either the <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.</p>
1567    #[serde(rename = "DataSchemaLocationS3")]
1568    #[serde(skip_serializing_if = "Option::is_none")]
1569    pub data_schema_location_s3: Option<String>,
1570}
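
// Illustrative sketch (not part of the generated API): an `S3DataSpec` that
// points at a CSV file and supplies an inline `DataSchema` in the format
// documented above. The bucket, key, and field names are hypothetical
// placeholders.
#[allow(dead_code)]
fn example_s3_data_spec() -> S3DataSpec {
    S3DataSpec {
        data_location_s3: "s3://example-bucket/input/data.csv".to_owned(),
        data_schema: Some(
            r#"{"version":"1.0","targetFieldName":"F3","dataFormat":"CSV","dataFileContainsHeader":true,"attributes":[{"fieldName":"F1","fieldType":"TEXT"},{"fieldName":"F2","fieldType":"NUMERIC"},{"fieldName":"F3","fieldType":"CATEGORICAL"}]}"#
                .to_owned(),
        ),
        // No `DataRearrangement` or schema location in this sketch.
        ..Default::default()
    }
}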
1571
1572/// <p>A custom key-value pair associated with an ML object, such as an ML model.</p>
1573#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1574pub struct Tag {
1575    /// <p>A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
1576    #[serde(rename = "Key")]
1577    #[serde(skip_serializing_if = "Option::is_none")]
1578    pub key: Option<String>,
1579    /// <p>An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.</p>
1580    #[serde(rename = "Value")]
1581    #[serde(skip_serializing_if = "Option::is_none")]
1582    pub value: Option<String>,
1583}
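
// Illustrative sketch (not part of the generated API): pairing the `Tag`
// struct above with the `AddTagsInput` defined earlier in this file. The
// resource ID, resource type, and tag values are hypothetical placeholders.
#[allow(dead_code)]
fn example_add_tags_input() -> AddTagsInput {
    AddTagsInput {
        resource_id: "exampleModelId".to_owned(),
        resource_type: "MLModel".to_owned(),
        tags: vec![Tag {
            key: Some("environment".to_owned()),
            value: Some("staging".to_owned()),
        }],
    }
}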
1584
1585#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1586#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1587pub struct UpdateBatchPredictionInput {
1588    /// <p>The ID assigned to the <code>BatchPrediction</code> during creation.</p>
1589    #[serde(rename = "BatchPredictionId")]
1590    pub batch_prediction_id: String,
1591    /// <p>A new user-supplied name or description of the <code>BatchPrediction</code>.</p>
1592    #[serde(rename = "BatchPredictionName")]
1593    pub batch_prediction_name: String,
1594}
1595
1596/// <p>Represents the output of an <code>UpdateBatchPrediction</code> operation.</p> <p>You can see the updated content by using the <code>GetBatchPrediction</code> operation.</p>
1597#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1598#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1599pub struct UpdateBatchPredictionOutput {
1600    /// <p>The ID assigned to the <code>BatchPrediction</code> during creation. This value should be identical to the value of the <code>BatchPredictionId</code> in the request.</p>
1601    #[serde(rename = "BatchPredictionId")]
1602    #[serde(skip_serializing_if = "Option::is_none")]
1603    pub batch_prediction_id: Option<String>,
1604}
1605
1606#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1607#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1608pub struct UpdateDataSourceInput {
1609    /// <p>The ID assigned to the <code>DataSource</code> during creation.</p>
1610    #[serde(rename = "DataSourceId")]
1611    pub data_source_id: String,
1612    /// <p>A new user-supplied name or description of the <code>DataSource</code> that will replace the current description. </p>
1613    #[serde(rename = "DataSourceName")]
1614    pub data_source_name: String,
1615}
1616
1617/// <p>Represents the output of an <code>UpdateDataSource</code> operation.</p> <p>You can see the updated content by using the <code>GetDataSource</code> operation.</p>
1618#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1619#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1620pub struct UpdateDataSourceOutput {
1621    /// <p>The ID assigned to the <code>DataSource</code> during creation. This value should be identical to the value of the <code>DataSourceId</code> in the request.</p>
1622    #[serde(rename = "DataSourceId")]
1623    #[serde(skip_serializing_if = "Option::is_none")]
1624    pub data_source_id: Option<String>,
1625}
1626
1627#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1628#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1629pub struct UpdateEvaluationInput {
1630    /// <p>The ID assigned to the <code>Evaluation</code> during creation.</p>
1631    #[serde(rename = "EvaluationId")]
1632    pub evaluation_id: String,
1633    /// <p>A new user-supplied name or description of the <code>Evaluation</code> that will replace the current content. </p>
1634    #[serde(rename = "EvaluationName")]
1635    pub evaluation_name: String,
1636}
1637
1638/// <p>Represents the output of an <code>UpdateEvaluation</code> operation.</p> <p>You can see the updated content by using the <code>GetEvaluation</code> operation.</p>
1639#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1640#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1641pub struct UpdateEvaluationOutput {
1642    /// <p>The ID assigned to the <code>Evaluation</code> during creation. This value should be identical to the value of the <code>EvaluationId</code> in the request.</p>
1643    #[serde(rename = "EvaluationId")]
1644    #[serde(skip_serializing_if = "Option::is_none")]
1645    pub evaluation_id: Option<String>,
1646}
1647
1648#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1649#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1650pub struct UpdateMLModelInput {
1651    /// <p>The ID assigned to the <code>MLModel</code> during creation.</p>
1652    #[serde(rename = "MLModelId")]
1653    pub ml_model_id: String,
1654    /// <p>A user-supplied name or description of the <code>MLModel</code>.</p>
1655    #[serde(rename = "MLModelName")]
1656    #[serde(skip_serializing_if = "Option::is_none")]
1657    pub ml_model_name: Option<String>,
1658    /// <p>The <code>ScoreThreshold</code> used in binary classification <code>MLModel</code> that marks the boundary between a positive prediction and a negative prediction.</p> <p>Output values greater than or equal to the <code>ScoreThreshold</code> receive a positive result from the <code>MLModel</code>, such as <code>true</code>. Output values less than the <code>ScoreThreshold</code> receive a negative response from the <code>MLModel</code>, such as <code>false</code>.</p>
1659    #[serde(rename = "ScoreThreshold")]
1660    #[serde(skip_serializing_if = "Option::is_none")]
1661    pub score_threshold: Option<f32>,
1662}
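
// Illustrative sketch (not part of the generated API): per the
// `ScoreThreshold` notes above, scores at or above the threshold map to the
// positive class. This builds an update request that moves the boundary to
// 0.75; the model ID is a hypothetical placeholder.
#[allow(dead_code)]
fn example_update_ml_model_input() -> UpdateMLModelInput {
    UpdateMLModelInput {
        ml_model_id: "exampleModelId".to_owned(),
        // Leave the name unchanged; only adjust the classification boundary.
        ml_model_name: None,
        score_threshold: Some(0.75),
    }
}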
1663
1664/// <p>Represents the output of an <code>UpdateMLModel</code> operation.</p> <p>You can see the updated content by using the <code>GetMLModel</code> operation.</p>
1665#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1666#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1667pub struct UpdateMLModelOutput {
1668    /// <p>The ID assigned to the <code>MLModel</code> during creation. This value should be identical to the value of the <code>MLModelId</code> in the request.</p>
1669    #[serde(rename = "MLModelId")]
1670    #[serde(skip_serializing_if = "Option::is_none")]
1671    pub ml_model_id: Option<String>,
1672}
1673
1674/// Errors returned by AddTags
1675#[derive(Debug, PartialEq)]
1676pub enum AddTagsError {
1677    /// <p>An error on the server occurred when trying to process a request.</p>
1678    InternalServer(String),
1679    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1680    InvalidInput(String),
1681
1682    InvalidTag(String),
1683    /// <p>A specified resource cannot be located.</p>
1684    ResourceNotFound(String),
1685
1686    TagLimitExceeded(String),
1687}
1688
1689impl AddTagsError {
1690    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<AddTagsError> {
1691        if let Some(err) = proto::json::Error::parse(&res) {
1692            match err.typ.as_str() {
1693                "InternalServerException" => {
1694                    return RusotoError::Service(AddTagsError::InternalServer(err.msg))
1695                }
1696                "InvalidInputException" => {
1697                    return RusotoError::Service(AddTagsError::InvalidInput(err.msg))
1698                }
1699                "InvalidTagException" => {
1700                    return RusotoError::Service(AddTagsError::InvalidTag(err.msg))
1701                }
1702                "ResourceNotFoundException" => {
1703                    return RusotoError::Service(AddTagsError::ResourceNotFound(err.msg))
1704                }
1705                "TagLimitExceededException" => {
1706                    return RusotoError::Service(AddTagsError::TagLimitExceeded(err.msg))
1707                }
1708                "ValidationException" => return RusotoError::Validation(err.msg),
1709                _ => {}
1710            }
1711        }
1712        RusotoError::Unknown(res)
1713    }
1714}
1715impl fmt::Display for AddTagsError {
1716    #[allow(unused_variables)]
1717    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1718        match *self {
1719            AddTagsError::InternalServer(ref cause) => write!(f, "{}", cause),
1720            AddTagsError::InvalidInput(ref cause) => write!(f, "{}", cause),
1721            AddTagsError::InvalidTag(ref cause) => write!(f, "{}", cause),
1722            AddTagsError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1723            AddTagsError::TagLimitExceeded(ref cause) => write!(f, "{}", cause),
1724        }
1725    }
1726}
1727impl Error for AddTagsError {}
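
// Illustrative sketch (not part of the generated API): how a caller might
// branch on the service-level variants produced by
// `AddTagsError::from_response`. The logging is a placeholder for real
// handling.
#[allow(dead_code)]
fn example_handle_add_tags_error(err: RusotoError<AddTagsError>) {
    match err {
        RusotoError::Service(AddTagsError::TagLimitExceeded(msg)) => {
            // The resource already carries the maximum number of tags.
            eprintln!("tag limit exceeded: {}", msg);
        }
        RusotoError::Service(AddTagsError::ResourceNotFound(msg)) => {
            eprintln!("resource not found: {}", msg);
        }
        RusotoError::Validation(msg) => {
            eprintln!("request failed validation: {}", msg);
        }
        other => {
            // InternalServer, InvalidInput, InvalidTag, HTTP, credential, and
            // unknown errors fall through here.
            eprintln!("AddTags failed: {}", other);
        }
    }
}
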
1728/// Errors returned by CreateBatchPrediction
1729#[derive(Debug, PartialEq)]
1730pub enum CreateBatchPredictionError {
1731    /// <p>A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.</p>
1732    IdempotentParameterMismatch(String),
1733    /// <p>An error on the server occurred when trying to process a request.</p>
1734    InternalServer(String),
1735    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1736    InvalidInput(String),
1737}
1738
1739impl CreateBatchPredictionError {
1740    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateBatchPredictionError> {
1741        if let Some(err) = proto::json::Error::parse(&res) {
1742            match err.typ.as_str() {
1743                "IdempotentParameterMismatchException" => {
1744                    return RusotoError::Service(
1745                        CreateBatchPredictionError::IdempotentParameterMismatch(err.msg),
1746                    )
1747                }
1748                "InternalServerException" => {
1749                    return RusotoError::Service(CreateBatchPredictionError::InternalServer(
1750                        err.msg,
1751                    ))
1752                }
1753                "InvalidInputException" => {
1754                    return RusotoError::Service(CreateBatchPredictionError::InvalidInput(err.msg))
1755                }
1756                "ValidationException" => return RusotoError::Validation(err.msg),
1757                _ => {}
1758            }
1759        }
1760        RusotoError::Unknown(res)
1761    }
1762}
1763impl fmt::Display for CreateBatchPredictionError {
1764    #[allow(unused_variables)]
1765    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1766        match *self {
1767            CreateBatchPredictionError::IdempotentParameterMismatch(ref cause) => {
1768                write!(f, "{}", cause)
1769            }
1770            CreateBatchPredictionError::InternalServer(ref cause) => write!(f, "{}", cause),
1771            CreateBatchPredictionError::InvalidInput(ref cause) => write!(f, "{}", cause),
1772        }
1773    }
1774}
1775impl Error for CreateBatchPredictionError {}
1776/// Errors returned by CreateDataSourceFromRDS
1777#[derive(Debug, PartialEq)]
1778pub enum CreateDataSourceFromRDSError {
1779    /// <p>A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.</p>
1780    IdempotentParameterMismatch(String),
1781    /// <p>An error on the server occurred when trying to process a request.</p>
1782    InternalServer(String),
1783    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1784    InvalidInput(String),
1785}
1786
1787impl CreateDataSourceFromRDSError {
1788    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDataSourceFromRDSError> {
1789        if let Some(err) = proto::json::Error::parse(&res) {
1790            match err.typ.as_str() {
1791                "IdempotentParameterMismatchException" => {
1792                    return RusotoError::Service(
1793                        CreateDataSourceFromRDSError::IdempotentParameterMismatch(err.msg),
1794                    )
1795                }
1796                "InternalServerException" => {
1797                    return RusotoError::Service(CreateDataSourceFromRDSError::InternalServer(
1798                        err.msg,
1799                    ))
1800                }
1801                "InvalidInputException" => {
1802                    return RusotoError::Service(CreateDataSourceFromRDSError::InvalidInput(
1803                        err.msg,
1804                    ))
1805                }
1806                "ValidationException" => return RusotoError::Validation(err.msg),
1807                _ => {}
1808            }
1809        }
1810        RusotoError::Unknown(res)
1811    }
1812}
1813impl fmt::Display for CreateDataSourceFromRDSError {
1814    #[allow(unused_variables)]
1815    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1816        match *self {
1817            CreateDataSourceFromRDSError::IdempotentParameterMismatch(ref cause) => {
1818                write!(f, "{}", cause)
1819            }
1820            CreateDataSourceFromRDSError::InternalServer(ref cause) => write!(f, "{}", cause),
1821            CreateDataSourceFromRDSError::InvalidInput(ref cause) => write!(f, "{}", cause),
1822        }
1823    }
1824}
1825impl Error for CreateDataSourceFromRDSError {}
1826/// Errors returned by CreateDataSourceFromRedshift
1827#[derive(Debug, PartialEq)]
1828pub enum CreateDataSourceFromRedshiftError {
1829    /// <p>A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.</p>
1830    IdempotentParameterMismatch(String),
1831    /// <p>An error on the server occurred when trying to process a request.</p>
1832    InternalServer(String),
1833    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1834    InvalidInput(String),
1835}
1836
1837impl CreateDataSourceFromRedshiftError {
1838    pub fn from_response(
1839        res: BufferedHttpResponse,
1840    ) -> RusotoError<CreateDataSourceFromRedshiftError> {
1841        if let Some(err) = proto::json::Error::parse(&res) {
1842            match err.typ.as_str() {
1843                "IdempotentParameterMismatchException" => {
1844                    return RusotoError::Service(
1845                        CreateDataSourceFromRedshiftError::IdempotentParameterMismatch(err.msg),
1846                    )
1847                }
1848                "InternalServerException" => {
1849                    return RusotoError::Service(CreateDataSourceFromRedshiftError::InternalServer(
1850                        err.msg,
1851                    ))
1852                }
1853                "InvalidInputException" => {
1854                    return RusotoError::Service(CreateDataSourceFromRedshiftError::InvalidInput(
1855                        err.msg,
1856                    ))
1857                }
1858                "ValidationException" => return RusotoError::Validation(err.msg),
1859                _ => {}
1860            }
1861        }
1862        RusotoError::Unknown(res)
1863    }
1864}
1865impl fmt::Display for CreateDataSourceFromRedshiftError {
1866    #[allow(unused_variables)]
1867    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1868        match *self {
1869            CreateDataSourceFromRedshiftError::IdempotentParameterMismatch(ref cause) => {
1870                write!(f, "{}", cause)
1871            }
1872            CreateDataSourceFromRedshiftError::InternalServer(ref cause) => write!(f, "{}", cause),
1873            CreateDataSourceFromRedshiftError::InvalidInput(ref cause) => write!(f, "{}", cause),
1874        }
1875    }
1876}
1877impl Error for CreateDataSourceFromRedshiftError {}
1878/// Errors returned by CreateDataSourceFromS3
1879#[derive(Debug, PartialEq)]
1880pub enum CreateDataSourceFromS3Error {
1881    /// <p>A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.</p>
1882    IdempotentParameterMismatch(String),
1883    /// <p>An error on the server occurred when trying to process a request.</p>
1884    InternalServer(String),
1885    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1886    InvalidInput(String),
1887}
1888
1889impl CreateDataSourceFromS3Error {
1890    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDataSourceFromS3Error> {
1891        if let Some(err) = proto::json::Error::parse(&res) {
1892            match err.typ.as_str() {
1893                "IdempotentParameterMismatchException" => {
1894                    return RusotoError::Service(
1895                        CreateDataSourceFromS3Error::IdempotentParameterMismatch(err.msg),
1896                    )
1897                }
1898                "InternalServerException" => {
1899                    return RusotoError::Service(CreateDataSourceFromS3Error::InternalServer(
1900                        err.msg,
1901                    ))
1902                }
1903                "InvalidInputException" => {
1904                    return RusotoError::Service(CreateDataSourceFromS3Error::InvalidInput(err.msg))
1905                }
1906                "ValidationException" => return RusotoError::Validation(err.msg),
1907                _ => {}
1908            }
1909        }
1910        RusotoError::Unknown(res)
1911    }
1912}
1913impl fmt::Display for CreateDataSourceFromS3Error {
1914    #[allow(unused_variables)]
1915    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1916        match *self {
1917            CreateDataSourceFromS3Error::IdempotentParameterMismatch(ref cause) => {
1918                write!(f, "{}", cause)
1919            }
1920            CreateDataSourceFromS3Error::InternalServer(ref cause) => write!(f, "{}", cause),
1921            CreateDataSourceFromS3Error::InvalidInput(ref cause) => write!(f, "{}", cause),
1922        }
1923    }
1924}
1925impl Error for CreateDataSourceFromS3Error {}
1926/// Errors returned by CreateEvaluation
1927#[derive(Debug, PartialEq)]
1928pub enum CreateEvaluationError {
1929    /// <p>A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.</p>
1930    IdempotentParameterMismatch(String),
1931    /// <p>An error on the server occurred when trying to process a request.</p>
1932    InternalServer(String),
1933    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1934    InvalidInput(String),
1935}
1936
1937impl CreateEvaluationError {
1938    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateEvaluationError> {
1939        if let Some(err) = proto::json::Error::parse(&res) {
1940            match err.typ.as_str() {
1941                "IdempotentParameterMismatchException" => {
1942                    return RusotoError::Service(
1943                        CreateEvaluationError::IdempotentParameterMismatch(err.msg),
1944                    )
1945                }
1946                "InternalServerException" => {
1947                    return RusotoError::Service(CreateEvaluationError::InternalServer(err.msg))
1948                }
1949                "InvalidInputException" => {
1950                    return RusotoError::Service(CreateEvaluationError::InvalidInput(err.msg))
1951                }
1952                "ValidationException" => return RusotoError::Validation(err.msg),
1953                _ => {}
1954            }
1955        }
1956        RusotoError::Unknown(res)
1957    }
1958}
1959impl fmt::Display for CreateEvaluationError {
1960    #[allow(unused_variables)]
1961    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1962        match *self {
1963            CreateEvaluationError::IdempotentParameterMismatch(ref cause) => write!(f, "{}", cause),
1964            CreateEvaluationError::InternalServer(ref cause) => write!(f, "{}", cause),
1965            CreateEvaluationError::InvalidInput(ref cause) => write!(f, "{}", cause),
1966        }
1967    }
1968}
1969impl Error for CreateEvaluationError {}
1970/// Errors returned by CreateMLModel
1971#[derive(Debug, PartialEq)]
1972pub enum CreateMLModelError {
1973    /// <p>A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.</p>
1974    IdempotentParameterMismatch(String),
1975    /// <p>An error on the server occurred when trying to process a request.</p>
1976    InternalServer(String),
1977    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
1978    InvalidInput(String),
1979}
1980
1981impl CreateMLModelError {
1982    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateMLModelError> {
1983        if let Some(err) = proto::json::Error::parse(&res) {
1984            match err.typ.as_str() {
1985                "IdempotentParameterMismatchException" => {
1986                    return RusotoError::Service(CreateMLModelError::IdempotentParameterMismatch(
1987                        err.msg,
1988                    ))
1989                }
1990                "InternalServerException" => {
1991                    return RusotoError::Service(CreateMLModelError::InternalServer(err.msg))
1992                }
1993                "InvalidInputException" => {
1994                    return RusotoError::Service(CreateMLModelError::InvalidInput(err.msg))
1995                }
1996                "ValidationException" => return RusotoError::Validation(err.msg),
1997                _ => {}
1998            }
1999        }
2000        RusotoError::Unknown(res)
2001    }
2002}
2003impl fmt::Display for CreateMLModelError {
2004    #[allow(unused_variables)]
2005    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2006        match *self {
2007            CreateMLModelError::IdempotentParameterMismatch(ref cause) => write!(f, "{}", cause),
2008            CreateMLModelError::InternalServer(ref cause) => write!(f, "{}", cause),
2009            CreateMLModelError::InvalidInput(ref cause) => write!(f, "{}", cause),
2010        }
2011    }
2012}
2013impl Error for CreateMLModelError {}
2014/// Errors returned by CreateRealtimeEndpoint
2015#[derive(Debug, PartialEq)]
2016pub enum CreateRealtimeEndpointError {
2017    /// <p>An error on the server occurred when trying to process a request.</p>
2018    InternalServer(String),
2019    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2020    InvalidInput(String),
2021    /// <p>A specified resource cannot be located.</p>
2022    ResourceNotFound(String),
2023}
2024
2025impl CreateRealtimeEndpointError {
2026    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateRealtimeEndpointError> {
2027        if let Some(err) = proto::json::Error::parse(&res) {
2028            match err.typ.as_str() {
2029                "InternalServerException" => {
2030                    return RusotoError::Service(CreateRealtimeEndpointError::InternalServer(
2031                        err.msg,
2032                    ))
2033                }
2034                "InvalidInputException" => {
2035                    return RusotoError::Service(CreateRealtimeEndpointError::InvalidInput(err.msg))
2036                }
2037                "ResourceNotFoundException" => {
2038                    return RusotoError::Service(CreateRealtimeEndpointError::ResourceNotFound(
2039                        err.msg,
2040                    ))
2041                }
2042                "ValidationException" => return RusotoError::Validation(err.msg),
2043                _ => {}
2044            }
2045        }
2046        RusotoError::Unknown(res)
2047    }
2048}
2049impl fmt::Display for CreateRealtimeEndpointError {
2050    #[allow(unused_variables)]
2051    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2052        match *self {
2053            CreateRealtimeEndpointError::InternalServer(ref cause) => write!(f, "{}", cause),
2054            CreateRealtimeEndpointError::InvalidInput(ref cause) => write!(f, "{}", cause),
2055            CreateRealtimeEndpointError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2056        }
2057    }
2058}
2059impl Error for CreateRealtimeEndpointError {}
2060/// Errors returned by DeleteBatchPrediction
2061#[derive(Debug, PartialEq)]
2062pub enum DeleteBatchPredictionError {
2063    /// <p>An error on the server occurred when trying to process a request.</p>
2064    InternalServer(String),
2065    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2066    InvalidInput(String),
2067    /// <p>A specified resource cannot be located.</p>
2068    ResourceNotFound(String),
2069}
2070
2071impl DeleteBatchPredictionError {
2072    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteBatchPredictionError> {
2073        if let Some(err) = proto::json::Error::parse(&res) {
2074            match err.typ.as_str() {
2075                "InternalServerException" => {
2076                    return RusotoError::Service(DeleteBatchPredictionError::InternalServer(
2077                        err.msg,
2078                    ))
2079                }
2080                "InvalidInputException" => {
2081                    return RusotoError::Service(DeleteBatchPredictionError::InvalidInput(err.msg))
2082                }
2083                "ResourceNotFoundException" => {
2084                    return RusotoError::Service(DeleteBatchPredictionError::ResourceNotFound(
2085                        err.msg,
2086                    ))
2087                }
2088                "ValidationException" => return RusotoError::Validation(err.msg),
2089                _ => {}
2090            }
2091        }
2092        RusotoError::Unknown(res)
2093    }
2094}
2095impl fmt::Display for DeleteBatchPredictionError {
2096    #[allow(unused_variables)]
2097    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2098        match *self {
2099            DeleteBatchPredictionError::InternalServer(ref cause) => write!(f, "{}", cause),
2100            DeleteBatchPredictionError::InvalidInput(ref cause) => write!(f, "{}", cause),
2101            DeleteBatchPredictionError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2102        }
2103    }
2104}
2105impl Error for DeleteBatchPredictionError {}
2106/// Errors returned by DeleteDataSource
2107#[derive(Debug, PartialEq)]
2108pub enum DeleteDataSourceError {
2109    /// <p>An error on the server occurred when trying to process a request.</p>
2110    InternalServer(String),
2111    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2112    InvalidInput(String),
2113    /// <p>A specified resource cannot be located.</p>
2114    ResourceNotFound(String),
2115}
2116
2117impl DeleteDataSourceError {
2118    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteDataSourceError> {
2119        if let Some(err) = proto::json::Error::parse(&res) {
2120            match err.typ.as_str() {
2121                "InternalServerException" => {
2122                    return RusotoError::Service(DeleteDataSourceError::InternalServer(err.msg))
2123                }
2124                "InvalidInputException" => {
2125                    return RusotoError::Service(DeleteDataSourceError::InvalidInput(err.msg))
2126                }
2127                "ResourceNotFoundException" => {
2128                    return RusotoError::Service(DeleteDataSourceError::ResourceNotFound(err.msg))
2129                }
2130                "ValidationException" => return RusotoError::Validation(err.msg),
2131                _ => {}
2132            }
2133        }
2134        RusotoError::Unknown(res)
2135    }
2136}
2137impl fmt::Display for DeleteDataSourceError {
2138    #[allow(unused_variables)]
2139    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2140        match *self {
2141            DeleteDataSourceError::InternalServer(ref cause) => write!(f, "{}", cause),
2142            DeleteDataSourceError::InvalidInput(ref cause) => write!(f, "{}", cause),
2143            DeleteDataSourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2144        }
2145    }
2146}
2147impl Error for DeleteDataSourceError {}
2148/// Errors returned by DeleteEvaluation
2149#[derive(Debug, PartialEq)]
2150pub enum DeleteEvaluationError {
2151    /// <p>An error on the server occurred when trying to process a request.</p>
2152    InternalServer(String),
2153    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2154    InvalidInput(String),
2155    /// <p>A specified resource cannot be located.</p>
2156    ResourceNotFound(String),
2157}
2158
2159impl DeleteEvaluationError {
2160    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteEvaluationError> {
2161        if let Some(err) = proto::json::Error::parse(&res) {
2162            match err.typ.as_str() {
2163                "InternalServerException" => {
2164                    return RusotoError::Service(DeleteEvaluationError::InternalServer(err.msg))
2165                }
2166                "InvalidInputException" => {
2167                    return RusotoError::Service(DeleteEvaluationError::InvalidInput(err.msg))
2168                }
2169                "ResourceNotFoundException" => {
2170                    return RusotoError::Service(DeleteEvaluationError::ResourceNotFound(err.msg))
2171                }
2172                "ValidationException" => return RusotoError::Validation(err.msg),
2173                _ => {}
2174            }
2175        }
2176        RusotoError::Unknown(res)
2177    }
2178}
2179impl fmt::Display for DeleteEvaluationError {
2180    #[allow(unused_variables)]
2181    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2182        match *self {
2183            DeleteEvaluationError::InternalServer(ref cause) => write!(f, "{}", cause),
2184            DeleteEvaluationError::InvalidInput(ref cause) => write!(f, "{}", cause),
2185            DeleteEvaluationError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2186        }
2187    }
2188}
2189impl Error for DeleteEvaluationError {}
2190/// Errors returned by DeleteMLModel
2191#[derive(Debug, PartialEq)]
2192pub enum DeleteMLModelError {
2193    /// <p>An error on the server occurred when trying to process a request.</p>
2194    InternalServer(String),
2195    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2196    InvalidInput(String),
2197    /// <p>A specified resource cannot be located.</p>
2198    ResourceNotFound(String),
2199}
2200
2201impl DeleteMLModelError {
2202    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteMLModelError> {
2203        if let Some(err) = proto::json::Error::parse(&res) {
2204            match err.typ.as_str() {
2205                "InternalServerException" => {
2206                    return RusotoError::Service(DeleteMLModelError::InternalServer(err.msg))
2207                }
2208                "InvalidInputException" => {
2209                    return RusotoError::Service(DeleteMLModelError::InvalidInput(err.msg))
2210                }
2211                "ResourceNotFoundException" => {
2212                    return RusotoError::Service(DeleteMLModelError::ResourceNotFound(err.msg))
2213                }
2214                "ValidationException" => return RusotoError::Validation(err.msg),
2215                _ => {}
2216            }
2217        }
2218        RusotoError::Unknown(res)
2219    }
2220}
2221impl fmt::Display for DeleteMLModelError {
2222    #[allow(unused_variables)]
2223    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2224        match *self {
2225            DeleteMLModelError::InternalServer(ref cause) => write!(f, "{}", cause),
2226            DeleteMLModelError::InvalidInput(ref cause) => write!(f, "{}", cause),
2227            DeleteMLModelError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2228        }
2229    }
2230}
2231impl Error for DeleteMLModelError {}
2232/// Errors returned by DeleteRealtimeEndpoint
2233#[derive(Debug, PartialEq)]
2234pub enum DeleteRealtimeEndpointError {
2235    /// <p>An error on the server occurred when trying to process a request.</p>
2236    InternalServer(String),
2237    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2238    InvalidInput(String),
2239    /// <p>A specified resource cannot be located.</p>
2240    ResourceNotFound(String),
2241}
2242
2243impl DeleteRealtimeEndpointError {
2244    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteRealtimeEndpointError> {
2245        if let Some(err) = proto::json::Error::parse(&res) {
2246            match err.typ.as_str() {
2247                "InternalServerException" => {
2248                    return RusotoError::Service(DeleteRealtimeEndpointError::InternalServer(
2249                        err.msg,
2250                    ))
2251                }
2252                "InvalidInputException" => {
2253                    return RusotoError::Service(DeleteRealtimeEndpointError::InvalidInput(err.msg))
2254                }
2255                "ResourceNotFoundException" => {
2256                    return RusotoError::Service(DeleteRealtimeEndpointError::ResourceNotFound(
2257                        err.msg,
2258                    ))
2259                }
2260                "ValidationException" => return RusotoError::Validation(err.msg),
2261                _ => {}
2262            }
2263        }
2264        RusotoError::Unknown(res)
2265    }
2266}
2267impl fmt::Display for DeleteRealtimeEndpointError {
2268    #[allow(unused_variables)]
2269    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2270        match *self {
2271            DeleteRealtimeEndpointError::InternalServer(ref cause) => write!(f, "{}", cause),
2272            DeleteRealtimeEndpointError::InvalidInput(ref cause) => write!(f, "{}", cause),
2273            DeleteRealtimeEndpointError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2274        }
2275    }
2276}
2277impl Error for DeleteRealtimeEndpointError {}
2278/// Errors returned by DeleteTags
2279#[derive(Debug, PartialEq)]
2280pub enum DeleteTagsError {
2281    /// <p>An error on the server occurred when trying to process a request.</p>
2282    InternalServer(String),
2283    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2284    InvalidInput(String),
2285    /// <p>A specified tag is invalid.</p>
2286    InvalidTag(String),
2287    /// <p>A specified resource cannot be located.</p>
2288    ResourceNotFound(String),
2289}
2290
2291impl DeleteTagsError {
2292    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteTagsError> {
2293        if let Some(err) = proto::json::Error::parse(&res) {
2294            match err.typ.as_str() {
2295                "InternalServerException" => {
2296                    return RusotoError::Service(DeleteTagsError::InternalServer(err.msg))
2297                }
2298                "InvalidInputException" => {
2299                    return RusotoError::Service(DeleteTagsError::InvalidInput(err.msg))
2300                }
2301                "InvalidTagException" => {
2302                    return RusotoError::Service(DeleteTagsError::InvalidTag(err.msg))
2303                }
2304                "ResourceNotFoundException" => {
2305                    return RusotoError::Service(DeleteTagsError::ResourceNotFound(err.msg))
2306                }
2307                "ValidationException" => return RusotoError::Validation(err.msg),
2308                _ => {}
2309            }
2310        }
2311        RusotoError::Unknown(res)
2312    }
2313}
2314impl fmt::Display for DeleteTagsError {
2315    #[allow(unused_variables)]
2316    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2317        match *self {
2318            DeleteTagsError::InternalServer(ref cause) => write!(f, "{}", cause),
2319            DeleteTagsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2320            DeleteTagsError::InvalidTag(ref cause) => write!(f, "{}", cause),
2321            DeleteTagsError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2322        }
2323    }
2324}
2325impl Error for DeleteTagsError {}
2326/// Errors returned by DescribeBatchPredictions
2327#[derive(Debug, PartialEq)]
2328pub enum DescribeBatchPredictionsError {
2329    /// <p>An error on the server occurred when trying to process a request.</p>
2330    InternalServer(String),
2331    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2332    InvalidInput(String),
2333}
2334
2335impl DescribeBatchPredictionsError {
2336    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeBatchPredictionsError> {
2337        if let Some(err) = proto::json::Error::parse(&res) {
2338            match err.typ.as_str() {
2339                "InternalServerException" => {
2340                    return RusotoError::Service(DescribeBatchPredictionsError::InternalServer(
2341                        err.msg,
2342                    ))
2343                }
2344                "InvalidInputException" => {
2345                    return RusotoError::Service(DescribeBatchPredictionsError::InvalidInput(
2346                        err.msg,
2347                    ))
2348                }
2349                "ValidationException" => return RusotoError::Validation(err.msg),
2350                _ => {}
2351            }
2352        }
2353        RusotoError::Unknown(res)
2354    }
2355}
2356impl fmt::Display for DescribeBatchPredictionsError {
2357    #[allow(unused_variables)]
2358    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2359        match *self {
2360            DescribeBatchPredictionsError::InternalServer(ref cause) => write!(f, "{}", cause),
2361            DescribeBatchPredictionsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2362        }
2363    }
2364}
2365impl Error for DescribeBatchPredictionsError {}
2366/// Errors returned by DescribeDataSources
2367#[derive(Debug, PartialEq)]
2368pub enum DescribeDataSourcesError {
2369    /// <p>An error on the server occurred when trying to process a request.</p>
2370    InternalServer(String),
2371    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2372    InvalidInput(String),
2373}
2374
2375impl DescribeDataSourcesError {
2376    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeDataSourcesError> {
2377        if let Some(err) = proto::json::Error::parse(&res) {
2378            match err.typ.as_str() {
2379                "InternalServerException" => {
2380                    return RusotoError::Service(DescribeDataSourcesError::InternalServer(err.msg))
2381                }
2382                "InvalidInputException" => {
2383                    return RusotoError::Service(DescribeDataSourcesError::InvalidInput(err.msg))
2384                }
2385                "ValidationException" => return RusotoError::Validation(err.msg),
2386                _ => {}
2387            }
2388        }
2389        RusotoError::Unknown(res)
2390    }
2391}
2392impl fmt::Display for DescribeDataSourcesError {
2393    #[allow(unused_variables)]
2394    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2395        match *self {
2396            DescribeDataSourcesError::InternalServer(ref cause) => write!(f, "{}", cause),
2397            DescribeDataSourcesError::InvalidInput(ref cause) => write!(f, "{}", cause),
2398        }
2399    }
2400}
2401impl Error for DescribeDataSourcesError {}
2402/// Errors returned by DescribeEvaluations
2403#[derive(Debug, PartialEq)]
2404pub enum DescribeEvaluationsError {
2405    /// <p>An error on the server occurred when trying to process a request.</p>
2406    InternalServer(String),
2407    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2408    InvalidInput(String),
2409}
2410
2411impl DescribeEvaluationsError {
2412    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeEvaluationsError> {
2413        if let Some(err) = proto::json::Error::parse(&res) {
2414            match err.typ.as_str() {
2415                "InternalServerException" => {
2416                    return RusotoError::Service(DescribeEvaluationsError::InternalServer(err.msg))
2417                }
2418                "InvalidInputException" => {
2419                    return RusotoError::Service(DescribeEvaluationsError::InvalidInput(err.msg))
2420                }
2421                "ValidationException" => return RusotoError::Validation(err.msg),
2422                _ => {}
2423            }
2424        }
2425        RusotoError::Unknown(res)
2426    }
2427}
2428impl fmt::Display for DescribeEvaluationsError {
2429    #[allow(unused_variables)]
2430    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2431        match *self {
2432            DescribeEvaluationsError::InternalServer(ref cause) => write!(f, "{}", cause),
2433            DescribeEvaluationsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2434        }
2435    }
2436}
2437impl Error for DescribeEvaluationsError {}
2438/// Errors returned by DescribeMLModels
2439#[derive(Debug, PartialEq)]
2440pub enum DescribeMLModelsError {
2441    /// <p>An error on the server occurred when trying to process a request.</p>
2442    InternalServer(String),
2443    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2444    InvalidInput(String),
2445}
2446
2447impl DescribeMLModelsError {
2448    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeMLModelsError> {
2449        if let Some(err) = proto::json::Error::parse(&res) {
2450            match err.typ.as_str() {
2451                "InternalServerException" => {
2452                    return RusotoError::Service(DescribeMLModelsError::InternalServer(err.msg))
2453                }
2454                "InvalidInputException" => {
2455                    return RusotoError::Service(DescribeMLModelsError::InvalidInput(err.msg))
2456                }
2457                "ValidationException" => return RusotoError::Validation(err.msg),
2458                _ => {}
2459            }
2460        }
2461        RusotoError::Unknown(res)
2462    }
2463}
2464impl fmt::Display for DescribeMLModelsError {
2465    #[allow(unused_variables)]
2466    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2467        match *self {
2468            DescribeMLModelsError::InternalServer(ref cause) => write!(f, "{}", cause),
2469            DescribeMLModelsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2470        }
2471    }
2472}
2473impl Error for DescribeMLModelsError {}
2474/// Errors returned by DescribeTags
2475#[derive(Debug, PartialEq)]
2476pub enum DescribeTagsError {
2477    /// <p>An error on the server occurred when trying to process a request.</p>
2478    InternalServer(String),
2479    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2480    InvalidInput(String),
2481    /// <p>A specified resource cannot be located.</p>
2482    ResourceNotFound(String),
2483}
2484
2485impl DescribeTagsError {
2486    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeTagsError> {
2487        if let Some(err) = proto::json::Error::parse(&res) {
2488            match err.typ.as_str() {
2489                "InternalServerException" => {
2490                    return RusotoError::Service(DescribeTagsError::InternalServer(err.msg))
2491                }
2492                "InvalidInputException" => {
2493                    return RusotoError::Service(DescribeTagsError::InvalidInput(err.msg))
2494                }
2495                "ResourceNotFoundException" => {
2496                    return RusotoError::Service(DescribeTagsError::ResourceNotFound(err.msg))
2497                }
2498                "ValidationException" => return RusotoError::Validation(err.msg),
2499                _ => {}
2500            }
2501        }
2502        RusotoError::Unknown(res)
2503    }
2504}
2505impl fmt::Display for DescribeTagsError {
2506    #[allow(unused_variables)]
2507    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2508        match *self {
2509            DescribeTagsError::InternalServer(ref cause) => write!(f, "{}", cause),
2510            DescribeTagsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2511            DescribeTagsError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2512        }
2513    }
2514}
2515impl Error for DescribeTagsError {}
2516/// Errors returned by GetBatchPrediction
2517#[derive(Debug, PartialEq)]
2518pub enum GetBatchPredictionError {
2519    /// <p>An error on the server occurred when trying to process a request.</p>
2520    InternalServer(String),
2521    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2522    InvalidInput(String),
2523    /// <p>A specified resource cannot be located.</p>
2524    ResourceNotFound(String),
2525}
2526
2527impl GetBatchPredictionError {
2528    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetBatchPredictionError> {
2529        if let Some(err) = proto::json::Error::parse(&res) {
2530            match err.typ.as_str() {
2531                "InternalServerException" => {
2532                    return RusotoError::Service(GetBatchPredictionError::InternalServer(err.msg))
2533                }
2534                "InvalidInputException" => {
2535                    return RusotoError::Service(GetBatchPredictionError::InvalidInput(err.msg))
2536                }
2537                "ResourceNotFoundException" => {
2538                    return RusotoError::Service(GetBatchPredictionError::ResourceNotFound(err.msg))
2539                }
2540                "ValidationException" => return RusotoError::Validation(err.msg),
2541                _ => {}
2542            }
2543        }
2544        RusotoError::Unknown(res)
2545    }
2546}
2547impl fmt::Display for GetBatchPredictionError {
2548    #[allow(unused_variables)]
2549    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2550        match *self {
2551            GetBatchPredictionError::InternalServer(ref cause) => write!(f, "{}", cause),
2552            GetBatchPredictionError::InvalidInput(ref cause) => write!(f, "{}", cause),
2553            GetBatchPredictionError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2554        }
2555    }
2556}
2557impl Error for GetBatchPredictionError {}
2558/// Errors returned by GetDataSource
2559#[derive(Debug, PartialEq)]
2560pub enum GetDataSourceError {
2561    /// <p>An error on the server occurred when trying to process a request.</p>
2562    InternalServer(String),
2563    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2564    InvalidInput(String),
2565    /// <p>A specified resource cannot be located.</p>
2566    ResourceNotFound(String),
2567}
2568
2569impl GetDataSourceError {
2570    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetDataSourceError> {
2571        if let Some(err) = proto::json::Error::parse(&res) {
2572            match err.typ.as_str() {
2573                "InternalServerException" => {
2574                    return RusotoError::Service(GetDataSourceError::InternalServer(err.msg))
2575                }
2576                "InvalidInputException" => {
2577                    return RusotoError::Service(GetDataSourceError::InvalidInput(err.msg))
2578                }
2579                "ResourceNotFoundException" => {
2580                    return RusotoError::Service(GetDataSourceError::ResourceNotFound(err.msg))
2581                }
2582                "ValidationException" => return RusotoError::Validation(err.msg),
2583                _ => {}
2584            }
2585        }
2586        RusotoError::Unknown(res)
2587    }
2588}
2589impl fmt::Display for GetDataSourceError {
2590    #[allow(unused_variables)]
2591    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2592        match *self {
2593            GetDataSourceError::InternalServer(ref cause) => write!(f, "{}", cause),
2594            GetDataSourceError::InvalidInput(ref cause) => write!(f, "{}", cause),
2595            GetDataSourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2596        }
2597    }
2598}
2599impl Error for GetDataSourceError {}
2600/// Errors returned by GetEvaluation
2601#[derive(Debug, PartialEq)]
2602pub enum GetEvaluationError {
2603    /// <p>An error on the server occurred when trying to process a request.</p>
2604    InternalServer(String),
2605    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2606    InvalidInput(String),
2607    /// <p>A specified resource cannot be located.</p>
2608    ResourceNotFound(String),
2609}
2610
2611impl GetEvaluationError {
2612    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetEvaluationError> {
2613        if let Some(err) = proto::json::Error::parse(&res) {
2614            match err.typ.as_str() {
2615                "InternalServerException" => {
2616                    return RusotoError::Service(GetEvaluationError::InternalServer(err.msg))
2617                }
2618                "InvalidInputException" => {
2619                    return RusotoError::Service(GetEvaluationError::InvalidInput(err.msg))
2620                }
2621                "ResourceNotFoundException" => {
2622                    return RusotoError::Service(GetEvaluationError::ResourceNotFound(err.msg))
2623                }
2624                "ValidationException" => return RusotoError::Validation(err.msg),
2625                _ => {}
2626            }
2627        }
2628        RusotoError::Unknown(res)
2629    }
2630}
2631impl fmt::Display for GetEvaluationError {
2632    #[allow(unused_variables)]
2633    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2634        match *self {
2635            GetEvaluationError::InternalServer(ref cause) => write!(f, "{}", cause),
2636            GetEvaluationError::InvalidInput(ref cause) => write!(f, "{}", cause),
2637            GetEvaluationError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2638        }
2639    }
2640}
2641impl Error for GetEvaluationError {}
2642/// Errors returned by GetMLModel
2643#[derive(Debug, PartialEq)]
2644pub enum GetMLModelError {
2645    /// <p>An error on the server occurred when trying to process a request.</p>
2646    InternalServer(String),
2647    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2648    InvalidInput(String),
2649    /// <p>A specified resource cannot be located.</p>
2650    ResourceNotFound(String),
2651}
2652
2653impl GetMLModelError {
2654    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetMLModelError> {
2655        if let Some(err) = proto::json::Error::parse(&res) {
2656            match err.typ.as_str() {
2657                "InternalServerException" => {
2658                    return RusotoError::Service(GetMLModelError::InternalServer(err.msg))
2659                }
2660                "InvalidInputException" => {
2661                    return RusotoError::Service(GetMLModelError::InvalidInput(err.msg))
2662                }
2663                "ResourceNotFoundException" => {
2664                    return RusotoError::Service(GetMLModelError::ResourceNotFound(err.msg))
2665                }
2666                "ValidationException" => return RusotoError::Validation(err.msg),
2667                _ => {}
2668            }
2669        }
2670        RusotoError::Unknown(res)
2671    }
2672}
2673impl fmt::Display for GetMLModelError {
2674    #[allow(unused_variables)]
2675    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2676        match *self {
2677            GetMLModelError::InternalServer(ref cause) => write!(f, "{}", cause),
2678            GetMLModelError::InvalidInput(ref cause) => write!(f, "{}", cause),
2679            GetMLModelError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2680        }
2681    }
2682}
2683impl Error for GetMLModelError {}
2684/// Errors returned by Predict
2685#[derive(Debug, PartialEq)]
2686pub enum PredictError {
2687    /// <p>An error on the server occurred when trying to process a request.</p>
2688    InternalServer(String),
2689    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2690    InvalidInput(String),
2691    /// <p>The subscriber exceeded the maximum number of operations. This exception can occur when listing objects such as <code>DataSource</code>.</p>
2692    LimitExceeded(String),
2693    /// <p>The exception is thrown when a predict request is made to an unmounted <code>MLModel</code>.</p>
2694    PredictorNotMounted(String),
2695    /// <p>A specified resource cannot be located.</p>
2696    ResourceNotFound(String),
2697}
2698
2699impl PredictError {
2700    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<PredictError> {
2701        if let Some(err) = proto::json::Error::parse(&res) {
2702            match err.typ.as_str() {
2703                "InternalServerException" => {
2704                    return RusotoError::Service(PredictError::InternalServer(err.msg))
2705                }
2706                "InvalidInputException" => {
2707                    return RusotoError::Service(PredictError::InvalidInput(err.msg))
2708                }
2709                "LimitExceededException" => {
2710                    return RusotoError::Service(PredictError::LimitExceeded(err.msg))
2711                }
2712                "PredictorNotMountedException" => {
2713                    return RusotoError::Service(PredictError::PredictorNotMounted(err.msg))
2714                }
2715                "ResourceNotFoundException" => {
2716                    return RusotoError::Service(PredictError::ResourceNotFound(err.msg))
2717                }
2718                "ValidationException" => return RusotoError::Validation(err.msg),
2719                _ => {}
2720            }
2721        }
2722        RusotoError::Unknown(res)
2723    }
2724}
2725impl fmt::Display for PredictError {
2726    #[allow(unused_variables)]
2727    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2728        match *self {
2729            PredictError::InternalServer(ref cause) => write!(f, "{}", cause),
2730            PredictError::InvalidInput(ref cause) => write!(f, "{}", cause),
2731            PredictError::LimitExceeded(ref cause) => write!(f, "{}", cause),
2732            PredictError::PredictorNotMounted(ref cause) => write!(f, "{}", cause),
2733            PredictError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2734        }
2735    }
2736}
2737impl Error for PredictError {}
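
// Illustrative example (not part of the generated API): a real-time `Predict` call that
// handles `PredictorNotMounted` separately, since that error just means no real-time
// endpoint is currently serving the `MLModel`. The `PredictInput` field names
// (`ml_model_id`, `predict_endpoint`, `record`) are assumed from the shape of the AWS
// Predict API and are not shown in this excerpt.
//
// use std::collections::HashMap;
//
// async fn predict_example(
//     client: &MachineLearningClient,
//     ml_model_id: String,
//     predict_endpoint: String,
// ) {
//     let mut record = HashMap::new();
//     record.insert("feature_1".to_owned(), "42".to_owned());
//
//     let input = PredictInput {
//         ml_model_id,
//         predict_endpoint,
//         record,
//     };
//     match client.predict(input).await {
//         Ok(output) => println!("prediction: {:?}", output),
//         Err(RusotoError::Service(PredictError::PredictorNotMounted(msg))) => {
//             // Create a real-time endpoint first (see `create_realtime_endpoint`).
//             eprintln!("model has no mounted real-time endpoint: {}", msg)
//         }
//         Err(RusotoError::Service(PredictError::LimitExceeded(msg))) => {
//             eprintln!("throttled; consider backing off: {}", msg)
//         }
//         Err(other) => eprintln!("predict failed: {}", other),
//     }
// }
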
2738/// Errors returned by UpdateBatchPrediction
2739#[derive(Debug, PartialEq)]
2740pub enum UpdateBatchPredictionError {
2741    /// <p>An error on the server occurred when trying to process a request.</p>
2742    InternalServer(String),
2743    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2744    InvalidInput(String),
2745    /// <p>A specified resource cannot be located.</p>
2746    ResourceNotFound(String),
2747}
2748
2749impl UpdateBatchPredictionError {
2750    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateBatchPredictionError> {
2751        if let Some(err) = proto::json::Error::parse(&res) {
2752            match err.typ.as_str() {
2753                "InternalServerException" => {
2754                    return RusotoError::Service(UpdateBatchPredictionError::InternalServer(
2755                        err.msg,
2756                    ))
2757                }
2758                "InvalidInputException" => {
2759                    return RusotoError::Service(UpdateBatchPredictionError::InvalidInput(err.msg))
2760                }
2761                "ResourceNotFoundException" => {
2762                    return RusotoError::Service(UpdateBatchPredictionError::ResourceNotFound(
2763                        err.msg,
2764                    ))
2765                }
2766                "ValidationException" => return RusotoError::Validation(err.msg),
2767                _ => {}
2768            }
2769        }
2770        RusotoError::Unknown(res)
2771    }
2772}
2773impl fmt::Display for UpdateBatchPredictionError {
2774    #[allow(unused_variables)]
2775    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2776        match *self {
2777            UpdateBatchPredictionError::InternalServer(ref cause) => write!(f, "{}", cause),
2778            UpdateBatchPredictionError::InvalidInput(ref cause) => write!(f, "{}", cause),
2779            UpdateBatchPredictionError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2780        }
2781    }
2782}
2783impl Error for UpdateBatchPredictionError {}
2784/// Errors returned by UpdateDataSource
2785#[derive(Debug, PartialEq)]
2786pub enum UpdateDataSourceError {
2787    /// <p>An error on the server occurred when trying to process a request.</p>
2788    InternalServer(String),
2789    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2790    InvalidInput(String),
2791    /// <p>A specified resource cannot be located.</p>
2792    ResourceNotFound(String),
2793}
2794
2795impl UpdateDataSourceError {
2796    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateDataSourceError> {
2797        if let Some(err) = proto::json::Error::parse(&res) {
2798            match err.typ.as_str() {
2799                "InternalServerException" => {
2800                    return RusotoError::Service(UpdateDataSourceError::InternalServer(err.msg))
2801                }
2802                "InvalidInputException" => {
2803                    return RusotoError::Service(UpdateDataSourceError::InvalidInput(err.msg))
2804                }
2805                "ResourceNotFoundException" => {
2806                    return RusotoError::Service(UpdateDataSourceError::ResourceNotFound(err.msg))
2807                }
2808                "ValidationException" => return RusotoError::Validation(err.msg),
2809                _ => {}
2810            }
2811        }
2812        RusotoError::Unknown(res)
2813    }
2814}
2815impl fmt::Display for UpdateDataSourceError {
2816    #[allow(unused_variables)]
2817    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2818        match *self {
2819            UpdateDataSourceError::InternalServer(ref cause) => write!(f, "{}", cause),
2820            UpdateDataSourceError::InvalidInput(ref cause) => write!(f, "{}", cause),
2821            UpdateDataSourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2822        }
2823    }
2824}
2825impl Error for UpdateDataSourceError {}
2826/// Errors returned by UpdateEvaluation
2827#[derive(Debug, PartialEq)]
2828pub enum UpdateEvaluationError {
2829    /// <p>An error on the server occurred when trying to process a request.</p>
2830    InternalServer(String),
2831    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2832    InvalidInput(String),
2833    /// <p>A specified resource cannot be located.</p>
2834    ResourceNotFound(String),
2835}
2836
2837impl UpdateEvaluationError {
2838    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateEvaluationError> {
2839        if let Some(err) = proto::json::Error::parse(&res) {
2840            match err.typ.as_str() {
2841                "InternalServerException" => {
2842                    return RusotoError::Service(UpdateEvaluationError::InternalServer(err.msg))
2843                }
2844                "InvalidInputException" => {
2845                    return RusotoError::Service(UpdateEvaluationError::InvalidInput(err.msg))
2846                }
2847                "ResourceNotFoundException" => {
2848                    return RusotoError::Service(UpdateEvaluationError::ResourceNotFound(err.msg))
2849                }
2850                "ValidationException" => return RusotoError::Validation(err.msg),
2851                _ => {}
2852            }
2853        }
2854        RusotoError::Unknown(res)
2855    }
2856}
2857impl fmt::Display for UpdateEvaluationError {
2858    #[allow(unused_variables)]
2859    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2860        match *self {
2861            UpdateEvaluationError::InternalServer(ref cause) => write!(f, "{}", cause),
2862            UpdateEvaluationError::InvalidInput(ref cause) => write!(f, "{}", cause),
2863            UpdateEvaluationError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2864        }
2865    }
2866}
2867impl Error for UpdateEvaluationError {}
2868/// Errors returned by UpdateMLModel
2869#[derive(Debug, PartialEq)]
2870pub enum UpdateMLModelError {
2871    /// <p>An error on the server occurred when trying to process a request.</p>
2872    InternalServer(String),
2873    /// <p>An error on the client occurred. Typically, the cause is an invalid input value.</p>
2874    InvalidInput(String),
2875    /// <p>A specified resource cannot be located.</p>
2876    ResourceNotFound(String),
2877}
2878
2879impl UpdateMLModelError {
2880    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateMLModelError> {
2881        if let Some(err) = proto::json::Error::parse(&res) {
2882            match err.typ.as_str() {
2883                "InternalServerException" => {
2884                    return RusotoError::Service(UpdateMLModelError::InternalServer(err.msg))
2885                }
2886                "InvalidInputException" => {
2887                    return RusotoError::Service(UpdateMLModelError::InvalidInput(err.msg))
2888                }
2889                "ResourceNotFoundException" => {
2890                    return RusotoError::Service(UpdateMLModelError::ResourceNotFound(err.msg))
2891                }
2892                "ValidationException" => return RusotoError::Validation(err.msg),
2893                _ => {}
2894            }
2895        }
2896        RusotoError::Unknown(res)
2897    }
2898}
2899impl fmt::Display for UpdateMLModelError {
2900    #[allow(unused_variables)]
2901    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2902        match *self {
2903            UpdateMLModelError::InternalServer(ref cause) => write!(f, "{}", cause),
2904            UpdateMLModelError::InvalidInput(ref cause) => write!(f, "{}", cause),
2905            UpdateMLModelError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2906        }
2907    }
2908}
2909impl Error for UpdateMLModelError {}
2910/// Trait representing the capabilities of the Amazon Machine Learning API. Amazon Machine Learning clients implement this trait.
2911#[async_trait]
2912pub trait MachineLearning {
2913    /// <p>Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, <code>AddTags</code> updates the tag's value.</p>
2914    async fn add_tags(
2915        &self,
2916        input: AddTagsInput,
2917    ) -> Result<AddTagsOutput, RusotoError<AddTagsError>>;
2918
2919    /// <p>Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a <code>DataSource</code>. This operation creates a new <code>BatchPrediction</code>, and uses an <code>MLModel</code> and the data files referenced by the <code>DataSource</code> as information sources. </p> <p><code>CreateBatchPrediction</code> is an asynchronous operation. In response to <code>CreateBatchPrediction</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>BatchPrediction</code> status to <code>PENDING</code>. After the <code>BatchPrediction</code> completes, Amazon ML sets the status to <code>COMPLETED</code>. </p> <p>You can poll for status updates by using the <a>GetBatchPrediction</a> operation and checking the <code>Status</code> parameter of the result. After the <code>COMPLETED</code> status appears, the results are available in the location specified by the <code>OutputUri</code> parameter.</p>
2920    async fn create_batch_prediction(
2921        &self,
2922        input: CreateBatchPredictionInput,
2923    ) -> Result<CreateBatchPredictionOutput, RusotoError<CreateBatchPredictionError>>;
2924
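    // Illustrative example (not part of the generated API): the poll-until-COMPLETED
    // workflow that the `CreateBatchPrediction` documentation above describes. The
    // input/output field names (`batch_prediction_id`, `status`) and the availability of
    // `tokio::time::sleep` in the caller's crate are assumptions, not something shown in
    // this excerpt.
    //
    // async fn run_batch_prediction(
    //     client: &MachineLearningClient,
    //     input: CreateBatchPredictionInput,
    // ) -> Result<(), Box<dyn std::error::Error>> {
    //     let created = client.create_batch_prediction(input.clone()).await?;
    //     let id = created
    //         .batch_prediction_id
    //         .unwrap_or(input.batch_prediction_id);
    //
    //     loop {
    //         let current = client
    //             .get_batch_prediction(GetBatchPredictionInput {
    //                 batch_prediction_id: id.clone(),
    //             })
    //             .await?;
    //         match current.status.as_deref() {
    //             // Results are now available at the `OutputUri` given in the request.
    //             Some("COMPLETED") => break,
    //             Some("FAILED") | Some("DELETED") => {
    //                 return Err(format!("batch prediction ended as {:?}", current.status).into())
    //             }
    //             // Still PENDING or INPROGRESS: wait and poll again.
    //             _ => tokio::time::sleep(std::time::Duration::from_secs(30)).await,
    //         }
    //     }
    //     Ok(())
    // }
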
2925    /// <p>Creates a <code>DataSource</code> object from an <a href="http://aws.amazon.com/rds/"> Amazon Relational Database Service</a> (Amazon RDS). A <code>DataSource</code> references data that can be used to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p> <p><code>CreateDataSourceFromRDS</code> is an asynchronous operation. In response to <code>CreateDataSourceFromRDS</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> is created and ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. <code>DataSource</code> in the <code>COMPLETED</code> or <code>PENDING</code> state can be used only to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations. </p> <p> If Amazon ML cannot accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response. </p>
2926    async fn create_data_source_from_rds(
2927        &self,
2928        input: CreateDataSourceFromRDSInput,
2929    ) -> Result<CreateDataSourceFromRDSOutput, RusotoError<CreateDataSourceFromRDSError>>;
2930
2931    /// <p><p>Creates a <code>DataSource</code> from a database hosted on an Amazon Redshift cluster. A <code>DataSource</code> references data that can be used to perform either <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p> <p><code>CreateDataSourceFromRedshift</code> is an asynchronous operation. In response to <code>CreateDataSourceFromRedshift</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> is created and ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. <code>DataSource</code> in <code>COMPLETED</code> or <code>PENDING</code> states can be used to perform only <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations. </p> <p> If Amazon ML can&#39;t accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response. </p> <p>The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a <code>SelectSqlQuery</code> query. Amazon ML executes an <code>Unload</code> command in Amazon Redshift to transfer the result set of the <code>SelectSqlQuery</code> query to <code>S3StagingLocation</code>.</p> <p>After the <code>DataSource</code> has been created, it&#39;s ready for use in evaluations and batch predictions. If you plan to use the <code>DataSource</code> to train an <code>MLModel</code>, the <code>DataSource</code> also requires a recipe. A recipe describes how each input variable will be used in training an <code>MLModel</code>. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.</p> <p>You can&#39;t change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call <code>GetDataSource</code> for an existing datasource and copy the values to a <code>CreateDataSource</code> call. Change the settings that you want to change and make sure that all required fields have the appropriate values.</p></p>
2932    async fn create_data_source_from_redshift(
2933        &self,
2934        input: CreateDataSourceFromRedshiftInput,
2935    ) -> Result<CreateDataSourceFromRedshiftOutput, RusotoError<CreateDataSourceFromRedshiftError>>;
2936
2937    /// <p>Creates a <code>DataSource</code> object. A <code>DataSource</code> references data that can be used to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p> <p><code>CreateDataSourceFromS3</code> is an asynchronous operation. In response to <code>CreateDataSourceFromS3</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> has been created and is ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. <code>DataSource</code> in the <code>COMPLETED</code> or <code>PENDING</code> state can be used to perform only <code>CreateMLModel</code>, <code>CreateEvaluation</code> or <code>CreateBatchPrediction</code> operations. </p> <p> If Amazon ML can't accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response. </p> <p>The observation data used in a <code>DataSource</code> should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the <code>DataSource</code>. </p> <p>After the <code>DataSource</code> has been created, it's ready to use in evaluations and batch predictions. If you plan to use the <code>DataSource</code> to train an <code>MLModel</code>, the <code>DataSource</code> also needs a recipe. A recipe describes how each input variable will be used in training an <code>MLModel</code>. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.</p>
2938    async fn create_data_source_from_s3(
2939        &self,
2940        input: CreateDataSourceFromS3Input,
2941    ) -> Result<CreateDataSourceFromS3Output, RusotoError<CreateDataSourceFromS3Error>>;
2942
2943    /// <p>Creates a new <code>Evaluation</code> of an <code>MLModel</code>. An <code>MLModel</code> is evaluated on a set of observations associated with a <code>DataSource</code>. Like a <code>DataSource</code> for an <code>MLModel</code>, the <code>DataSource</code> for an <code>Evaluation</code> contains values for the <code>Target Variable</code>. The <code>Evaluation</code> compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effectively the <code>MLModel</code> functions on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding <code>MLModelType</code>: <code>BINARY</code>, <code>REGRESSION</code> or <code>MULTICLASS</code>. </p> <p><code>CreateEvaluation</code> is an asynchronous operation. In response to <code>CreateEvaluation</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to <code>PENDING</code>. After the <code>Evaluation</code> is created and ready for use, Amazon ML sets the status to <code>COMPLETED</code>. </p> <p>You can use the <code>GetEvaluation</code> operation to check the progress of the evaluation during the creation operation.</p>
2944    async fn create_evaluation(
2945        &self,
2946        input: CreateEvaluationInput,
2947    ) -> Result<CreateEvaluationOutput, RusotoError<CreateEvaluationError>>;
2948
2949    /// <p>Creates a new <code>MLModel</code> using the <code>DataSource</code> and the recipe as information sources. </p> <p>An <code>MLModel</code> is nearly immutable. Users can update only the <code>MLModelName</code> and the <code>ScoreThreshold</code> in an <code>MLModel</code> without creating a new <code>MLModel</code>. </p> <p><code>CreateMLModel</code> is an asynchronous operation. In response to <code>CreateMLModel</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>MLModel</code> status to <code>PENDING</code>. After the <code>MLModel</code> has been created and is ready for use, Amazon ML sets the status to <code>COMPLETED</code>. </p> <p>You can use the <code>GetMLModel</code> operation to check the progress of the <code>MLModel</code> during the creation operation.</p> <p> <code>CreateMLModel</code> requires a <code>DataSource</code> with computed statistics, which can be created by setting <code>ComputeStatistics</code> to <code>true</code> in <code>CreateDataSourceFromRDS</code>, <code>CreateDataSourceFromS3</code>, or <code>CreateDataSourceFromRedshift</code> operations. </p>
2950    async fn create_ml_model(
2951        &self,
2952        input: CreateMLModelInput,
2953    ) -> Result<CreateMLModelOutput, RusotoError<CreateMLModelError>>;
2954
2955    /// <p>Creates a real-time endpoint for the <code>MLModel</code>. The endpoint contains the URI of the <code>MLModel</code>; that is, the location to send real-time prediction requests for the specified <code>MLModel</code>.</p>
2956    async fn create_realtime_endpoint(
2957        &self,
2958        input: CreateRealtimeEndpointInput,
2959    ) -> Result<CreateRealtimeEndpointOutput, RusotoError<CreateRealtimeEndpointError>>;
2960
2961    /// <p>Assigns the DELETED status to a <code>BatchPrediction</code>, rendering it unusable.</p> <p>After using the <code>DeleteBatchPrediction</code> operation, you can use the <a>GetBatchPrediction</a> operation to verify that the status of the <code>BatchPrediction</code> changed to DELETED.</p> <p><b>Caution:</b> The result of the <code>DeleteBatchPrediction</code> operation is irreversible.</p>
2962    async fn delete_batch_prediction(
2963        &self,
2964        input: DeleteBatchPredictionInput,
2965    ) -> Result<DeleteBatchPredictionOutput, RusotoError<DeleteBatchPredictionError>>;
2966
2967    /// <p>Assigns the DELETED status to a <code>DataSource</code>, rendering it unusable.</p> <p>After using the <code>DeleteDataSource</code> operation, you can use the <a>GetDataSource</a> operation to verify that the status of the <code>DataSource</code> changed to DELETED.</p> <p><b>Caution:</b> The results of the <code>DeleteDataSource</code> operation are irreversible.</p>
2968    async fn delete_data_source(
2969        &self,
2970        input: DeleteDataSourceInput,
2971    ) -> Result<DeleteDataSourceOutput, RusotoError<DeleteDataSourceError>>;
2972
2973    /// <p><p>Assigns the <code>DELETED</code> status to an <code>Evaluation</code>, rendering it unusable.</p> <p>After invoking the <code>DeleteEvaluation</code> operation, you can use the <code>GetEvaluation</code> operation to verify that the status of the <code>Evaluation</code> changed to <code>DELETED</code>.</p> <caution><title>Caution</title> <p>The results of the <code>DeleteEvaluation</code> operation are irreversible.</p></caution></p>
2974    async fn delete_evaluation(
2975        &self,
2976        input: DeleteEvaluationInput,
2977    ) -> Result<DeleteEvaluationOutput, RusotoError<DeleteEvaluationError>>;
2978
2979    /// <p>Assigns the <code>DELETED</code> status to an <code>MLModel</code>, rendering it unusable.</p> <p>After using the <code>DeleteMLModel</code> operation, you can use the <code>GetMLModel</code> operation to verify that the status of the <code>MLModel</code> changed to DELETED.</p> <p><b>Caution:</b> The result of the <code>DeleteMLModel</code> operation is irreversible.</p>
2980    async fn delete_ml_model(
2981        &self,
2982        input: DeleteMLModelInput,
2983    ) -> Result<DeleteMLModelOutput, RusotoError<DeleteMLModelError>>;
2984
2985    /// <p>Deletes a real-time endpoint of an <code>MLModel</code>.</p>
2986    async fn delete_realtime_endpoint(
2987        &self,
2988        input: DeleteRealtimeEndpointInput,
2989    ) -> Result<DeleteRealtimeEndpointOutput, RusotoError<DeleteRealtimeEndpointError>>;
2990
2991    /// <p>Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.</p> <p>If you specify a tag that doesn't exist, Amazon ML ignores it.</p>
2992    async fn delete_tags(
2993        &self,
2994        input: DeleteTagsInput,
2995    ) -> Result<DeleteTagsOutput, RusotoError<DeleteTagsError>>;
2996
2997    /// <p>Returns a list of <code>BatchPrediction</code> operations that match the search criteria in the request.</p>
2998    async fn describe_batch_predictions(
2999        &self,
3000        input: DescribeBatchPredictionsInput,
3001    ) -> Result<DescribeBatchPredictionsOutput, RusotoError<DescribeBatchPredictionsError>>;
3002
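    // Illustrative example (not part of the generated API): draining a paginated
    // `DescribeBatchPredictions` listing. The `limit`, `next_token`, and `results` field
    // names, and the `Default` impl on the input struct, are assumed from the usual shape
    // of the Describe* operations and are not shown in this excerpt.
    //
    // async fn list_all_batch_predictions(
    //     client: &MachineLearningClient,
    // ) -> Result<Vec<BatchPrediction>, RusotoError<DescribeBatchPredictionsError>> {
    //     let mut all = Vec::new();
    //     let mut next_token: Option<String> = None;
    //     loop {
    //         let page = client
    //             .describe_batch_predictions(DescribeBatchPredictionsInput {
    //                 limit: Some(100),
    //                 next_token: next_token.take(),
    //                 ..Default::default()
    //             })
    //             .await?;
    //         all.extend(page.results.unwrap_or_default());
    //         match page.next_token {
    //             Some(token) => next_token = Some(token),
    //             None => break,
    //         }
    //     }
    //     Ok(all)
    // }
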
3003    /// <p>Returns a list of <code>DataSource</code> objects that match the search criteria in the request.</p>
3004    async fn describe_data_sources(
3005        &self,
3006        input: DescribeDataSourcesInput,
3007    ) -> Result<DescribeDataSourcesOutput, RusotoError<DescribeDataSourcesError>>;
3008
3009    /// <p>Returns a list of <code>Evaluation</code> objects that match the search criteria in the request.</p>
3010    async fn describe_evaluations(
3011        &self,
3012        input: DescribeEvaluationsInput,
3013    ) -> Result<DescribeEvaluationsOutput, RusotoError<DescribeEvaluationsError>>;
3014
3015    /// <p>Returns a list of <code>MLModel</code> objects that match the search criteria in the request.</p>
3016    async fn describe_ml_models(
3017        &self,
3018        input: DescribeMLModelsInput,
3019    ) -> Result<DescribeMLModelsOutput, RusotoError<DescribeMLModelsError>>;
3020
3021    /// <p>Describes one or more of the tags for your Amazon ML object.</p>
3022    async fn describe_tags(
3023        &self,
3024        input: DescribeTagsInput,
3025    ) -> Result<DescribeTagsOutput, RusotoError<DescribeTagsError>>;
3026
3027    /// <p>Returns a <code>BatchPrediction</code> that includes detailed metadata, status, and data file information for a <code>Batch Prediction</code> request.</p>
3028    async fn get_batch_prediction(
3029        &self,
3030        input: GetBatchPredictionInput,
3031    ) -> Result<GetBatchPredictionOutput, RusotoError<GetBatchPredictionError>>;
3032
3033    /// <p>Returns a <code>DataSource</code> that includes metadata and data file information, as well as the current status of the <code>DataSource</code>.</p> <p><code>GetDataSource</code> provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.</p>
3034    async fn get_data_source(
3035        &self,
3036        input: GetDataSourceInput,
3037    ) -> Result<GetDataSourceOutput, RusotoError<GetDataSourceError>>;
3038
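    // Illustrative example (not part of the generated API): requesting the verbose form of
    // a `DataSource`, which adds the schema description and file listing mentioned above.
    // The `data_source_id` and `verbose` field names are assumptions and are not shown in
    // this excerpt.
    //
    // async fn get_data_source_verbose(
    //     client: &MachineLearningClient,
    //     data_source_id: String,
    // ) -> Result<GetDataSourceOutput, RusotoError<GetDataSourceError>> {
    //     client
    //         .get_data_source(GetDataSourceInput {
    //             data_source_id,
    //             verbose: Some(true),
    //         })
    //         .await
    // }
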
3039    /// <p>Returns an <code>Evaluation</code> that includes metadata as well as the current status of the <code>Evaluation</code>.</p>
3040    async fn get_evaluation(
3041        &self,
3042        input: GetEvaluationInput,
3043    ) -> Result<GetEvaluationOutput, RusotoError<GetEvaluationError>>;
3044
3045    /// <p>Returns an <code>MLModel</code> that includes detailed metadata, data source information, and the current status of the <code>MLModel</code>.</p> <p><code>GetMLModel</code> provides results in normal or verbose format. </p>
3046    async fn get_ml_model(
3047        &self,
3048        input: GetMLModelInput,
3049    ) -> Result<GetMLModelOutput, RusotoError<GetMLModelError>>;
3050
3051    /// <p><p>Generates a prediction for the observation using the specified <code>ML Model</code>.</p> <note><title>Note</title> <p>Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.</p></note></p>
3052    async fn predict(
3053        &self,
3054        input: PredictInput,
3055    ) -> Result<PredictOutput, RusotoError<PredictError>>;
3056
3057    /// <p>Updates the <code>BatchPredictionName</code> of a <code>BatchPrediction</code>.</p> <p>You can use the <code>GetBatchPrediction</code> operation to view the contents of the updated data element.</p>
3058    async fn update_batch_prediction(
3059        &self,
3060        input: UpdateBatchPredictionInput,
3061    ) -> Result<UpdateBatchPredictionOutput, RusotoError<UpdateBatchPredictionError>>;
3062
3063    /// <p>Updates the <code>DataSourceName</code> of a <code>DataSource</code>.</p> <p>You can use the <code>GetDataSource</code> operation to view the contents of the updated data element.</p>
3064    async fn update_data_source(
3065        &self,
3066        input: UpdateDataSourceInput,
3067    ) -> Result<UpdateDataSourceOutput, RusotoError<UpdateDataSourceError>>;
3068
3069    /// <p>Updates the <code>EvaluationName</code> of an <code>Evaluation</code>.</p> <p>You can use the <code>GetEvaluation</code> operation to view the contents of the updated data element.</p>
3070    async fn update_evaluation(
3071        &self,
3072        input: UpdateEvaluationInput,
3073    ) -> Result<UpdateEvaluationOutput, RusotoError<UpdateEvaluationError>>;
3074
3075    /// <p>Updates the <code>MLModelName</code> and the <code>ScoreThreshold</code> of an <code>MLModel</code>.</p> <p>You can use the <code>GetMLModel</code> operation to view the contents of the updated data element.</p>
3076    async fn update_ml_model(
3077        &self,
3078        input: UpdateMLModelInput,
3079    ) -> Result<UpdateMLModelOutput, RusotoError<UpdateMLModelError>>;
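
    // Illustrative example (not part of the generated API): adjusting only the score
    // threshold, which together with the name is the one part of an `MLModel` that can be
    // updated in place. The `ml_model_id`, `ml_model_name`, and `score_threshold` field
    // names are assumptions and are not shown in this excerpt.
    //
    // async fn set_score_threshold(
    //     client: &impl MachineLearning,
    //     ml_model_id: String,
    //     threshold: f32,
    // ) -> Result<UpdateMLModelOutput, RusotoError<UpdateMLModelError>> {
    //     client
    //         .update_ml_model(UpdateMLModelInput {
    //             ml_model_id,
    //             ml_model_name: None,
    //             score_threshold: Some(threshold),
    //         })
    //         .await
    // }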
3080}
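
// Illustrative example (not part of the generated API): because every operation is exposed
// through the `MachineLearning` trait above, application code can be written against
// `impl MachineLearning` and exercised in tests with a stub implementation instead of a
// real `MachineLearningClient`. This sketch only uses items defined in this file.
//
// async fn tag_ml_object(
//     client: &impl MachineLearning,
//     resource_id: String,
//     resource_type: String,
//     tags: Vec<Tag>,
// ) -> Result<AddTagsOutput, RusotoError<AddTagsError>> {
//     client
//         .add_tags(AddTagsInput {
//             resource_id,
//             resource_type,
//             tags,
//         })
//         .await
// }
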
3081/// A client for the Amazon Machine Learning API.
3082#[derive(Clone)]
3083pub struct MachineLearningClient {
3084    client: Client,
3085    region: region::Region,
3086}
3087
3088impl MachineLearningClient {
3089    /// Creates a client backed by the default tokio event loop.
3090    ///
3091    /// The client will use the default credentials provider and TLS client.
3092    pub fn new(region: region::Region) -> MachineLearningClient {
3093        MachineLearningClient {
3094            client: Client::shared(),
3095            region,
3096        }
3097    }
3098
3099    pub fn new_with<P, D>(
3100        request_dispatcher: D,
3101        credentials_provider: P,
3102        region: region::Region,
3103    ) -> MachineLearningClient
3104    where
3105        P: ProvideAwsCredentials + Send + Sync + 'static,
3106        D: DispatchSignedRequest + Send + Sync + 'static,
3107    {
3108        MachineLearningClient {
3109            client: Client::new_with(credentials_provider, request_dispatcher),
3110            region,
3111        }
3112    }
3113
3114    pub fn new_with_client(client: Client, region: region::Region) -> MachineLearningClient {
3115        MachineLearningClient { client, region }
3116    }
3117}
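
// Illustrative example (not part of the generated API): constructing clients with the
// helpers above. `Region::UsEast1` is only an example value, and the `HttpClient` and
// `StaticProvider` types are assumed to be available from `rusoto_core` as in other
// Rusoto crates.
//
// fn build_clients() -> (MachineLearningClient, MachineLearningClient) {
//     // Default credential chain and the shared HTTP client.
//     let default_client = MachineLearningClient::new(region::Region::UsEast1);
//
//     // Explicit dispatcher and static credentials, e.g. for tests; note that the
//     // dispatcher comes first in `new_with`, then the credentials provider.
//     let custom_client = MachineLearningClient::new_with(
//         rusoto_core::HttpClient::new().expect("failed to create request dispatcher"),
//         rusoto_core::credential::StaticProvider::new_minimal(
//             "AKIDEXAMPLE".to_owned(),
//             "SECRETEXAMPLE".to_owned(),
//         ),
//         region::Region::UsEast1,
//     );
//
//     (default_client, custom_client)
// }
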
3118
3119#[async_trait]
3120impl MachineLearning for MachineLearningClient {
3121    /// <p>Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, <code>AddTags</code> updates the tag's value.</p>
3122    async fn add_tags(
3123        &self,
3124        input: AddTagsInput,
3125    ) -> Result<AddTagsOutput, RusotoError<AddTagsError>> {
3126        let mut request = self.new_signed_request("POST", "/");
3127        request.add_header("x-amz-target", "AmazonML_20141212.AddTags");
3128        let encoded = serde_json::to_string(&input).unwrap();
3129        request.set_payload(Some(encoded));
3130
3131        let response = self
3132            .sign_and_dispatch(request, AddTagsError::from_response)
3133            .await?;
3134        let mut response = response;
3135        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3136        proto::json::ResponsePayload::new(&response).deserialize::<AddTagsOutput, _>()
3137    }
3138
3139    /// <p>Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a <code>DataSource</code>. This operation creates a new <code>BatchPrediction</code>, and uses an <code>MLModel</code> and the data files referenced by the <code>DataSource</code> as information sources. </p> <p><code>CreateBatchPrediction</code> is an asynchronous operation. In response to <code>CreateBatchPrediction</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>BatchPrediction</code> status to <code>PENDING</code>. After the <code>BatchPrediction</code> completes, Amazon ML sets the status to <code>COMPLETED</code>. </p> <p>You can poll for status updates by using the <a>GetBatchPrediction</a> operation and checking the <code>Status</code> parameter of the result. After the <code>COMPLETED</code> status appears, the results are available in the location specified by the <code>OutputUri</code> parameter.</p>
    async fn create_batch_prediction(
        &self,
        input: CreateBatchPredictionInput,
    ) -> Result<CreateBatchPredictionOutput, RusotoError<CreateBatchPredictionError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.CreateBatchPrediction");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateBatchPredictionError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<CreateBatchPredictionOutput, _>()
    }

    /// <p>Creates a <code>DataSource</code> object from an <a href="http://aws.amazon.com/rds/"> Amazon Relational Database Service</a> (Amazon RDS). A <code>DataSource</code> references data that can be used to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p> <p><code>CreateDataSourceFromRDS</code> is an asynchronous operation. In response to <code>CreateDataSourceFromRDS</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> is created and ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. <code>DataSource</code> in the <code>COMPLETED</code> or <code>PENDING</code> state can be used only to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations. </p> <p> If Amazon ML cannot accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response. </p>
    async fn create_data_source_from_rds(
        &self,
        input: CreateDataSourceFromRDSInput,
    ) -> Result<CreateDataSourceFromRDSOutput, RusotoError<CreateDataSourceFromRDSError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.CreateDataSourceFromRDS");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateDataSourceFromRDSError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<CreateDataSourceFromRDSOutput, _>()
    }

    /// <p>Creates a <code>DataSource</code> from a database hosted on an Amazon Redshift cluster. A <code>DataSource</code> references data that can be used to perform either <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p> <p><code>CreateDataSourceFromRedshift</code> is an asynchronous operation. In response to <code>CreateDataSourceFromRedshift</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> is created and ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. <code>DataSource</code> in <code>COMPLETED</code> or <code>PENDING</code> states can be used to perform only <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations. </p> <p> If Amazon ML can't accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response. </p> <p>The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a <code>SelectSqlQuery</code> query. Amazon ML executes an <code>Unload</code> command in Amazon Redshift to transfer the result set of the <code>SelectSqlQuery</code> query to <code>S3StagingLocation</code>.</p> <p>After the <code>DataSource</code> has been created, it's ready for use in evaluations and batch predictions. If you plan to use the <code>DataSource</code> to train an <code>MLModel</code>, the <code>DataSource</code> also requires a recipe. A recipe describes how each input variable will be used in training an <code>MLModel</code>. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.</p> <p>You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call <code>GetDataSource</code> for an existing datasource and copy the values to a <code>CreateDataSource</code> call. Change the settings that you want to change and make sure that all required fields have the appropriate values.</p>
    async fn create_data_source_from_redshift(
        &self,
        input: CreateDataSourceFromRedshiftInput,
    ) -> Result<CreateDataSourceFromRedshiftOutput, RusotoError<CreateDataSourceFromRedshiftError>>
    {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header(
            "x-amz-target",
            "AmazonML_20141212.CreateDataSourceFromRedshift",
        );
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateDataSourceFromRedshiftError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<CreateDataSourceFromRedshiftOutput, _>()
    }

    /// <p>Creates a <code>DataSource</code> object. A <code>DataSource</code> references data that can be used to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p> <p><code>CreateDataSourceFromS3</code> is an asynchronous operation. In response to <code>CreateDataSourceFromS3</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> has been created and is ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. <code>DataSource</code> in the <code>COMPLETED</code> or <code>PENDING</code> state can be used to perform only <code>CreateMLModel</code>, <code>CreateEvaluation</code> or <code>CreateBatchPrediction</code> operations. </p> <p> If Amazon ML can't accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response. </p> <p>The observation data used in a <code>DataSource</code> should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the <code>DataSource</code>. </p> <p>After the <code>DataSource</code> has been created, it's ready to use in evaluations and batch predictions. If you plan to use the <code>DataSource</code> to train an <code>MLModel</code>, the <code>DataSource</code> also needs a recipe. A recipe describes how each input variable will be used in training an <code>MLModel</code>. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.</p>
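    ///
    /// # Example
    ///
    /// A minimal sketch of creating a `DataSource` from CSV data in S3. The bucket
    /// paths and IDs are placeholders, and the `S3DataSpec` and input field names
    /// follow the AWS request shape; they are assumptions about this crate's
    /// generated structs, with the remaining optional fields left to `Default`.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{
    ///     CreateDataSourceFromS3Input, MachineLearning, MachineLearningClient, S3DataSpec,
    /// };
    ///
    /// async fn create_s3_data_source() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let input = CreateDataSourceFromS3Input {
    ///         data_source_id: "example-s3-ds".to_owned(),
    ///         data_source_name: Some("example training data".to_owned()),
    ///         data_spec: S3DataSpec {
    ///             data_location_s3: "s3://example-bucket/training.csv".to_owned(),
    ///             data_schema_location_s3: Some("s3://example-bucket/training.csv.schema".to_owned()),
    ///             ..Default::default()
    ///         },
    ///         // Computed statistics are required if this DataSource will train an MLModel.
    ///         compute_statistics: Some(true),
    ///         ..Default::default()
    ///     };
    ///     if let Err(err) = client.create_data_source_from_s3(input).await {
    ///         eprintln!("CreateDataSourceFromS3 failed: {}", err);
    ///     }
    /// }
    /// ```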
    async fn create_data_source_from_s3(
        &self,
        input: CreateDataSourceFromS3Input,
    ) -> Result<CreateDataSourceFromS3Output, RusotoError<CreateDataSourceFromS3Error>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.CreateDataSourceFromS3");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateDataSourceFromS3Error::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<CreateDataSourceFromS3Output, _>()
    }

    /// <p>Creates a new <code>Evaluation</code> of an <code>MLModel</code>. An <code>MLModel</code> is evaluated on a set of observations associated with a <code>DataSource</code>. Like a <code>DataSource</code> for an <code>MLModel</code>, the <code>DataSource</code> for an <code>Evaluation</code> contains values for the <code>Target Variable</code>. The <code>Evaluation</code> compares the predicted result for each observation to the actual outcome and provides a summary so that you know how well the <code>MLModel</code> performs on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE, or MulticlassAvgFScore, based on the corresponding <code>MLModelType</code>: <code>BINARY</code>, <code>REGRESSION</code>, or <code>MULTICLASS</code>. </p> <p><code>CreateEvaluation</code> is an asynchronous operation. In response to <code>CreateEvaluation</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to <code>PENDING</code>. After the <code>Evaluation</code> is created and ready for use, Amazon ML sets the status to <code>COMPLETED</code>. </p> <p>You can use the <code>GetEvaluation</code> operation to check progress of the evaluation during the creation operation.</p>
    async fn create_evaluation(
        &self,
        input: CreateEvaluationInput,
    ) -> Result<CreateEvaluationOutput, RusotoError<CreateEvaluationError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.CreateEvaluation");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateEvaluationError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<CreateEvaluationOutput, _>()
    }

    /// <p>Creates a new <code>MLModel</code> using the <code>DataSource</code> and the recipe as information sources. </p> <p>An <code>MLModel</code> is nearly immutable. Users can update only the <code>MLModelName</code> and the <code>ScoreThreshold</code> in an <code>MLModel</code> without creating a new <code>MLModel</code>. </p> <p><code>CreateMLModel</code> is an asynchronous operation. In response to <code>CreateMLModel</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>MLModel</code> status to <code>PENDING</code>. After the <code>MLModel</code> has been created and is ready for use, Amazon ML sets the status to <code>COMPLETED</code>. </p> <p>You can use the <code>GetMLModel</code> operation to check the progress of the <code>MLModel</code> during the creation operation.</p> <p> <code>CreateMLModel</code> requires a <code>DataSource</code> with computed statistics, which can be created by setting <code>ComputeStatistics</code> to <code>true</code> in <code>CreateDataSourceFromRDS</code>, <code>CreateDataSourceFromS3</code>, or <code>CreateDataSourceFromRedshift</code> operations. </p>
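    ///
    /// # Example
    ///
    /// A minimal sketch of training a regression model from an existing
    /// `DataSource` that was created with computed statistics. The IDs are
    /// placeholders, and the optional fields (name, recipe, training parameters)
    /// are left to `Default`; field names follow the AWS request shape and are
    /// assumptions about this crate's generated structs.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{CreateMLModelInput, MachineLearning, MachineLearningClient};
    ///
    /// async fn train_model() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let input = CreateMLModelInput {
    ///         ml_model_id: "exampleModelId".to_owned(),
    ///         ml_model_type: "REGRESSION".to_owned(), // BINARY | REGRESSION | MULTICLASS
    ///         training_data_source_id: "example-s3-ds".to_owned(),
    ///         ..Default::default()
    ///     };
    ///     // Returns immediately with the model in the PENDING state.
    ///     if let Err(err) = client.create_ml_model(input).await {
    ///         eprintln!("CreateMLModel failed: {}", err);
    ///     }
    /// }
    /// ```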
    async fn create_ml_model(
        &self,
        input: CreateMLModelInput,
    ) -> Result<CreateMLModelOutput, RusotoError<CreateMLModelError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.CreateMLModel");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateMLModelError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<CreateMLModelOutput, _>()
    }

    /// <p>Creates a real-time endpoint for the <code>MLModel</code>. The endpoint contains the URI of the <code>MLModel</code>; that is, the location to send real-time prediction requests for the specified <code>MLModel</code>.</p>
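    ///
    /// # Example
    ///
    /// A minimal sketch of enabling real-time predictions for a trained model. The
    /// model ID is a placeholder, and printing the whole output with `{:?}` avoids
    /// assuming the exact field layout of `CreateRealtimeEndpointOutput`.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{
    ///     CreateRealtimeEndpointInput, MachineLearning, MachineLearningClient,
    /// };
    ///
    /// async fn enable_realtime_endpoint() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let input = CreateRealtimeEndpointInput {
    ///         ml_model_id: "exampleModelId".to_owned(),
    ///     };
    ///     match client.create_realtime_endpoint(input).await {
    ///         Ok(output) => println!("endpoint info: {:?}", output),
    ///         Err(err) => eprintln!("CreateRealtimeEndpoint failed: {}", err),
    ///     }
    /// }
    /// ```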
    async fn create_realtime_endpoint(
        &self,
        input: CreateRealtimeEndpointInput,
    ) -> Result<CreateRealtimeEndpointOutput, RusotoError<CreateRealtimeEndpointError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.CreateRealtimeEndpoint");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, CreateRealtimeEndpointError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<CreateRealtimeEndpointOutput, _>()
    }

    /// <p>Assigns the DELETED status to a <code>BatchPrediction</code>, rendering it unusable.</p> <p>After using the <code>DeleteBatchPrediction</code> operation, you can use the <a>GetBatchPrediction</a> operation to verify that the status of the <code>BatchPrediction</code> changed to DELETED.</p> <p><b>Caution:</b> The result of the <code>DeleteBatchPrediction</code> operation is irreversible.</p>
    async fn delete_batch_prediction(
        &self,
        input: DeleteBatchPredictionInput,
    ) -> Result<DeleteBatchPredictionOutput, RusotoError<DeleteBatchPredictionError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DeleteBatchPrediction");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DeleteBatchPredictionError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DeleteBatchPredictionOutput, _>()
    }

    /// <p>Assigns the DELETED status to a <code>DataSource</code>, rendering it unusable.</p> <p>After using the <code>DeleteDataSource</code> operation, you can use the <a>GetDataSource</a> operation to verify that the status of the <code>DataSource</code> changed to DELETED.</p> <p><b>Caution:</b> The results of the <code>DeleteDataSource</code> operation are irreversible.</p>
    async fn delete_data_source(
        &self,
        input: DeleteDataSourceInput,
    ) -> Result<DeleteDataSourceOutput, RusotoError<DeleteDataSourceError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DeleteDataSource");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DeleteDataSourceError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DeleteDataSourceOutput, _>()
    }

    /// <p>Assigns the <code>DELETED</code> status to an <code>Evaluation</code>, rendering it unusable.</p> <p>After invoking the <code>DeleteEvaluation</code> operation, you can use the <code>GetEvaluation</code> operation to verify that the status of the <code>Evaluation</code> changed to <code>DELETED</code>.</p> <p><b>Caution:</b> The results of the <code>DeleteEvaluation</code> operation are irreversible.</p>
    async fn delete_evaluation(
        &self,
        input: DeleteEvaluationInput,
    ) -> Result<DeleteEvaluationOutput, RusotoError<DeleteEvaluationError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DeleteEvaluation");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DeleteEvaluationError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DeleteEvaluationOutput, _>()
    }

    /// <p>Assigns the <code>DELETED</code> status to an <code>MLModel</code>, rendering it unusable.</p> <p>After using the <code>DeleteMLModel</code> operation, you can use the <code>GetMLModel</code> operation to verify that the status of the <code>MLModel</code> changed to DELETED.</p> <p><b>Caution:</b> The result of the <code>DeleteMLModel</code> operation is irreversible.</p>
    async fn delete_ml_model(
        &self,
        input: DeleteMLModelInput,
    ) -> Result<DeleteMLModelOutput, RusotoError<DeleteMLModelError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DeleteMLModel");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DeleteMLModelError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DeleteMLModelOutput, _>()
    }

    /// <p>Deletes a real-time endpoint of an <code>MLModel</code>.</p>
    async fn delete_realtime_endpoint(
        &self,
        input: DeleteRealtimeEndpointInput,
    ) -> Result<DeleteRealtimeEndpointOutput, RusotoError<DeleteRealtimeEndpointError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DeleteRealtimeEndpoint");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DeleteRealtimeEndpointError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<DeleteRealtimeEndpointOutput, _>()
    }

    /// <p>Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.</p> <p>If you specify a tag that doesn't exist, Amazon ML ignores it.</p>
    async fn delete_tags(
        &self,
        input: DeleteTagsInput,
    ) -> Result<DeleteTagsOutput, RusotoError<DeleteTagsError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DeleteTags");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DeleteTagsError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DeleteTagsOutput, _>()
    }

    /// <p>Returns a list of <code>BatchPrediction</code> operations that match the search criteria in the request.</p>
    async fn describe_batch_predictions(
        &self,
        input: DescribeBatchPredictionsInput,
    ) -> Result<DescribeBatchPredictionsOutput, RusotoError<DescribeBatchPredictionsError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DescribeBatchPredictions");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DescribeBatchPredictionsError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<DescribeBatchPredictionsOutput, _>()
    }

    /// <p>Returns a list of <code>DataSource</code> objects that match the search criteria in the request.</p>
    async fn describe_data_sources(
        &self,
        input: DescribeDataSourcesInput,
    ) -> Result<DescribeDataSourcesOutput, RusotoError<DescribeDataSourcesError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DescribeDataSources");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DescribeDataSourcesError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DescribeDataSourcesOutput, _>()
    }

    /// <p>Returns a list of <code>Evaluation</code> objects that match the search criteria in the request.</p>
    async fn describe_evaluations(
        &self,
        input: DescribeEvaluationsInput,
    ) -> Result<DescribeEvaluationsOutput, RusotoError<DescribeEvaluationsError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DescribeEvaluations");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DescribeEvaluationsError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DescribeEvaluationsOutput, _>()
    }

    /// <p>Returns a list of <code>MLModel</code> objects that match the search criteria in the request.</p>
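    ///
    /// # Example
    ///
    /// A minimal sketch of listing models. The `limit` field name is an assumption
    /// taken from the AWS request shape; every other filter is left to `Default`,
    /// and the output is printed via `Debug` to avoid assuming its exact layout.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{DescribeMLModelsInput, MachineLearning, MachineLearningClient};
    ///
    /// async fn list_models() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let input = DescribeMLModelsInput {
    ///         limit: Some(10), // assumed optional page-size field
    ///         ..Default::default()
    ///     };
    ///     match client.describe_ml_models(input).await {
    ///         Ok(output) => println!("{:?}", output),
    ///         Err(err) => eprintln!("DescribeMLModels failed: {}", err),
    ///     }
    /// }
    /// ```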
    async fn describe_ml_models(
        &self,
        input: DescribeMLModelsInput,
    ) -> Result<DescribeMLModelsOutput, RusotoError<DescribeMLModelsError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DescribeMLModels");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DescribeMLModelsError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DescribeMLModelsOutput, _>()
    }

    /// <p>Describes one or more of the tags for your Amazon ML object.</p>
    async fn describe_tags(
        &self,
        input: DescribeTagsInput,
    ) -> Result<DescribeTagsOutput, RusotoError<DescribeTagsError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.DescribeTags");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DescribeTagsError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<DescribeTagsOutput, _>()
    }

    /// <p>Returns a <code>BatchPrediction</code> that includes detailed metadata, status, and data file information for a <code>Batch Prediction</code> request.</p>
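    ///
    /// # Example
    ///
    /// A minimal sketch of checking on a batch prediction. The ID is a placeholder
    /// and the `status` output field is assumed from the AWS response shape; a
    /// caller would normally repeat this call with a delay while the job is still
    /// running.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{GetBatchPredictionInput, MachineLearning, MachineLearningClient};
    ///
    /// async fn check_batch_prediction() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let output = client
    ///         .get_batch_prediction(GetBatchPredictionInput {
    ///             batch_prediction_id: "example-bp-id".to_owned(),
    ///         })
    ///         .await
    ///         .expect("GetBatchPrediction failed");
    ///     // PENDING / INPROGRESS mean the job is still running; COMPLETED means the
    ///     // results are available at the configured OutputUri.
    ///     println!("status: {:?}", output.status);
    /// }
    /// ```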
    async fn get_batch_prediction(
        &self,
        input: GetBatchPredictionInput,
    ) -> Result<GetBatchPredictionOutput, RusotoError<GetBatchPredictionError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.GetBatchPrediction");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, GetBatchPredictionError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<GetBatchPredictionOutput, _>()
    }

    /// <p>Returns a <code>DataSource</code> that includes metadata and data file information, as well as the current status of the <code>DataSource</code>.</p> <p><code>GetDataSource</code> provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.</p>
    async fn get_data_source(
        &self,
        input: GetDataSourceInput,
    ) -> Result<GetDataSourceOutput, RusotoError<GetDataSourceError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.GetDataSource");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, GetDataSourceError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<GetDataSourceOutput, _>()
    }

    /// <p>Returns an <code>Evaluation</code> that includes metadata as well as the current status of the <code>Evaluation</code>.</p>
    async fn get_evaluation(
        &self,
        input: GetEvaluationInput,
    ) -> Result<GetEvaluationOutput, RusotoError<GetEvaluationError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.GetEvaluation");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, GetEvaluationError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<GetEvaluationOutput, _>()
    }

    /// <p>Returns an <code>MLModel</code> that includes detailed metadata, data source information, and the current status of the <code>MLModel</code>.</p> <p><code>GetMLModel</code> provides results in normal or verbose format. </p>
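    ///
    /// # Example
    ///
    /// A minimal sketch of fetching a model in verbose mode. The model ID is a
    /// placeholder, and the `verbose` input field and `status` output field are
    /// assumed from the AWS API shape rather than verified against this crate's
    /// exact struct definitions.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{GetMLModelInput, MachineLearning, MachineLearningClient};
    ///
    /// async fn inspect_model() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let input = GetMLModelInput {
    ///         ml_model_id: "exampleModelId".to_owned(),
    ///         verbose: Some(true), // include the recipe in the response
    ///     };
    ///     match client.get_ml_model(input).await {
    ///         Ok(output) => println!("model status: {:?}", output.status),
    ///         Err(err) => eprintln!("GetMLModel failed: {}", err),
    ///     }
    /// }
    /// ```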
    async fn get_ml_model(
        &self,
        input: GetMLModelInput,
    ) -> Result<GetMLModelOutput, RusotoError<GetMLModelError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.GetMLModel");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, GetMLModelError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<GetMLModelOutput, _>()
    }

    /// <p>Generates a prediction for the observation using the specified <code>ML Model</code>.</p> <p><b>Note:</b> Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.</p>
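    ///
    /// # Example
    ///
    /// A minimal sketch of requesting a real-time prediction. The model ID,
    /// endpoint URL, and record contents are placeholders; the endpoint normally
    /// comes from `CreateRealtimeEndpoint`, and the `record` field is assumed to be
    /// a map of attribute names to string values per the AWS API.
    ///
    /// ```rust,no_run
    /// use std::collections::HashMap;
    ///
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{MachineLearning, MachineLearningClient, PredictInput};
    ///
    /// async fn predict_one() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let mut record = HashMap::new();
    ///     record.insert("feature_1".to_owned(), "42".to_owned()); // placeholder attribute
    ///     let input = PredictInput {
    ///         ml_model_id: "exampleModelId".to_owned(),
    ///         predict_endpoint: "https://realtime.machinelearning.us-east-1.amazonaws.com".to_owned(),
    ///         record,
    ///     };
    ///     match client.predict(input).await {
    ///         Ok(output) => println!("{:?}", output),
    ///         Err(err) => eprintln!("Predict failed: {}", err),
    ///     }
    /// }
    /// ```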
    async fn predict(
        &self,
        input: PredictInput,
    ) -> Result<PredictOutput, RusotoError<PredictError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.Predict");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, PredictError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<PredictOutput, _>()
    }

    /// <p>Updates the <code>BatchPredictionName</code> of a <code>BatchPrediction</code>.</p> <p>You can use the <code>GetBatchPrediction</code> operation to view the contents of the updated data element.</p>
    async fn update_batch_prediction(
        &self,
        input: UpdateBatchPredictionInput,
    ) -> Result<UpdateBatchPredictionOutput, RusotoError<UpdateBatchPredictionError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.UpdateBatchPrediction");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, UpdateBatchPredictionError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<UpdateBatchPredictionOutput, _>()
    }

    /// <p>Updates the <code>DataSourceName</code> of a <code>DataSource</code>.</p> <p>You can use the <code>GetDataSource</code> operation to view the contents of the updated data element.</p>
    async fn update_data_source(
        &self,
        input: UpdateDataSourceInput,
    ) -> Result<UpdateDataSourceOutput, RusotoError<UpdateDataSourceError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.UpdateDataSource");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, UpdateDataSourceError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<UpdateDataSourceOutput, _>()
    }

    /// <p>Updates the <code>EvaluationName</code> of an <code>Evaluation</code>.</p> <p>You can use the <code>GetEvaluation</code> operation to view the contents of the updated data element.</p>
    async fn update_evaluation(
        &self,
        input: UpdateEvaluationInput,
    ) -> Result<UpdateEvaluationOutput, RusotoError<UpdateEvaluationError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.UpdateEvaluation");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, UpdateEvaluationError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<UpdateEvaluationOutput, _>()
    }

    /// <p>Updates the <code>MLModelName</code> and the <code>ScoreThreshold</code> of an <code>MLModel</code>.</p> <p>You can use the <code>GetMLModel</code> operation to view the contents of the updated data element.</p>
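    ///
    /// # Example
    ///
    /// A minimal sketch of renaming a binary model and moving its score threshold.
    /// The ID, name, and threshold are placeholders, and the optional
    /// `ml_model_name`/`score_threshold` field names and types are assumed from the
    /// AWS request shape rather than verified against this crate's structs.
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_machinelearning::{MachineLearning, MachineLearningClient, UpdateMLModelInput};
    ///
    /// async fn rename_and_rethreshold() {
    ///     let client = MachineLearningClient::new(Region::UsEast1);
    ///     let input = UpdateMLModelInput {
    ///         ml_model_id: "exampleModelId".to_owned(),
    ///         ml_model_name: Some("fraud-model-v2".to_owned()),
    ///         // Only meaningful for BINARY models: scores >= 0.7 map to the positive label.
    ///         score_threshold: Some(0.7),
    ///     };
    ///     if let Err(err) = client.update_ml_model(input).await {
    ///         eprintln!("UpdateMLModel failed: {}", err);
    ///     }
    /// }
    /// ```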
    async fn update_ml_model(
        &self,
        input: UpdateMLModelInput,
    ) -> Result<UpdateMLModelOutput, RusotoError<UpdateMLModelError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "AmazonML_20141212.UpdateMLModel");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, UpdateMLModelError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<UpdateMLModelOutput, _>()
    }
}