rusoto_forecast/
generated.rs

1// =================================================================
2//
3//                           * WARNING *
4//
5//                    This file is generated!
6//
7//  Changes made to this file will be overwritten. If changes are
8//  required to the generated code, the service_crategen project
9//  must be updated to generate the changes.
10//
11// =================================================================
12
13use std::error::Error;
14use std::fmt;
15
16use async_trait::async_trait;
17use rusoto_core::credential::ProvideAwsCredentials;
18use rusoto_core::region;
19use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
20use rusoto_core::{Client, RusotoError};
21
22use rusoto_core::proto;
23use rusoto_core::request::HttpResponse;
24use rusoto_core::signature::SignedRequest;
25#[allow(unused_imports)]
26use serde::{Deserialize, Serialize};
27
28impl ForecastClient {
29    fn new_signed_request(&self, http_method: &str, request_uri: &str) -> SignedRequest {
30        let mut request = SignedRequest::new(http_method, "forecast", &self.region, request_uri);
31
32        request.set_content_type("application/x-amz-json-1.1".to_owned());
33
34        request
35    }
36
37    async fn sign_and_dispatch<E>(
38        &self,
39        request: SignedRequest,
40        from_response: fn(BufferedHttpResponse) -> RusotoError<E>,
41    ) -> Result<HttpResponse, RusotoError<E>> {
42        let mut response = self.client.sign_and_dispatch(request).await?;
43        if !response.status.is_success() {
44            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
45            return Err(from_response(response));
46        }
47
48        Ok(response)
49    }
50}
51
52use serde_json;
/// <p>Specifies a categorical hyperparameter and its range of tunable values. This object is part of the <a>ParameterRanges</a> object.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct CategoricalParameterRange {
    /// <p>The name of the categorical hyperparameter to tune.</p>
    #[serde(rename = "Name")]
    pub name: String,
    /// <p>A list of the tunable categories for the hyperparameter.</p>
    #[serde(rename = "Values")]
    pub values: Vec<String>,
}
63
/// <p>Specifies a continuous hyperparameter and its range of tunable values. This object is part of the <a>ParameterRanges</a> object.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct ContinuousParameterRange {
    /// <p>The maximum tunable value of the hyperparameter.</p>
    #[serde(rename = "MaxValue")]
    pub max_value: f64,
    /// <p>The minimum tunable value of the hyperparameter.</p>
    #[serde(rename = "MinValue")]
    pub min_value: f64,
    /// <p>The name of the hyperparameter to tune.</p>
    #[serde(rename = "Name")]
    pub name: String,
    /// <p>The scale that hyperparameter tuning uses to search the hyperparameter range. Valid values:</p> <dl> <dt>Auto</dt> <dd> <p>Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.</p> </dd> <dt>Linear</dt> <dd> <p>Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.</p> </dd> <dt>Logarithmic</dt> <dd> <p>Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.</p> <p>Logarithmic scaling works only for ranges that have values greater than 0.</p> </dd> <dt>ReverseLogarithmic</dt> <dd> <p>Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.</p> <p>Reverse logarithmic scaling works only for ranges that are entirely within the range 0 &lt;= x &lt; 1.0.</p> </dd> </dl> <p>For information about choosing a hyperparameter scale, see <a href="http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type">Hyperparameter Scaling</a>. One of the following values:</p>
    #[serde(rename = "ScalingType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scaling_type: Option<String>,
}
81
/// <p>Container for the parameters to the <code>CreateDatasetGroup</code> operation.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDatasetGroupRequest {
    /// <p>An array of Amazon Resource Names (ARNs) of the datasets that you want to include in the dataset group.</p>
    #[serde(rename = "DatasetArns")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_arns: Option<Vec<String>>,
    /// <p>A name for the dataset group.</p>
    #[serde(rename = "DatasetGroupName")]
    pub dataset_group_name: String,
    /// <p>The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the <code>Domain</code> parameter of the <a>CreateDataset</a> operation must match.</p> <p>The <code>Domain</code> and <code>DatasetType</code> that you choose determine the fields that must be present in training data that you import to a dataset. For example, if you choose the <code>RETAIL</code> domain and <code>TARGET_TIME_SERIES</code> as the <code>DatasetType</code>, Amazon Forecast requires that <code>item_id</code>, <code>timestamp</code>, and <code>demand</code> fields are present in your data. For more information, see <a>howitworks-datasets-groups</a>.</p>
    #[serde(rename = "Domain")]
    pub domain: String,
    /// <p><p>The optional metadata that you apply to the dataset group to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
}
100
/// <p>Response returned by the <code>CreateDatasetGroup</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDatasetGroupResponse {
    /// <p>The Amazon Resource Name (ARN) of the dataset group.</p>
    #[serde(rename = "DatasetGroupArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_arn: Option<String>,
}
109
/// <p>Container for the parameters to the <code>CreateDatasetImportJob</code> operation.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDatasetImportJobRequest {
    /// <p>The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.</p> <p>If encryption is used, <code>DataSource</code> must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the <code>EncryptionConfig</code> parameter of the <a>CreateDataset</a> operation.</p>
    #[serde(rename = "DataSource")]
    pub data_source: DataSource,
    /// <p>The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.</p>
    #[serde(rename = "DatasetArn")]
    pub dataset_arn: String,
    /// <p>The name for the dataset import job. We recommend including the current timestamp in the name, for example, <code>20190721DatasetImport</code>. This can help you avoid getting a <code>ResourceAlreadyExistsException</code> exception.</p>
    #[serde(rename = "DatasetImportJobName")]
    pub dataset_import_job_name: String,
    /// <p><p>The optional metadata that you apply to the dataset import job to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
    /// <p>The format of timestamps in the dataset. The format that you specify depends on the <code>DataFrequency</code> specified when the dataset was created. The following formats are supported</p> <ul> <li> <p>"yyyy-MM-dd"</p> <p>For the following data frequencies: Y, M, W, and D</p> </li> <li> <p>"yyyy-MM-dd HH:mm:ss"</p> <p>For the following data frequencies: H, 30min, 15min, and 1min; and optionally, for: Y, M, W, and D</p> </li> </ul> <p>If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".</p>
    #[serde(rename = "TimestampFormat")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timestamp_format: Option<String>,
}
131
/// <p>Response returned by the <code>CreateDatasetImportJob</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDatasetImportJobResponse {
    /// <p>The Amazon Resource Name (ARN) of the dataset import job.</p>
    #[serde(rename = "DatasetImportJobArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_import_job_arn: Option<String>,
}
140
/// <p>Container for the parameters to the <code>CreateDataset</code> operation.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDatasetRequest {
    /// <p>The frequency of data collection. This parameter is required for RELATED_TIME_SERIES datasets.</p> <p>Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, "D" indicates every day and "15min" indicates every 15 minutes.</p>
    #[serde(rename = "DataFrequency")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_frequency: Option<String>,
    /// <p>A name for the dataset.</p>
    #[serde(rename = "DatasetName")]
    pub dataset_name: String,
    /// <p>The dataset type. Valid values depend on the chosen <code>Domain</code>.</p>
    #[serde(rename = "DatasetType")]
    pub dataset_type: String,
    /// <p>The domain associated with the dataset. When you add a dataset to a dataset group, this value and the value specified for the <code>Domain</code> parameter of the <a>CreateDatasetGroup</a> operation must match.</p> <p>The <code>Domain</code> and <code>DatasetType</code> that you choose determine the fields that must be present in the training data that you import to the dataset. For example, if you choose the <code>RETAIL</code> domain and <code>TARGET_TIME_SERIES</code> as the <code>DatasetType</code>, Amazon Forecast requires <code>item_id</code>, <code>timestamp</code>, and <code>demand</code> fields to be present in your data. For more information, see <a>howitworks-datasets-groups</a>.</p>
    #[serde(rename = "Domain")]
    pub domain: String,
    /// <p>An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.</p>
    #[serde(rename = "EncryptionConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_config: Option<EncryptionConfig>,
    /// <p>The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset <code>Domain</code> and <code>DatasetType</code> that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see <a>howitworks-domains-ds-types</a>.</p>
    #[serde(rename = "Schema")]
    pub schema: Schema,
    /// <p><p>The optional metadata that you apply to the dataset to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
}
169
/// <p>Response returned by the <code>CreateDataset</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDatasetResponse {
    /// <p>The Amazon Resource Name (ARN) of the dataset.</p>
    #[serde(rename = "DatasetArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_arn: Option<String>,
}
178
/// <p>Container for the parameters to the <code>CreateForecastExportJob</code> operation.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateForecastExportJobRequest {
    /// <p>The location where you want to save the forecast and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the location. The forecast must be exported to an Amazon S3 bucket.</p> <p>If encryption is used, <code>Destination</code> must include an AWS Key Management Service (KMS) key. The IAM role must allow Amazon Forecast permission to access the key.</p>
    #[serde(rename = "Destination")]
    pub destination: DataDestination,
    /// <p>The Amazon Resource Name (ARN) of the forecast that you want to export.</p>
    #[serde(rename = "ForecastArn")]
    pub forecast_arn: String,
    /// <p>The name for the forecast export job.</p>
    #[serde(rename = "ForecastExportJobName")]
    pub forecast_export_job_name: String,
    /// <p><p>The optional metadata that you apply to the forecast export job to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
}
196
/// <p>Response returned by the <code>CreateForecastExportJob</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateForecastExportJobResponse {
    /// <p>The Amazon Resource Name (ARN) of the export job.</p>
    #[serde(rename = "ForecastExportJobArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_export_job_arn: Option<String>,
}
205
/// <p>Container for the parameters to the <code>CreateForecast</code> operation.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateForecastRequest {
    /// <p>A name for the forecast.</p>
    #[serde(rename = "ForecastName")]
    pub forecast_name: String,
    /// <p>The quantiles at which probabilistic forecasts are generated. <b>You can currently specify up to 5 quantiles per forecast</b>. Accepted values include <code>0.01 to 0.99</code> (increments of .01 only) and <code>mean</code>. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial). The default value is <code>["0.1", "0.5", "0.9"]</code>.</p>
    #[serde(rename = "ForecastTypes")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_types: Option<Vec<String>>,
    /// <p>The Amazon Resource Name (ARN) of the predictor to use to generate the forecast.</p>
    #[serde(rename = "PredictorArn")]
    pub predictor_arn: String,
    /// <p><p>The optional metadata that you apply to the forecast to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
}
224
/// <p>Response returned by the <code>CreateForecast</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateForecastResponse {
    /// <p>The Amazon Resource Name (ARN) of the forecast.</p>
    #[serde(rename = "ForecastArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_arn: Option<String>,
}
233
/// <p>Container for the parameters to the <code>CreatePredictor</code> operation.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreatePredictorRequest {
    /// <p><p>The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if <code>PerformAutoML</code> is not set to <code>true</code>.</p> <p class="title"> <b>Supported algorithms:</b> </p> <ul> <li> <p> <code>arn:aws:forecast:::algorithm/ARIMA</code> </p> </li> <li> <p> <code>arn:aws:forecast:::algorithm/Deep_AR_Plus</code> </p> <p>Supports hyperparameter optimization (HPO)</p> </li> <li> <p> <code>arn:aws:forecast:::algorithm/ETS</code> </p> </li> <li> <p> <code>arn:aws:forecast:::algorithm/NPTS</code> </p> </li> <li> <p> <code>arn:aws:forecast:::algorithm/Prophet</code> </p> </li> </ul></p>
    #[serde(rename = "AlgorithmArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm_arn: Option<String>,
    /// <p>An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.</p>
    #[serde(rename = "EncryptionConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_config: Option<EncryptionConfig>,
    /// <p>Used to override the default evaluation parameters of the specified algorithm. Amazon Forecast evaluates a predictor by splitting a dataset into training data and testing data. The evaluation parameters define how to perform the split and the number of iterations.</p>
    #[serde(rename = "EvaluationParameters")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub evaluation_parameters: Option<EvaluationParameters>,
    /// <p>The featurization configuration.</p>
    #[serde(rename = "FeaturizationConfig")]
    pub featurization_config: FeaturizationConfig,
    /// <p>Specifies the number of time-steps that the model is trained to predict. The forecast horizon is also called the prediction length.</p> <p>For example, if you configure a dataset for daily data collection (using the <code>DataFrequency</code> parameter of the <a>CreateDataset</a> operation) and set the forecast horizon to 10, the model returns predictions for 10 days.</p> <p>The maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.</p>
    #[serde(rename = "ForecastHorizon")]
    pub forecast_horizon: i64,
    /// <p>Provides hyperparameter override values for the algorithm. If you don't provide this parameter, Amazon Forecast uses default values. The individual algorithms specify which hyperparameters support hyperparameter optimization (HPO). For more information, see <a>aws-forecast-choosing-recipes</a>.</p> <p>If you included the <code>HPOConfig</code> object, you must set <code>PerformHPO</code> to true.</p>
    #[serde(rename = "HPOConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hpo_config: Option<HyperParameterTuningJobConfig>,
    /// <p>Describes the dataset group that contains the data to use to train the predictor.</p>
    #[serde(rename = "InputDataConfig")]
    pub input_data_config: InputDataConfig,
    /// <p>Whether to perform AutoML. When Amazon Forecast performs AutoML, it evaluates the algorithms it provides and chooses the best algorithm and configuration for your training dataset.</p> <p>The default value is <code>false</code>. In this case, you are required to specify an algorithm.</p> <p>Set <code>PerformAutoML</code> to <code>true</code> to have Amazon Forecast perform AutoML. This is a good option if you aren't sure which algorithm is suitable for your training data. In this case, <code>PerformHPO</code> must be false.</p>
    #[serde(rename = "PerformAutoML")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub perform_auto_ml: Option<bool>,
    /// <p><p>Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.</p> <p>The default value is <code>false</code>. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.</p> <p>To override the default values, set <code>PerformHPO</code> to <code>true</code> and, optionally, supply the <a>HyperParameterTuningJobConfig</a> object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and <code>PerformAutoML</code> must be false.</p> <p>The following algorithm supports HPO:</p> <ul> <li> <p>DeepAR+</p> </li> </ul></p>
    #[serde(rename = "PerformHPO")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub perform_hpo: Option<bool>,
    /// <p>A name for the predictor.</p>
    #[serde(rename = "PredictorName")]
    pub predictor_name: String,
    /// <p><p>The optional metadata that you apply to the predictor to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
    /// <p>The hyperparameters to override for model training. The hyperparameters that you can override are listed in the individual algorithms. For the list of supported algorithms, see <a>aws-forecast-choosing-recipes</a>.</p>
    #[serde(rename = "TrainingParameters")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub training_parameters: Option<::std::collections::HashMap<String, String>>,
}
282
/// <p>Response returned by the <code>CreatePredictor</code> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreatePredictorResponse {
    /// <p>The Amazon Resource Name (ARN) of the predictor.</p>
    #[serde(rename = "PredictorArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_arn: Option<String>,
}
291
/// <p>The destination for an exported forecast, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the location and, optionally, an AWS Key Management Service (KMS) key. This object is submitted in the <a>CreateForecastExportJob</a> request.</p>
// Only an S3 destination is modeled here; the struct wraps a required `S3Config`.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct DataDestination {
    /// <p>The path to an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the bucket.</p>
    #[serde(rename = "S3Config")]
    pub s3_config: S3Config,
}
299
/// <p>The source of your training data, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the data and, optionally, an AWS Key Management Service (KMS) key. This object is submitted in the <a>CreateDatasetImportJob</a> request.</p>
// Only an S3 source is modeled here; the struct wraps a required `S3Config`.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct DataSource {
    /// <p>The path to the training data stored in an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the data.</p>
    #[serde(rename = "S3Config")]
    pub s3_config: S3Config,
}
307
/// <p>Provides a summary of the dataset group properties used in the <a>ListDatasetGroups</a> operation. To get the complete set of properties, call the <a>DescribeDatasetGroup</a> operation, and provide the <code>DatasetGroupArn</code>.</p>
// NOTE(review): timestamps are surfaced as f64 — presumably fractional seconds
// since the Unix epoch (rusoto's usual mapping for AWS timestamps); confirm.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DatasetGroupSummary {
    /// <p>When the dataset group was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The Amazon Resource Name (ARN) of the dataset group.</p>
    #[serde(rename = "DatasetGroupArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_arn: Option<String>,
    /// <p>The name of the dataset group.</p>
    #[serde(rename = "DatasetGroupName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_name: Option<String>,
    /// <p>When the dataset group was created or last updated from a call to the <a>UpdateDatasetGroup</a> operation. While the dataset group is being updated, <code>LastModificationTime</code> is the current time of the <code>ListDatasetGroups</code> call.</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
}
329
/// <p>Provides a summary of the dataset import job properties used in the <a>ListDatasetImportJobs</a> operation. To get the complete set of properties, call the <a>DescribeDatasetImportJob</a> operation, and provide the <code>DatasetImportJobArn</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DatasetImportJobSummary {
    /// <p>When the dataset import job was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.</p> <p>If encryption is used, <code>DataSource</code> includes an AWS Key Management Service (KMS) key.</p>
    #[serde(rename = "DataSource")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source: Option<DataSource>,
    /// <p>The Amazon Resource Name (ARN) of the dataset import job.</p>
    #[serde(rename = "DatasetImportJobArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_import_job_arn: Option<String>,
    /// <p>The name of the dataset import job.</p>
    #[serde(rename = "DatasetImportJobName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_import_job_name: Option<String>,
    /// <p>The last time that the dataset was modified. The time depends on the status of the job, as follows:</p> <ul> <li> <p> <code>CREATE_PENDING</code> - The same time as <code>CreationTime</code>.</p> </li> <li> <p> <code>CREATE_IN_PROGRESS</code> - The current timestamp.</p> </li> <li> <p> <code>ACTIVE</code> or <code>CREATE_FAILED</code> - When the job finished or failed.</p> </li> </ul>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>If an error occurred, an informational message about the error.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is <code>CREATE_IN_PROGRESS</code>, the status of the dataset is <code>UPDATE_IN_PROGRESS</code>. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> </ul>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
363
/// <p>Provides a summary of the dataset properties used in the <a>ListDatasets</a> operation. To get the complete set of properties, call the <a>DescribeDataset</a> operation, and provide the <code>DatasetArn</code>.</p>
// Output-only shape: all fields are optional because the service may omit them.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DatasetSummary {
    /// <p>When the dataset was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The Amazon Resource Name (ARN) of the dataset.</p>
    #[serde(rename = "DatasetArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_arn: Option<String>,
    /// <p>The name of the dataset.</p>
    #[serde(rename = "DatasetName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_name: Option<String>,
    /// <p>The dataset type.</p>
    #[serde(rename = "DatasetType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_type: Option<String>,
    /// <p>The domain associated with the dataset.</p>
    #[serde(rename = "Domain")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain: Option<String>,
    /// <p>When you create a dataset, <code>LastModificationTime</code> is the same as <code>CreationTime</code>. While data is being imported to the dataset, <code>LastModificationTime</code> is the current time of the <code>ListDatasets</code> call. After a <a>CreateDatasetImportJob</a> operation has finished, <code>LastModificationTime</code> is when the import job completed or failed.</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
}
393
/// Input for the <code>DeleteDatasetGroup</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteDatasetGroupRequest {
    /// <p>The Amazon Resource Name (ARN) of the dataset group to delete.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "DatasetGroupArn")]
    pub dataset_group_arn: String,
}
401
/// Input for the <code>DeleteDatasetImportJob</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteDatasetImportJobRequest {
    /// <p>The Amazon Resource Name (ARN) of the dataset import job to delete.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "DatasetImportJobArn")]
    pub dataset_import_job_arn: String,
}
409
/// Input for the <code>DeleteDataset</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteDatasetRequest {
    /// <p>The Amazon Resource Name (ARN) of the dataset to delete.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "DatasetArn")]
    pub dataset_arn: String,
}
417
/// Input for the <code>DeleteForecastExportJob</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteForecastExportJobRequest {
    /// <p>The Amazon Resource Name (ARN) of the forecast export job to delete.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "ForecastExportJobArn")]
    pub forecast_export_job_arn: String,
}
425
/// Input for the <code>DeleteForecast</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteForecastRequest {
    /// <p>The Amazon Resource Name (ARN) of the forecast to delete.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "ForecastArn")]
    pub forecast_arn: String,
}
433
/// Input for the <code>DeletePredictor</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeletePredictorRequest {
    /// <p>The Amazon Resource Name (ARN) of the predictor to delete.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "PredictorArn")]
    pub predictor_arn: String,
}
441
/// Input for the <code>DescribeDatasetGroup</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeDatasetGroupRequest {
    /// <p>The Amazon Resource Name (ARN) of the dataset group.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "DatasetGroupArn")]
    pub dataset_group_arn: String,
}
449
/// Output of the <code>DescribeDatasetGroup</code> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDatasetGroupResponse {
    /// <p>When the dataset group was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>An array of Amazon Resource Names (ARNs) of the datasets contained in the dataset group.</p>
    #[serde(rename = "DatasetArns")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_arns: Option<Vec<String>>,
    /// <p>The ARN of the dataset group.</p>
    #[serde(rename = "DatasetGroupArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_arn: Option<String>,
    /// <p>The name of the dataset group.</p>
    #[serde(rename = "DatasetGroupName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_name: Option<String>,
    /// <p>The domain associated with the dataset group.</p>
    #[serde(rename = "Domain")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain: Option<String>,
    /// <p>When the dataset group was created or last updated from a call to the <a>UpdateDatasetGroup</a> operation. While the dataset group is being updated, <code>LastModificationTime</code> is the current time of the <code>DescribeDatasetGroup</code> call.</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>The status of the dataset group. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> <li> <p> <code>UPDATE_PENDING</code>, <code>UPDATE_IN_PROGRESS</code>, <code>UPDATE_FAILED</code> </p> </li> </ul> <p>The <code>UPDATE</code> states apply when you call the <a>UpdateDatasetGroup</a> operation.</p> <note> <p>The <code>Status</code> of the dataset group must be <code>ACTIVE</code> before you can use the dataset group to create a predictor.</p> </note>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
482
/// Input for the <code>DescribeDatasetImportJob</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeDatasetImportJobRequest {
    /// <p>The Amazon Resource Name (ARN) of the dataset import job.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "DatasetImportJobArn")]
    pub dataset_import_job_arn: String,
}
490
/// Output of the <code>DescribeDatasetImportJob</code> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDatasetImportJobResponse {
    /// <p>When the dataset import job was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The size of the dataset in gigabytes (GB) after the import job has finished.</p>
    #[serde(rename = "DataSize")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_size: Option<f64>,
    /// <p>The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data.</p> <p>If encryption is used, <code>DataSource</code> includes an AWS Key Management Service (KMS) key.</p>
    #[serde(rename = "DataSource")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_source: Option<DataSource>,
    /// <p>The Amazon Resource Name (ARN) of the dataset that the training data was imported to.</p>
    #[serde(rename = "DatasetArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_arn: Option<String>,
    /// <p>The ARN of the dataset import job.</p>
    #[serde(rename = "DatasetImportJobArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_import_job_arn: Option<String>,
    /// <p>The name of the dataset import job.</p>
    #[serde(rename = "DatasetImportJobName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_import_job_name: Option<String>,
    /// <p>Statistical information about each field in the input data.</p>
    #[serde(rename = "FieldStatistics")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub field_statistics: Option<::std::collections::HashMap<String, Statistics>>,
    /// <p>The last time that the dataset was modified. The time depends on the status of the job, as follows:</p> <ul> <li> <p> <code>CREATE_PENDING</code> - The same time as <code>CreationTime</code>.</p> </li> <li> <p> <code>CREATE_IN_PROGRESS</code> - The current timestamp.</p> </li> <li> <p> <code>ACTIVE</code> or <code>CREATE_FAILED</code> - When the job finished or failed.</p> </li> </ul>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>If an error occurred, an informational message about the error.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is <code>CREATE_IN_PROGRESS</code>, the status of the dataset is <code>UPDATE_IN_PROGRESS</code>. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> </ul>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    /// <p>The format of timestamps in the dataset. The format that you specify depends on the <code>DataFrequency</code> specified when the dataset was created. The following formats are supported:</p> <ul> <li> <p>"yyyy-MM-dd"</p> <p>For the following data frequencies: Y, M, W, and D</p> </li> <li> <p>"yyyy-MM-dd HH:mm:ss"</p> <p>For the following data frequencies: H, 30min, 15min, and 1min; and optionally, for: Y, M, W, and D</p> </li> </ul>
    #[serde(rename = "TimestampFormat")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timestamp_format: Option<String>,
}
539
/// Input for the <code>DescribeDataset</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeDatasetRequest {
    /// <p>The Amazon Resource Name (ARN) of the dataset.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "DatasetArn")]
    pub dataset_arn: String,
}
547
/// Output of the <code>DescribeDataset</code> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDatasetResponse {
    /// <p>When the dataset was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The frequency of data collection.</p> <p>Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, "M" indicates every month and "30min" indicates every 30 minutes.</p>
    #[serde(rename = "DataFrequency")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_frequency: Option<String>,
    /// <p>The Amazon Resource Name (ARN) of the dataset.</p>
    #[serde(rename = "DatasetArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_arn: Option<String>,
    /// <p>The name of the dataset.</p>
    #[serde(rename = "DatasetName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_name: Option<String>,
    /// <p>The dataset type.</p>
    #[serde(rename = "DatasetType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_type: Option<String>,
    /// <p>The domain associated with the dataset.</p>
    #[serde(rename = "Domain")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain: Option<String>,
    /// <p>The AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.</p>
    #[serde(rename = "EncryptionConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_config: Option<EncryptionConfig>,
    /// <p>When you create a dataset, <code>LastModificationTime</code> is the same as <code>CreationTime</code>. While data is being imported to the dataset, <code>LastModificationTime</code> is the current time of the <code>DescribeDataset</code> call. After a <a>CreateDatasetImportJob</a> operation has finished, <code>LastModificationTime</code> is when the import job completed or failed.</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>An array of <code>SchemaAttribute</code> objects that specify the dataset fields. Each <code>SchemaAttribute</code> specifies the name and data type of a field.</p>
    #[serde(rename = "Schema")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schema: Option<Schema>,
    /// <p>The status of the dataset. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> <li> <p> <code>UPDATE_PENDING</code>, <code>UPDATE_IN_PROGRESS</code>, <code>UPDATE_FAILED</code> </p> </li> </ul> <p>The <code>UPDATE</code> states apply while data is imported to the dataset from a call to the <a>CreateDatasetImportJob</a> operation and reflect the status of the dataset import job. For example, when the import job status is <code>CREATE_IN_PROGRESS</code>, the status of the dataset is <code>UPDATE_IN_PROGRESS</code>.</p> <note> <p>The <code>Status</code> of the dataset must be <code>ACTIVE</code> before you can import training data.</p> </note>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
592
/// Input for the <code>DescribeForecastExportJob</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeForecastExportJobRequest {
    /// <p>The Amazon Resource Name (ARN) of the forecast export job.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "ForecastExportJobArn")]
    pub forecast_export_job_arn: String,
}
600
/// Output of the <code>DescribeForecastExportJob</code> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeForecastExportJobResponse {
    /// <p>When the forecast export job was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The path to the Amazon Simple Storage Service (Amazon S3) bucket where the forecast is exported.</p>
    #[serde(rename = "Destination")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub destination: Option<DataDestination>,
    /// <p>The Amazon Resource Name (ARN) of the exported forecast.</p>
    #[serde(rename = "ForecastArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_arn: Option<String>,
    /// <p>The ARN of the forecast export job.</p>
    #[serde(rename = "ForecastExportJobArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_export_job_arn: Option<String>,
    /// <p>The name of the forecast export job.</p>
    #[serde(rename = "ForecastExportJobName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_export_job_name: Option<String>,
    /// <p>When the last successful export job finished.</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>If an error occurred, an informational message about the error.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>The status of the forecast export job. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> </ul> <note> <p>The <code>Status</code> of the forecast export job must be <code>ACTIVE</code> before you can access the forecast in your S3 bucket.</p> </note>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
637
/// Input for the <code>DescribeForecast</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeForecastRequest {
    /// <p>The Amazon Resource Name (ARN) of the forecast.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "ForecastArn")]
    pub forecast_arn: String,
}
645
/// Output of the <code>DescribeForecast</code> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeForecastResponse {
    /// <p>When the forecast creation task was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The ARN of the dataset group that provided the data used to train the predictor.</p>
    #[serde(rename = "DatasetGroupArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_arn: Option<String>,
    /// <p>The forecast ARN as specified in the request.</p>
    #[serde(rename = "ForecastArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_arn: Option<String>,
    /// <p>The name of the forecast.</p>
    #[serde(rename = "ForecastName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_name: Option<String>,
    /// <p>The quantiles at which probabilistic forecasts were generated.</p>
    #[serde(rename = "ForecastTypes")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_types: Option<Vec<String>>,
    /// <p>Initially, the same as <code>CreationTime</code> (status is <code>CREATE_PENDING</code>). Updated when inference (creating the forecast) starts (status changed to <code>CREATE_IN_PROGRESS</code>), and when inference is complete (status changed to <code>ACTIVE</code>) or fails (status changed to <code>CREATE_FAILED</code>).</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>If an error occurred, an informational message about the error.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>The ARN of the predictor used to generate the forecast.</p>
    #[serde(rename = "PredictorArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_arn: Option<String>,
    /// <p>The status of the forecast. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> </ul> <note> <p>The <code>Status</code> of the forecast must be <code>ACTIVE</code> before you can query or export the forecast.</p> </note>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
686
/// Input for the <code>DescribePredictor</code> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribePredictorRequest {
    /// <p>The Amazon Resource Name (ARN) of the predictor that you want information about.</p>
    // Required: non-Option, always serialized into the request body.
    #[serde(rename = "PredictorArn")]
    pub predictor_arn: String,
}
694
/// Output of the <code>DescribePredictor</code> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribePredictorResponse {
    /// <p>The Amazon Resource Name (ARN) of the algorithm used for model training.</p>
    #[serde(rename = "AlgorithmArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm_arn: Option<String>,
    /// <p>When <code>PerformAutoML</code> is specified, the ARN of the chosen algorithm.</p>
    #[serde(rename = "AutoMLAlgorithmArns")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub auto_ml_algorithm_arns: Option<Vec<String>>,
    /// <p>When the model training task was created.</p>
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>An array of the ARNs of the dataset import jobs used to import training data for the predictor.</p>
    #[serde(rename = "DatasetImportJobArns")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_import_job_arns: Option<Vec<String>>,
    /// <p>An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.</p>
    #[serde(rename = "EncryptionConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_config: Option<EncryptionConfig>,
    /// <p>Used to override the default evaluation parameters of the specified algorithm. Amazon Forecast evaluates a predictor by splitting a dataset into training data and testing data. The evaluation parameters define how to perform the split and the number of iterations.</p>
    #[serde(rename = "EvaluationParameters")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub evaluation_parameters: Option<EvaluationParameters>,
    /// <p>The featurization configuration.</p>
    #[serde(rename = "FeaturizationConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub featurization_config: Option<FeaturizationConfig>,
    /// <p>The number of time-steps of the forecast. The forecast horizon is also called the prediction length.</p>
    #[serde(rename = "ForecastHorizon")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub forecast_horizon: Option<i64>,
    /// <p>The hyperparameter override values for the algorithm.</p>
    #[serde(rename = "HPOConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hpo_config: Option<HyperParameterTuningJobConfig>,
    /// <p>Describes the dataset group that contains the data to use to train the predictor.</p>
    #[serde(rename = "InputDataConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_data_config: Option<InputDataConfig>,
    /// <p>Initially, the same as <code>CreationTime</code> (when the status is <code>CREATE_PENDING</code>). This value is updated when training starts (when the status changes to <code>CREATE_IN_PROGRESS</code>), and when training has completed (when the status changes to <code>ACTIVE</code>) or fails (when the status changes to <code>CREATE_FAILED</code>).</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>If an error occurred, an informational message about the error.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>Whether the predictor is set to perform AutoML.</p>
    #[serde(rename = "PerformAutoML")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub perform_auto_ml: Option<bool>,
    /// <p>Whether the predictor is set to perform hyperparameter optimization (HPO).</p>
    #[serde(rename = "PerformHPO")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub perform_hpo: Option<bool>,
    /// <p>The ARN of the predictor.</p>
    #[serde(rename = "PredictorArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_arn: Option<String>,
    /// <p>Details on the status and results of the backtests performed to evaluate the accuracy of the predictor. You specify the number of backtests to perform when you call the operation.</p>
    #[serde(rename = "PredictorExecutionDetails")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_execution_details: Option<PredictorExecutionDetails>,
    /// <p>The name of the predictor.</p>
    #[serde(rename = "PredictorName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_name: Option<String>,
    /// <p>The status of the predictor. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> <li> <p> <code>UPDATE_PENDING</code>, <code>UPDATE_IN_PROGRESS</code>, <code>UPDATE_FAILED</code> </p> </li> </ul> <note> <p>The <code>Status</code> of the predictor must be <code>ACTIVE</code> before you can use the predictor to create a forecast.</p> </note>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    /// <p>The default training parameters or overrides selected during model training. If using the AutoML algorithm or if HPO is turned on while using the DeepAR+ algorithms, the optimized values for the chosen hyperparameters are returned. For more information, see <a>aws-forecast-choosing-recipes</a>.</p>
    #[serde(rename = "TrainingParameters")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub training_parameters: Option<::std::collections::HashMap<String, String>>,
}
775
/// <p>An AWS Key Management Service (KMS) key and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key. You can specify this optional object in the <a>CreateDataset</a> and <a>CreatePredictor</a> requests.</p>
// Both fields are required (non-Option): deserialization fails if either key
// is absent, and both are always serialized.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct EncryptionConfig {
    /// <p>The Amazon Resource Name (ARN) of the KMS key.</p>
    #[serde(rename = "KMSKeyArn")]
    pub kms_key_arn: String,
    /// <p>The ARN of the IAM role that Amazon Forecast can assume to access the AWS KMS key.</p> <p>Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an <code>InvalidInputException</code> error.</p>
    #[serde(rename = "RoleArn")]
    pub role_arn: String,
}
786
/// <p>Parameters that define how to split a dataset into training data and testing data, and the number of iterations to perform. These parameters are specified in the predefined algorithms but you can override them in the <a>CreatePredictor</a> request.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct EvaluationParameters {
    /// <p>The point from the end of the dataset where you want to split the data for model training and testing (evaluation). Specify the value as the number of data points. The default is the value of the forecast horizon. <code>BackTestWindowOffset</code> can be used to mimic a past virtual forecast start date. This value must be greater than or equal to the forecast horizon and less than half of the TARGET_TIME_SERIES dataset length.</p> <p> <code>ForecastHorizon</code> &lt;= <code>BackTestWindowOffset</code> &lt; 1/2 * TARGET_TIME_SERIES dataset length</p>
    #[serde(rename = "BackTestWindowOffset")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub back_test_window_offset: Option<i64>,
    /// <p>The number of times to split the input data. The default is 1. Valid values are 1 through 5.</p>
    #[serde(rename = "NumberOfBacktestWindows")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub number_of_backtest_windows: Option<i64>,
}
799
800/// <p>The results of evaluating an algorithm. Returned as part of the <a>GetAccuracyMetrics</a> response.</p>
801#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
802#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
803pub struct EvaluationResult {
804    /// <p>The Amazon Resource Name (ARN) of the algorithm that was evaluated.</p>
805    #[serde(rename = "AlgorithmArn")]
806    #[serde(skip_serializing_if = "Option::is_none")]
807    pub algorithm_arn: Option<String>,
808    /// <p>The array of test windows used for evaluating the algorithm. The <code>NumberOfBacktestWindows</code> from the <a>EvaluationParameters</a> object determines the number of windows in the array.</p>
809    #[serde(rename = "TestWindows")]
810    #[serde(skip_serializing_if = "Option::is_none")]
811    pub test_windows: Option<Vec<WindowSummary>>,
812}
813
814/// <p>Provides featurization (transformation) information for a dataset field. This object is part of the <a>FeaturizationConfig</a> object.</p> <p>For example:</p> <p> <code>{</code> </p> <p> <code>"AttributeName": "demand",</code> </p> <p> <code>FeaturizationPipeline [ {</code> </p> <p> <code>"FeaturizationMethodName": "filling",</code> </p> <p> <code>"FeaturizationMethodParameters": {"aggregation": "avg", "backfill": "nan"}</code> </p> <p> <code>} ]</code> </p> <p> <code>}</code> </p>
815#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
816pub struct Featurization {
817    /// <p>The name of the schema attribute that specifies the data field to be featurized. Amazon Forecast supports the target field of the <code>TARGET_TIME_SERIES</code> and the <code>RELATED_TIME_SERIES</code> datasets. For example, for the <code>RETAIL</code> domain, the target is <code>demand</code>, and for the <code>CUSTOM</code> domain, the target is <code>target_value</code>. For more information, see <a>howitworks-missing-values</a>.</p>
818    #[serde(rename = "AttributeName")]
819    pub attribute_name: String,
820    /// <p>An array of one <code>FeaturizationMethod</code> object that specifies the feature transformation method.</p>
821    #[serde(rename = "FeaturizationPipeline")]
822    #[serde(skip_serializing_if = "Option::is_none")]
823    pub featurization_pipeline: Option<Vec<FeaturizationMethod>>,
824}
825
826/// <p>In a <a>CreatePredictor</a> operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as <i>featurization</i>.</p> <p>You define featurization using the <code>FeaturizationConfig</code> object. You specify an array of transformations, one for each field that you want to featurize. You then include the <code>FeaturizationConfig</code> object in your <code>CreatePredictor</code> request. Amazon Forecast applies the featurization to the <code>TARGET_TIME_SERIES</code> and <code>RELATED_TIME_SERIES</code> datasets before model training.</p> <p>You can create multiple featurization configurations. For example, you might call the <code>CreatePredictor</code> operation twice by specifying different featurization configurations.</p>
827#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
828pub struct FeaturizationConfig {
829    /// <p>An array of featurization (transformation) information for the fields of a dataset.</p>
830    #[serde(rename = "Featurizations")]
831    #[serde(skip_serializing_if = "Option::is_none")]
832    pub featurizations: Option<Vec<Featurization>>,
833    /// <p>An array of dimension (field) names that specify how to group the generated forecast.</p> <p>For example, suppose that you are generating a forecast for item sales across all of your stores, and your dataset contains a <code>store_id</code> field. If you want the sales forecast for each item by store, you would specify <code>store_id</code> as the dimension.</p> <p>All forecast dimensions specified in the <code>TARGET_TIME_SERIES</code> dataset don't need to be specified in the <code>CreatePredictor</code> request. All forecast dimensions specified in the <code>RELATED_TIME_SERIES</code> dataset must be specified in the <code>CreatePredictor</code> request.</p>
834    #[serde(rename = "ForecastDimensions")]
835    #[serde(skip_serializing_if = "Option::is_none")]
836    pub forecast_dimensions: Option<Vec<String>>,
837    /// <p>The frequency of predictions in a forecast.</p> <p>Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min (30 minutes), 15min (15 minutes), 10min (10 minutes), 5min (5 minutes), and 1min (1 minute). For example, "Y" indicates every year and "5min" indicates every five minutes.</p> <p>The frequency must be greater than or equal to the TARGET_TIME_SERIES dataset frequency.</p> <p>When a RELATED_TIME_SERIES dataset is provided, the frequency must be equal to the RELATED_TIME_SERIES dataset frequency.</p>
838    #[serde(rename = "ForecastFrequency")]
839    pub forecast_frequency: String,
840}
841
842/// <p>Provides information about the method that featurizes (transforms) a dataset field. The method is part of the <code>FeaturizationPipeline</code> of the <a>Featurization</a> object. </p> <p>The following is an example of how you specify a <code>FeaturizationMethod</code> object.</p> <p> <code>{</code> </p> <p> <code>"FeaturizationMethodName": "filling",</code> </p> <p> <code>"FeaturizationMethodParameters": {"aggregation": "sum", "middlefill": "zero", "backfill": "zero"}</code> </p> <p> <code>}</code> </p>
843#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
844pub struct FeaturizationMethod {
845    /// <p>The name of the method. The "filling" method is the only supported method.</p>
846    #[serde(rename = "FeaturizationMethodName")]
847    pub featurization_method_name: String,
848    /// <p><p>The method parameters (key-value pairs), which are a map of override parameters. Specify these parameters to override the default values. Related Time Series attributes do not accept aggregation parameters.</p> <p>The following list shows the parameters and their valid values for the &quot;filling&quot; featurization method for a <b>Target Time Series</b> dataset. Bold signifies the default value.</p> <ul> <li> <p> <code>aggregation</code>: <b>sum</b>, <code>avg</code>, <code>first</code>, <code>min</code>, <code>max</code> </p> </li> <li> <p> <code>frontfill</code>: <b>none</b> </p> </li> <li> <p> <code>middlefill</code>: <b>zero</b>, <code>nan</code> (not a number), <code>value</code>, <code>median</code>, <code>mean</code>, <code>min</code>, <code>max</code> </p> </li> <li> <p> <code>backfill</code>: <b>zero</b>, <code>nan</code>, <code>value</code>, <code>median</code>, <code>mean</code>, <code>min</code>, <code>max</code> </p> </li> </ul> <p>The following list shows the parameters and their valid values for a <b>Related Time Series</b> featurization method (there are no defaults):</p> <ul> <li> <p> <code>middlefill</code>: <code>zero</code>, <code>value</code>, <code>median</code>, <code>mean</code>, <code>min</code>, <code>max</code> </p> </li> <li> <p> <code>backfill</code>: <code>zero</code>, <code>value</code>, <code>median</code>, <code>mean</code>, <code>min</code>, <code>max</code> </p> </li> <li> <p> <code>futurefill</code>: <code>zero</code>, <code>value</code>, <code>median</code>, <code>mean</code>, <code>min</code>, <code>max</code> </p> </li> </ul></p>
849    #[serde(rename = "FeaturizationMethodParameters")]
850    #[serde(skip_serializing_if = "Option::is_none")]
851    pub featurization_method_parameters: Option<::std::collections::HashMap<String, String>>,
852}
853
854/// <p>Describes a filter for choosing a subset of objects. Each filter consists of a condition and a match statement. The condition is either <code>IS</code> or <code>IS_NOT</code>, which specifies whether to include or exclude the objects that match the statement, respectively. The match statement consists of a key and a value.</p>
855#[derive(Clone, Debug, Default, PartialEq, Serialize)]
856#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
857pub struct Filter {
858    /// <p>The condition to apply. To include the objects that match the statement, specify <code>IS</code>. To exclude matching objects, specify <code>IS_NOT</code>.</p>
859    #[serde(rename = "Condition")]
860    pub condition: String,
861    /// <p>The name of the parameter to filter on.</p>
862    #[serde(rename = "Key")]
863    pub key: String,
864    /// <p>The value to match.</p>
865    #[serde(rename = "Value")]
866    pub value: String,
867}
868
869/// <p>Provides a summary of the forecast export job properties used in the <a>ListForecastExportJobs</a> operation. To get the complete set of properties, call the <a>DescribeForecastExportJob</a> operation, and provide the listed <code>ForecastExportJobArn</code>.</p>
870#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
871#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
872pub struct ForecastExportJobSummary {
873    /// <p>When the forecast export job was created.</p>
874    #[serde(rename = "CreationTime")]
875    #[serde(skip_serializing_if = "Option::is_none")]
876    pub creation_time: Option<f64>,
877    /// <p>The path to the Amazon Simple Storage Service (Amazon S3) bucket where the forecast is exported.</p>
878    #[serde(rename = "Destination")]
879    #[serde(skip_serializing_if = "Option::is_none")]
880    pub destination: Option<DataDestination>,
881    /// <p>The Amazon Resource Name (ARN) of the forecast export job.</p>
882    #[serde(rename = "ForecastExportJobArn")]
883    #[serde(skip_serializing_if = "Option::is_none")]
884    pub forecast_export_job_arn: Option<String>,
885    /// <p>The name of the forecast export job.</p>
886    #[serde(rename = "ForecastExportJobName")]
887    #[serde(skip_serializing_if = "Option::is_none")]
888    pub forecast_export_job_name: Option<String>,
889    /// <p>When the last successful export job finished.</p>
890    #[serde(rename = "LastModificationTime")]
891    #[serde(skip_serializing_if = "Option::is_none")]
892    pub last_modification_time: Option<f64>,
893    /// <p>If an error occurred, an informational message about the error.</p>
894    #[serde(rename = "Message")]
895    #[serde(skip_serializing_if = "Option::is_none")]
896    pub message: Option<String>,
897    /// <p><p>The status of the forecast export job. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE<em>PENDING</code>, <code>CREATE</em>IN<em>PROGRESS</code>, <code>CREATE</em>FAILED</code> </p> </li> <li> <p> <code>DELETE<em>PENDING</code>, <code>DELETE</em>IN<em>PROGRESS</code>, <code>DELETE</em>FAILED</code> </p> </li> </ul> <note> <p>The <code>Status</code> of the forecast export job must be <code>ACTIVE</code> before you can access the forecast in your S3 bucket.</p> </note></p>
898    #[serde(rename = "Status")]
899    #[serde(skip_serializing_if = "Option::is_none")]
900    pub status: Option<String>,
901}
902
903/// <p>Provides a summary of the forecast properties used in the <a>ListForecasts</a> operation. To get the complete set of properties, call the <a>DescribeForecast</a> operation, and provide the <code>ForecastArn</code> that is listed in the summary.</p>
904#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
905#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
906pub struct ForecastSummary {
907    /// <p>When the forecast creation task was created.</p>
908    #[serde(rename = "CreationTime")]
909    #[serde(skip_serializing_if = "Option::is_none")]
910    pub creation_time: Option<f64>,
911    /// <p>The Amazon Resource Name (ARN) of the dataset group that provided the data used to train the predictor.</p>
912    #[serde(rename = "DatasetGroupArn")]
913    #[serde(skip_serializing_if = "Option::is_none")]
914    pub dataset_group_arn: Option<String>,
915    /// <p>The ARN of the forecast.</p>
916    #[serde(rename = "ForecastArn")]
917    #[serde(skip_serializing_if = "Option::is_none")]
918    pub forecast_arn: Option<String>,
919    /// <p>The name of the forecast.</p>
920    #[serde(rename = "ForecastName")]
921    #[serde(skip_serializing_if = "Option::is_none")]
922    pub forecast_name: Option<String>,
923    /// <p>Initially, the same as <code>CreationTime</code> (status is <code>CREATE_PENDING</code>). Updated when inference (creating the forecast) starts (status changed to <code>CREATE_IN_PROGRESS</code>), and when inference is complete (status changed to <code>ACTIVE</code>) or fails (status changed to <code>CREATE_FAILED</code>).</p>
924    #[serde(rename = "LastModificationTime")]
925    #[serde(skip_serializing_if = "Option::is_none")]
926    pub last_modification_time: Option<f64>,
927    /// <p>If an error occurred, an informational message about the error.</p>
928    #[serde(rename = "Message")]
929    #[serde(skip_serializing_if = "Option::is_none")]
930    pub message: Option<String>,
931    /// <p>The ARN of the predictor used to generate the forecast.</p>
932    #[serde(rename = "PredictorArn")]
933    #[serde(skip_serializing_if = "Option::is_none")]
934    pub predictor_arn: Option<String>,
935    /// <p><p>The status of the forecast. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE<em>PENDING</code>, <code>CREATE</em>IN<em>PROGRESS</code>, <code>CREATE</em>FAILED</code> </p> </li> <li> <p> <code>DELETE<em>PENDING</code>, <code>DELETE</em>IN<em>PROGRESS</code>, <code>DELETE</em>FAILED</code> </p> </li> </ul> <note> <p>The <code>Status</code> of the forecast must be <code>ACTIVE</code> before you can query or export the forecast.</p> </note></p>
936    #[serde(rename = "Status")]
937    #[serde(skip_serializing_if = "Option::is_none")]
938    pub status: Option<String>,
939}
940
941#[derive(Clone, Debug, Default, PartialEq, Serialize)]
942#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
943pub struct GetAccuracyMetricsRequest {
944    /// <p>The Amazon Resource Name (ARN) of the predictor to get metrics for.</p>
945    #[serde(rename = "PredictorArn")]
946    pub predictor_arn: String,
947}
948
949#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
950#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
951pub struct GetAccuracyMetricsResponse {
952    /// <p>An array of results from evaluating the predictor.</p>
953    #[serde(rename = "PredictorEvaluationResults")]
954    #[serde(skip_serializing_if = "Option::is_none")]
955    pub predictor_evaluation_results: Option<Vec<EvaluationResult>>,
956}
957
958/// <p>Configuration information for a hyperparameter tuning job. You specify this object in the <a>CreatePredictor</a> request.</p> <p>A <i>hyperparameter</i> is a parameter that governs the model training process. You set hyperparameters before training starts, unlike model parameters, which are determined during training. The values of the hyperparameters effect which values are chosen for the model parameters.</p> <p>In a <i>hyperparameter tuning job</i>, Amazon Forecast chooses the set of hyperparameter values that optimize a specified metric. Forecast accomplishes this by running many training jobs over a range of hyperparameter values. The optimum set of values depends on the algorithm, the training data, and the specified metric objective.</p>
959#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
960pub struct HyperParameterTuningJobConfig {
961    /// <p>Specifies the ranges of valid values for the hyperparameters.</p>
962    #[serde(rename = "ParameterRanges")]
963    #[serde(skip_serializing_if = "Option::is_none")]
964    pub parameter_ranges: Option<ParameterRanges>,
965}
966
967/// <p>The data used to train a predictor. The data includes a dataset group and any supplementary features. You specify this object in the <a>CreatePredictor</a> request.</p>
968#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
969pub struct InputDataConfig {
970    /// <p>The Amazon Resource Name (ARN) of the dataset group.</p>
971    #[serde(rename = "DatasetGroupArn")]
972    pub dataset_group_arn: String,
973    /// <p>An array of supplementary features. The only supported feature is a holiday calendar.</p>
974    #[serde(rename = "SupplementaryFeatures")]
975    #[serde(skip_serializing_if = "Option::is_none")]
976    pub supplementary_features: Option<Vec<SupplementaryFeature>>,
977}
978
979/// <p>Specifies an integer hyperparameter and it's range of tunable values. This object is part of the <a>ParameterRanges</a> object.</p>
980#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
981pub struct IntegerParameterRange {
982    /// <p>The maximum tunable value of the hyperparameter.</p>
983    #[serde(rename = "MaxValue")]
984    pub max_value: i64,
985    /// <p>The minimum tunable value of the hyperparameter.</p>
986    #[serde(rename = "MinValue")]
987    pub min_value: i64,
988    /// <p>The name of the hyperparameter to tune.</p>
989    #[serde(rename = "Name")]
990    pub name: String,
991    /// <p>The scale that hyperparameter tuning uses to search the hyperparameter range. Valid values:</p> <dl> <dt>Auto</dt> <dd> <p>Amazon Forecast hyperparameter tuning chooses the best scale for the hyperparameter.</p> </dd> <dt>Linear</dt> <dd> <p>Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.</p> </dd> <dt>Logarithmic</dt> <dd> <p>Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.</p> <p>Logarithmic scaling works only for ranges that have values greater than 0.</p> </dd> <dt>ReverseLogarithmic</dt> <dd> <p>Not supported for <code>IntegerParameterRange</code>.</p> <p>Reverse logarithmic scaling works only for ranges that are entirely within the range 0 &lt;= x &lt; 1.0.</p> </dd> </dl> <p>For information about choosing a hyperparameter scale, see <a href="http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type">Hyperparameter Scaling</a>. One of the following values:</p>
992    #[serde(rename = "ScalingType")]
993    #[serde(skip_serializing_if = "Option::is_none")]
994    pub scaling_type: Option<String>,
995}
996
997#[derive(Clone, Debug, Default, PartialEq, Serialize)]
998#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
999pub struct ListDatasetGroupsRequest {
1000    /// <p>The number of items to return in the response.</p>
1001    #[serde(rename = "MaxResults")]
1002    #[serde(skip_serializing_if = "Option::is_none")]
1003    pub max_results: Option<i64>,
1004    /// <p>If the result of the previous request was truncated, the response includes a <code>NextToken</code>. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.</p>
1005    #[serde(rename = "NextToken")]
1006    #[serde(skip_serializing_if = "Option::is_none")]
1007    pub next_token: Option<String>,
1008}
1009
1010#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1011#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1012pub struct ListDatasetGroupsResponse {
1013    /// <p>An array of objects that summarize each dataset group's properties.</p>
1014    #[serde(rename = "DatasetGroups")]
1015    #[serde(skip_serializing_if = "Option::is_none")]
1016    pub dataset_groups: Option<Vec<DatasetGroupSummary>>,
1017    /// <p>If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.</p>
1018    #[serde(rename = "NextToken")]
1019    #[serde(skip_serializing_if = "Option::is_none")]
1020    pub next_token: Option<String>,
1021}
1022
1023#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1024#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1025pub struct ListDatasetImportJobsRequest {
1026    /// <p>An array of filters. For each filter, you provide a condition and a match statement. The condition is either <code>IS</code> or <code>IS_NOT</code>, which specifies whether to include or exclude the datasets that match the statement from the list, respectively. The match statement consists of a key and a value.</p> <p> <b>Filter properties</b> </p> <ul> <li> <p> <code>Condition</code> - The condition to apply. Valid values are <code>IS</code> and <code>IS_NOT</code>. To include the datasets that match the statement, specify <code>IS</code>. To exclude matching datasets, specify <code>IS_NOT</code>.</p> </li> <li> <p> <code>Key</code> - The name of the parameter to filter on. Valid values are <code>DatasetArn</code> and <code>Status</code>.</p> </li> <li> <p> <code>Value</code> - The value to match.</p> </li> </ul> <p>For example, to list all dataset import jobs whose status is ACTIVE, you specify the following filter:</p> <p> <code>"Filters": [ { "Condition": "IS", "Key": "Status", "Value": "ACTIVE" } ]</code> </p>
1027    #[serde(rename = "Filters")]
1028    #[serde(skip_serializing_if = "Option::is_none")]
1029    pub filters: Option<Vec<Filter>>,
1030    /// <p>The number of items to return in the response.</p>
1031    #[serde(rename = "MaxResults")]
1032    #[serde(skip_serializing_if = "Option::is_none")]
1033    pub max_results: Option<i64>,
1034    /// <p>If the result of the previous request was truncated, the response includes a <code>NextToken</code>. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.</p>
1035    #[serde(rename = "NextToken")]
1036    #[serde(skip_serializing_if = "Option::is_none")]
1037    pub next_token: Option<String>,
1038}
1039
1040#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1041#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1042pub struct ListDatasetImportJobsResponse {
1043    /// <p>An array of objects that summarize each dataset import job's properties.</p>
1044    #[serde(rename = "DatasetImportJobs")]
1045    #[serde(skip_serializing_if = "Option::is_none")]
1046    pub dataset_import_jobs: Option<Vec<DatasetImportJobSummary>>,
1047    /// <p>If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.</p>
1048    #[serde(rename = "NextToken")]
1049    #[serde(skip_serializing_if = "Option::is_none")]
1050    pub next_token: Option<String>,
1051}
1052
1053#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1054#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1055pub struct ListDatasetsRequest {
1056    /// <p>The number of items to return in the response.</p>
1057    #[serde(rename = "MaxResults")]
1058    #[serde(skip_serializing_if = "Option::is_none")]
1059    pub max_results: Option<i64>,
1060    /// <p>If the result of the previous request was truncated, the response includes a <code>NextToken</code>. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.</p>
1061    #[serde(rename = "NextToken")]
1062    #[serde(skip_serializing_if = "Option::is_none")]
1063    pub next_token: Option<String>,
1064}
1065
1066#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1067#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1068pub struct ListDatasetsResponse {
1069    /// <p>An array of objects that summarize each dataset's properties.</p>
1070    #[serde(rename = "Datasets")]
1071    #[serde(skip_serializing_if = "Option::is_none")]
1072    pub datasets: Option<Vec<DatasetSummary>>,
1073    /// <p>If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.</p>
1074    #[serde(rename = "NextToken")]
1075    #[serde(skip_serializing_if = "Option::is_none")]
1076    pub next_token: Option<String>,
1077}
1078
1079#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1080#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1081pub struct ListForecastExportJobsRequest {
1082    /// <p>An array of filters. For each filter, you provide a condition and a match statement. The condition is either <code>IS</code> or <code>IS_NOT</code>, which specifies whether to include or exclude the forecast export jobs that match the statement from the list, respectively. The match statement consists of a key and a value.</p> <p> <b>Filter properties</b> </p> <ul> <li> <p> <code>Condition</code> - The condition to apply. Valid values are <code>IS</code> and <code>IS_NOT</code>. To include the forecast export jobs that match the statement, specify <code>IS</code>. To exclude matching forecast export jobs, specify <code>IS_NOT</code>.</p> </li> <li> <p> <code>Key</code> - The name of the parameter to filter on. Valid values are <code>ForecastArn</code> and <code>Status</code>.</p> </li> <li> <p> <code>Value</code> - The value to match.</p> </li> </ul> <p>For example, to list all jobs that export a forecast named <i>electricityforecast</i>, specify the following filter:</p> <p> <code>"Filters": [ { "Condition": "IS", "Key": "ForecastArn", "Value": "arn:aws:forecast:us-west-2:&lt;acct-id&gt;:forecast/electricityforecast" } ]</code> </p>
1083    #[serde(rename = "Filters")]
1084    #[serde(skip_serializing_if = "Option::is_none")]
1085    pub filters: Option<Vec<Filter>>,
1086    /// <p>The number of items to return in the response.</p>
1087    #[serde(rename = "MaxResults")]
1088    #[serde(skip_serializing_if = "Option::is_none")]
1089    pub max_results: Option<i64>,
1090    /// <p>If the result of the previous request was truncated, the response includes a <code>NextToken</code>. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.</p>
1091    #[serde(rename = "NextToken")]
1092    #[serde(skip_serializing_if = "Option::is_none")]
1093    pub next_token: Option<String>,
1094}
1095
1096#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1097#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1098pub struct ListForecastExportJobsResponse {
1099    /// <p>An array of objects that summarize each export job's properties.</p>
1100    #[serde(rename = "ForecastExportJobs")]
1101    #[serde(skip_serializing_if = "Option::is_none")]
1102    pub forecast_export_jobs: Option<Vec<ForecastExportJobSummary>>,
1103    /// <p>If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.</p>
1104    #[serde(rename = "NextToken")]
1105    #[serde(skip_serializing_if = "Option::is_none")]
1106    pub next_token: Option<String>,
1107}
1108
1109#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1110#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1111pub struct ListForecastsRequest {
1112    /// <p>An array of filters. For each filter, you provide a condition and a match statement. The condition is either <code>IS</code> or <code>IS_NOT</code>, which specifies whether to include or exclude the forecasts that match the statement from the list, respectively. The match statement consists of a key and a value.</p> <p> <b>Filter properties</b> </p> <ul> <li> <p> <code>Condition</code> - The condition to apply. Valid values are <code>IS</code> and <code>IS_NOT</code>. To include the forecasts that match the statement, specify <code>IS</code>. To exclude matching forecasts, specify <code>IS_NOT</code>.</p> </li> <li> <p> <code>Key</code> - The name of the parameter to filter on. Valid values are <code>DatasetGroupArn</code>, <code>PredictorArn</code>, and <code>Status</code>.</p> </li> <li> <p> <code>Value</code> - The value to match.</p> </li> </ul> <p>For example, to list all forecasts whose status is not ACTIVE, you would specify:</p> <p> <code>"Filters": [ { "Condition": "IS_NOT", "Key": "Status", "Value": "ACTIVE" } ]</code> </p>
1113    #[serde(rename = "Filters")]
1114    #[serde(skip_serializing_if = "Option::is_none")]
1115    pub filters: Option<Vec<Filter>>,
1116    /// <p>The number of items to return in the response.</p>
1117    #[serde(rename = "MaxResults")]
1118    #[serde(skip_serializing_if = "Option::is_none")]
1119    pub max_results: Option<i64>,
1120    /// <p>If the result of the previous request was truncated, the response includes a <code>NextToken</code>. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.</p>
1121    #[serde(rename = "NextToken")]
1122    #[serde(skip_serializing_if = "Option::is_none")]
1123    pub next_token: Option<String>,
1124}
1125
1126#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1127#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1128pub struct ListForecastsResponse {
1129    /// <p>An array of objects that summarize each forecast's properties.</p>
1130    #[serde(rename = "Forecasts")]
1131    #[serde(skip_serializing_if = "Option::is_none")]
1132    pub forecasts: Option<Vec<ForecastSummary>>,
1133    /// <p>If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.</p>
1134    #[serde(rename = "NextToken")]
1135    #[serde(skip_serializing_if = "Option::is_none")]
1136    pub next_token: Option<String>,
1137}
1138
1139#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1140#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1141pub struct ListPredictorsRequest {
1142    /// <p>An array of filters. For each filter, you provide a condition and a match statement. The condition is either <code>IS</code> or <code>IS_NOT</code>, which specifies whether to include or exclude the predictors that match the statement from the list, respectively. The match statement consists of a key and a value.</p> <p> <b>Filter properties</b> </p> <ul> <li> <p> <code>Condition</code> - The condition to apply. Valid values are <code>IS</code> and <code>IS_NOT</code>. To include the predictors that match the statement, specify <code>IS</code>. To exclude matching predictors, specify <code>IS_NOT</code>.</p> </li> <li> <p> <code>Key</code> - The name of the parameter to filter on. Valid values are <code>DatasetGroupArn</code> and <code>Status</code>.</p> </li> <li> <p> <code>Value</code> - The value to match.</p> </li> </ul> <p>For example, to list all predictors whose status is ACTIVE, you would specify:</p> <p> <code>"Filters": [ { "Condition": "IS", "Key": "Status", "Value": "ACTIVE" } ]</code> </p>
1143    #[serde(rename = "Filters")]
1144    #[serde(skip_serializing_if = "Option::is_none")]
1145    pub filters: Option<Vec<Filter>>,
1146    /// <p>The number of items to return in the response.</p>
1147    #[serde(rename = "MaxResults")]
1148    #[serde(skip_serializing_if = "Option::is_none")]
1149    pub max_results: Option<i64>,
1150    /// <p>If the result of the previous request was truncated, the response includes a <code>NextToken</code>. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.</p>
1151    #[serde(rename = "NextToken")]
1152    #[serde(skip_serializing_if = "Option::is_none")]
1153    pub next_token: Option<String>,
1154}
1155
/// Output of the <a>ListPredictors</a> operation. Output-only shape: `Serialize`
/// is derived just for tests and the `serialize_structs` feature.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListPredictorsResponse {
    /// <p>If the response is truncated, Amazon Forecast returns this token. To retrieve the next set of results, use the token in the next request.</p>
    #[serde(rename = "NextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
    /// <p>An array of objects that summarize each predictor's properties.</p>
    #[serde(rename = "Predictors")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictors: Option<Vec<PredictorSummary>>,
}
1168
/// Input for the <a>ListTagsForResource</a> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ListTagsForResourceRequest {
    /// <p>The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.</p>
    #[serde(rename = "ResourceArn")]
    pub resource_arn: String,
}
1176
/// Output of the <a>ListTagsForResource</a> operation.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListTagsForResourceResponse {
    /// <p>The tags for the resource.</p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
}
1185
/// <p>Provides metrics that are used to evaluate the performance of a predictor. This object is part of the <a>WindowSummary</a> object.</p>
///
/// Output-only shape: `Serialize` is derived just for tests and the
/// `serialize_structs` feature.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Metrics {
    /// <p>The root mean square error (RMSE).</p>
    #[serde(rename = "RMSE")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rmse: Option<f64>,
    /// <p>An array of weighted quantile losses. Quantiles divide a probability distribution into regions of equal probability. The distribution in this case is the loss function.</p>
    #[serde(rename = "WeightedQuantileLosses")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub weighted_quantile_losses: Option<Vec<WeightedQuantileLoss>>,
}
1199
/// <p>Specifies the categorical, continuous, and integer hyperparameters, and their ranges of tunable values. The range of tunable values determines which values that a hyperparameter tuning job can choose for the specified hyperparameter. This object is part of the <a>HyperParameterTuningJobConfig</a> object.</p>
///
/// Appears in both requests and responses, so `Serialize` and `Deserialize`
/// are derived unconditionally. All three range lists are optional and
/// omitted from the wire format when `None`.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct ParameterRanges {
    /// <p>Specifies the tunable range for each categorical hyperparameter.</p>
    #[serde(rename = "CategoricalParameterRanges")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub categorical_parameter_ranges: Option<Vec<CategoricalParameterRange>>,
    /// <p>Specifies the tunable range for each continuous hyperparameter.</p>
    #[serde(rename = "ContinuousParameterRanges")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub continuous_parameter_ranges: Option<Vec<ContinuousParameterRange>>,
    /// <p>Specifies the tunable range for each integer hyperparameter.</p>
    #[serde(rename = "IntegerParameterRanges")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub integer_parameter_ranges: Option<Vec<IntegerParameterRange>>,
}
1216
/// <p>The algorithm used to perform a backtest and the status of those tests.</p>
///
/// Output-only shape: `Serialize` is derived just for tests and the
/// `serialize_structs` feature.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PredictorExecution {
    /// <p>The ARN of the algorithm used to test the predictor.</p>
    #[serde(rename = "AlgorithmArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub algorithm_arn: Option<String>,
    /// <p>An array of test windows used to evaluate the algorithm. The <code>NumberOfBacktestWindows</code> from the object determines the number of windows in the array.</p>
    #[serde(rename = "TestWindows")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub test_windows: Option<Vec<TestWindowSummary>>,
}
1230
/// <p>Contains details on the backtests performed to evaluate the accuracy of the predictor. The tests are returned in descending order of accuracy, with the most accurate backtest appearing first. You specify the number of backtests to perform when you call the operation.</p>
///
/// Output-only shape: `Serialize` is derived just for tests and the
/// `serialize_structs` feature.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PredictorExecutionDetails {
    /// <p>An array of the backtests performed to evaluate the accuracy of the predictor against a particular algorithm. The <code>NumberOfBacktestWindows</code> from the object determines the number of windows in the array.</p>
    #[serde(rename = "PredictorExecutions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_executions: Option<Vec<PredictorExecution>>,
}
1240
/// <p>Provides a summary of the predictor properties that are used in the <a>ListPredictors</a> operation. To get the complete set of properties, call the <a>DescribePredictor</a> operation, and provide the listed <code>PredictorArn</code>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct PredictorSummary {
    /// <p>When the model training task was created.</p>
    // NOTE(review): f64 timestamp — presumably seconds since the Unix epoch,
    // per the AWS JSON protocol; confirm against rusoto's timestamp handling.
    #[serde(rename = "CreationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<f64>,
    /// <p>The Amazon Resource Name (ARN) of the dataset group that contains the data used to train the predictor.</p>
    #[serde(rename = "DatasetGroupArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dataset_group_arn: Option<String>,
    /// <p>Initially, the same as <code>CreationTime</code> (status is <code>CREATE_PENDING</code>). Updated when training starts (status changed to <code>CREATE_IN_PROGRESS</code>), and when training is complete (status changed to <code>ACTIVE</code>) or fails (status changed to <code>CREATE_FAILED</code>).</p>
    #[serde(rename = "LastModificationTime")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_modification_time: Option<f64>,
    /// <p>If an error occurred, an informational message about the error.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>The ARN of the predictor.</p>
    #[serde(rename = "PredictorArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_arn: Option<String>,
    /// <p>The name of the predictor.</p>
    #[serde(rename = "PredictorName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub predictor_name: Option<String>,
    /// <p>The status of the predictor. States include:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_PENDING</code>, <code>CREATE_IN_PROGRESS</code>, <code>CREATE_FAILED</code> </p> </li> <li> <p> <code>DELETE_PENDING</code>, <code>DELETE_IN_PROGRESS</code>, <code>DELETE_FAILED</code> </p> </li> <li> <p> <code>UPDATE_PENDING</code>, <code>UPDATE_IN_PROGRESS</code>, <code>UPDATE_FAILED</code> </p> </li> </ul> <note> <p>The <code>Status</code> of the predictor must be <code>ACTIVE</code> before you can use the predictor to create a forecast.</p> </note>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
1274
/// <p>The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the <a>DataSource</a> object that is submitted in the <a>CreateDatasetImportJob</a> request, and part of the <a>DataDestination</a> object that is submitted in the <a>CreateForecastExportJob</a> request.</p>
///
/// `Path` and `RoleArn` are required; only the KMS key is optional.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct S3Config {
    /// <p>The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key.</p>
    #[serde(rename = "KMSKeyArn")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kms_key_arn: Option<String>,
    /// <p>The path to an Amazon Simple Storage Service (Amazon S3) bucket or file(s) in an Amazon S3 bucket.</p>
    #[serde(rename = "Path")]
    pub path: String,
    /// <p>The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket or files. If you provide a value for the <code>KMSKeyArn</code> key, the role must allow access to the key.</p> <p>Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an <code>InvalidInputException</code> error.</p>
    #[serde(rename = "RoleArn")]
    pub role_arn: String,
}
1289
/// <p>Defines the fields of a dataset. You specify this object in the <a>CreateDataset</a> request.</p>
///
/// Appears in both requests and responses, so `Serialize` and `Deserialize`
/// are derived unconditionally.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct Schema {
    /// <p>An array of attributes specifying the name and type of each field in a dataset.</p>
    #[serde(rename = "Attributes")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub attributes: Option<Vec<SchemaAttribute>>,
}
1298
/// <p>An attribute of a schema, which defines a dataset field. A schema attribute is required for every field in a dataset. The <a>Schema</a> object contains an array of <code>SchemaAttribute</code> objects.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct SchemaAttribute {
    /// <p>The name of the dataset field.</p>
    #[serde(rename = "AttributeName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub attribute_name: Option<String>,
    /// <p>The data type of the field.</p>
    // NOTE(review): carried as a free-form string on the wire, not an enum.
    #[serde(rename = "AttributeType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub attribute_type: Option<String>,
}
1311
/// <p>Provides statistics for each data field imported into to an Amazon Forecast dataset with the <a>CreateDatasetImportJob</a> operation.</p>
///
/// Output-only shape: `Serialize` is derived just for tests and the
/// `serialize_structs` feature.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Statistics {
    /// <p>For a numeric field, the average value in the field.</p>
    #[serde(rename = "Avg")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub avg: Option<f64>,
    /// <p>The number of values in the field.</p>
    #[serde(rename = "Count")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    /// <p>The number of distinct values in the field.</p>
    #[serde(rename = "CountDistinct")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub count_distinct: Option<i64>,
    /// <p>The number of NAN (not a number) values in the field.</p>
    #[serde(rename = "CountNan")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub count_nan: Option<i64>,
    /// <p>The number of null values in the field.</p>
    #[serde(rename = "CountNull")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub count_null: Option<i64>,
    /// <p>For a numeric field, the maximum value in the field.</p>
    // Delivered as a JSON string by the service even though the value is
    // numeric, hence `String` rather than `f64` here (and for `Min` below).
    #[serde(rename = "Max")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max: Option<String>,
    /// <p>For a numeric field, the minimum value in the field.</p>
    #[serde(rename = "Min")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min: Option<String>,
    /// <p>For a numeric field, the standard deviation.</p>
    #[serde(rename = "Stddev")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stddev: Option<f64>,
}
1349
/// <p><p>Describes a supplementary feature of a dataset group. This object is part of the <a>InputDataConfig</a> object.</p> <p>The only supported feature is a holiday calendar. If you use the calendar, all data in the datasets should belong to the same country as the calendar. For the holiday calendar data, see the <a href="http://jollyday.sourceforge.net/data.html">Jollyday</a> web site.</p> <p>India and Korea&#39;s holidays are not included in the Jollyday library, but both are supported by Amazon Forecast. Their holidays are:</p> <p> <b>&quot;IN&quot; - INDIA</b> </p> <ul> <li> <p> <code>JANUARY 26 - REPUBLIC DAY</code> </p> </li> <li> <p> <code>AUGUST 15 - INDEPENDENCE DAY</code> </p> </li> <li> <p> <code>OCTOBER 2 GANDHI&#39;S BIRTHDAY</code> </p> </li> </ul> <p> <b>&quot;KR&quot; - KOREA</b> </p> <ul> <li> <p> <code>JANUARY 1 - NEW YEAR</code> </p> </li> <li> <p> <code>MARCH 1 - INDEPENDENCE MOVEMENT DAY</code> </p> </li> <li> <p> <code>MAY 5 - CHILDREN&#39;S DAY</code> </p> </li> <li> <p> <code>JUNE 6 - MEMORIAL DAY</code> </p> </li> <li> <p> <code>AUGUST 15 - LIBERATION DAY</code> </p> </li> <li> <p> <code>OCTOBER 3 - NATIONAL FOUNDATION DAY</code> </p> </li> <li> <p> <code>OCTOBER 9 - HANGEUL DAY</code> </p> </li> <li> <p> <code>DECEMBER 25 - CHRISTMAS DAY</code> </p> </li> </ul></p>
///
/// Both fields are required (non-`Option`) in the wire format.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct SupplementaryFeature {
    /// <p>The name of the feature. This must be "holiday".</p>
    #[serde(rename = "Name")]
    pub name: String,
    /// <p><p>One of the following 2 letter country codes:</p> <ul> <li> <p>&quot;AR&quot; - ARGENTINA</p> </li> <li> <p>&quot;AT&quot; - AUSTRIA</p> </li> <li> <p>&quot;AU&quot; - AUSTRALIA</p> </li> <li> <p>&quot;BE&quot; - BELGIUM</p> </li> <li> <p>&quot;BR&quot; - BRAZIL</p> </li> <li> <p>&quot;CA&quot; - CANADA</p> </li> <li> <p>&quot;CN&quot; - CHINA</p> </li> <li> <p>&quot;CZ&quot; - CZECH REPUBLIC</p> </li> <li> <p>&quot;DK&quot; - DENMARK</p> </li> <li> <p>&quot;EC&quot; - ECUADOR</p> </li> <li> <p>&quot;FI&quot; - FINLAND</p> </li> <li> <p>&quot;FR&quot; - FRANCE</p> </li> <li> <p>&quot;DE&quot; - GERMANY</p> </li> <li> <p>&quot;HU&quot; - HUNGARY</p> </li> <li> <p>&quot;IE&quot; - IRELAND</p> </li> <li> <p>&quot;IN&quot; - INDIA</p> </li> <li> <p>&quot;IT&quot; - ITALY</p> </li> <li> <p>&quot;JP&quot; - JAPAN</p> </li> <li> <p>&quot;KR&quot; - KOREA</p> </li> <li> <p>&quot;LU&quot; - LUXEMBOURG</p> </li> <li> <p>&quot;MX&quot; - MEXICO</p> </li> <li> <p>&quot;NL&quot; - NETHERLANDS</p> </li> <li> <p>&quot;NO&quot; - NORWAY</p> </li> <li> <p>&quot;PL&quot; - POLAND</p> </li> <li> <p>&quot;PT&quot; - PORTUGAL</p> </li> <li> <p>&quot;RU&quot; - RUSSIA</p> </li> <li> <p>&quot;ZA&quot; - SOUTH AFRICA</p> </li> <li> <p>&quot;ES&quot; - SPAIN</p> </li> <li> <p>&quot;SE&quot; - SWEDEN</p> </li> <li> <p>&quot;CH&quot; - SWITZERLAND</p> </li> <li> <p>&quot;US&quot; - UNITED STATES</p> </li> <li> <p>&quot;UK&quot; - UNITED KINGDOM</p> </li> </ul></p>
    #[serde(rename = "Value")]
    pub value: String,
}
1360
/// <p><p>The optional metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
///
/// Note: although the service describes the value as optional, this shape
/// carries both `Key` and `Value` as required `String`s on the wire.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct Tag {
    /// <p>One part of a key-value pair that makes up a tag. A <code>key</code> is a general label that acts like a category for more specific tag values.</p>
    #[serde(rename = "Key")]
    pub key: String,
    /// <p>The optional part of a key-value pair that makes up a tag. A <code>value</code> acts as a descriptor within a tag category (key).</p>
    #[serde(rename = "Value")]
    pub value: String,
}
1371
/// Input for the <a>TagResource</a> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct TagResourceRequest {
    /// <p>The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.</p>
    #[serde(rename = "ResourceArn")]
    pub resource_arn: String,
    /// <p><p>The tags to add to the resource. A tag is an array of key-value pairs.</p> <p>The following basic restrictions apply to tags:</p> <ul> <li> <p>Maximum number of tags per resource - 50.</p> </li> <li> <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p> </li> <li> <p>Maximum key length - 128 Unicode characters in UTF-8.</p> </li> <li> <p>Maximum value length - 256 Unicode characters in UTF-8.</p> </li> <li> <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p> </li> <li> <p>Tag keys and values are case sensitive.</p> </li> <li> <p>Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of <code>aws</code> do not count against your tags per resource limit.</p> </li> </ul></p>
    #[serde(rename = "Tags")]
    pub tags: Vec<Tag>,
}
1382
/// Output of the <a>TagResource</a> operation. The service returns an empty
/// payload on success.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct TagResourceResponse {}
1386
/// <p>The status, start time, and end time of a backtest, as well as a failure reason if applicable.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct TestWindowSummary {
    /// <p>If the test failed, the reason why it failed.</p>
    #[serde(rename = "Message")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    /// <p>The status of the test. Possible status values are:</p> <ul> <li> <p> <code>ACTIVE</code> </p> </li> <li> <p> <code>CREATE_IN_PROGRESS</code> </p> </li> <li> <p> <code>CREATE_FAILED</code> </p> </li> </ul>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    /// <p>The time at which the test ended.</p>
    // NOTE(review): f64 timestamp — presumably seconds since the Unix epoch;
    // confirm against rusoto's JSON timestamp handling.
    #[serde(rename = "TestWindowEnd")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub test_window_end: Option<f64>,
    /// <p>The time at which the test began.</p>
    #[serde(rename = "TestWindowStart")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub test_window_start: Option<f64>,
}
1408
/// Input for the <a>UntagResource</a> operation.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UntagResourceRequest {
    /// <p>The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast exports.</p>
    #[serde(rename = "ResourceArn")]
    pub resource_arn: String,
    /// <p>The keys of the tags to be removed.</p>
    #[serde(rename = "TagKeys")]
    pub tag_keys: Vec<String>,
}
1419
/// Output of the <a>UntagResource</a> operation. The service returns an empty
/// payload on success.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UntagResourceResponse {}
1423
/// Input for the <a>UpdateDatasetGroup</a> operation. Both fields are
/// required on the wire.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UpdateDatasetGroupRequest {
    /// <p>An array of the Amazon Resource Names (ARNs) of the datasets to add to the dataset group.</p>
    #[serde(rename = "DatasetArns")]
    pub dataset_arns: Vec<String>,
    /// <p>The ARN of the dataset group.</p>
    #[serde(rename = "DatasetGroupArn")]
    pub dataset_group_arn: String,
}
1434
/// Output of the <a>UpdateDatasetGroup</a> operation. The service returns an
/// empty payload on success.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateDatasetGroupResponse {}
1438
/// <p>The weighted loss value for a quantile. This object is part of the <a>Metrics</a> object.</p>
///
/// Output-only shape: `Serialize` is derived just for tests and the
/// `serialize_structs` feature.
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct WeightedQuantileLoss {
    /// <p>The difference between the predicted value and the actual value over the quantile, weighted (normalized) by dividing by the sum over all quantiles.</p>
    #[serde(rename = "LossValue")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub loss_value: Option<f64>,
    /// <p>The quantile. Quantiles divide a probability distribution into regions of equal probability. For example, if the distribution was divided into 5 regions of equal probability, the quantiles would be 0.2, 0.4, 0.6, and 0.8.</p>
    #[serde(rename = "Quantile")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quantile: Option<f64>,
}
1452
/// <p>The metrics for a time range within the evaluation portion of a dataset. This object is part of the <a>EvaluationResult</a> object.</p> <p>The <code>TestWindowStart</code> and <code>TestWindowEnd</code> parameters are determined by the <code>BackTestWindowOffset</code> parameter of the <a>EvaluationParameters</a> object.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct WindowSummary {
    /// <p><p>The type of evaluation.</p> <ul> <li> <p> <code>SUMMARY</code> - The average metrics across all windows.</p> </li> <li> <p> <code>COMPUTED</code> - The metrics for the specified window.</p> </li> </ul></p>
    #[serde(rename = "EvaluationType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub evaluation_type: Option<String>,
    /// <p>The number of data points within the window.</p>
    #[serde(rename = "ItemCount")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub item_count: Option<i64>,
    /// <p>Provides metrics used to evaluate the performance of a predictor.</p>
    #[serde(rename = "Metrics")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metrics: Option<Metrics>,
    /// <p>The timestamp that defines the end of the window.</p>
    // NOTE(review): f64 timestamp — presumably seconds since the Unix epoch;
    // confirm against rusoto's JSON timestamp handling.
    #[serde(rename = "TestWindowEnd")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub test_window_end: Option<f64>,
    /// <p>The timestamp that defines the start of the window.</p>
    #[serde(rename = "TestWindowStart")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub test_window_start: Option<f64>,
}
1478
/// Errors returned by CreateDataset
///
/// Each variant carries the human-readable message string extracted from the
/// service's JSON error response.
#[derive(Debug, PartialEq)]
pub enum CreateDatasetError {
    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
    InvalidInput(String),
    /// <p>The limit on the number of resources per account has been exceeded.</p>
    LimitExceeded(String),
    /// <p>There is already a resource with this name. Try again with a different name.</p>
    ResourceAlreadyExists(String),
}
1489
1490impl CreateDatasetError {
1491    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDatasetError> {
1492        if let Some(err) = proto::json::Error::parse(&res) {
1493            match err.typ.as_str() {
1494                "InvalidInputException" => {
1495                    return RusotoError::Service(CreateDatasetError::InvalidInput(err.msg))
1496                }
1497                "LimitExceededException" => {
1498                    return RusotoError::Service(CreateDatasetError::LimitExceeded(err.msg))
1499                }
1500                "ResourceAlreadyExistsException" => {
1501                    return RusotoError::Service(CreateDatasetError::ResourceAlreadyExists(err.msg))
1502                }
1503                "ValidationException" => return RusotoError::Validation(err.msg),
1504                _ => {}
1505            }
1506        }
1507        RusotoError::Unknown(res)
1508    }
1509}
1510impl fmt::Display for CreateDatasetError {
1511    #[allow(unused_variables)]
1512    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1513        match *self {
1514            CreateDatasetError::InvalidInput(ref cause) => write!(f, "{}", cause),
1515            CreateDatasetError::LimitExceeded(ref cause) => write!(f, "{}", cause),
1516            CreateDatasetError::ResourceAlreadyExists(ref cause) => write!(f, "{}", cause),
1517        }
1518    }
1519}
1520impl Error for CreateDatasetError {}
/// Errors returned by CreateDatasetGroup
///
/// Each variant carries the human-readable message string extracted from the
/// service's JSON error response.
#[derive(Debug, PartialEq)]
pub enum CreateDatasetGroupError {
    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
    InvalidInput(String),
    /// <p>The limit on the number of resources per account has been exceeded.</p>
    LimitExceeded(String),
    /// <p>There is already a resource with this name. Try again with a different name.</p>
    ResourceAlreadyExists(String),
    /// <p>The specified resource is in use.</p>
    ResourceInUse(String),
    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
    ResourceNotFound(String),
}
1535
1536impl CreateDatasetGroupError {
1537    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDatasetGroupError> {
1538        if let Some(err) = proto::json::Error::parse(&res) {
1539            match err.typ.as_str() {
1540                "InvalidInputException" => {
1541                    return RusotoError::Service(CreateDatasetGroupError::InvalidInput(err.msg))
1542                }
1543                "LimitExceededException" => {
1544                    return RusotoError::Service(CreateDatasetGroupError::LimitExceeded(err.msg))
1545                }
1546                "ResourceAlreadyExistsException" => {
1547                    return RusotoError::Service(CreateDatasetGroupError::ResourceAlreadyExists(
1548                        err.msg,
1549                    ))
1550                }
1551                "ResourceInUseException" => {
1552                    return RusotoError::Service(CreateDatasetGroupError::ResourceInUse(err.msg))
1553                }
1554                "ResourceNotFoundException" => {
1555                    return RusotoError::Service(CreateDatasetGroupError::ResourceNotFound(err.msg))
1556                }
1557                "ValidationException" => return RusotoError::Validation(err.msg),
1558                _ => {}
1559            }
1560        }
1561        RusotoError::Unknown(res)
1562    }
1563}
1564impl fmt::Display for CreateDatasetGroupError {
1565    #[allow(unused_variables)]
1566    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1567        match *self {
1568            CreateDatasetGroupError::InvalidInput(ref cause) => write!(f, "{}", cause),
1569            CreateDatasetGroupError::LimitExceeded(ref cause) => write!(f, "{}", cause),
1570            CreateDatasetGroupError::ResourceAlreadyExists(ref cause) => write!(f, "{}", cause),
1571            CreateDatasetGroupError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1572            CreateDatasetGroupError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1573        }
1574    }
1575}
1576impl Error for CreateDatasetGroupError {}
/// Errors returned by CreateDatasetImportJob
///
/// Each variant carries the human-readable message string extracted from the
/// service's JSON error response.
#[derive(Debug, PartialEq)]
pub enum CreateDatasetImportJobError {
    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
    InvalidInput(String),
    /// <p>The limit on the number of resources per account has been exceeded.</p>
    LimitExceeded(String),
    /// <p>There is already a resource with this name. Try again with a different name.</p>
    ResourceAlreadyExists(String),
    /// <p>The specified resource is in use.</p>
    ResourceInUse(String),
    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
    ResourceNotFound(String),
}
1591
1592impl CreateDatasetImportJobError {
1593    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDatasetImportJobError> {
1594        if let Some(err) = proto::json::Error::parse(&res) {
1595            match err.typ.as_str() {
1596                "InvalidInputException" => {
1597                    return RusotoError::Service(CreateDatasetImportJobError::InvalidInput(err.msg))
1598                }
1599                "LimitExceededException" => {
1600                    return RusotoError::Service(CreateDatasetImportJobError::LimitExceeded(
1601                        err.msg,
1602                    ))
1603                }
1604                "ResourceAlreadyExistsException" => {
1605                    return RusotoError::Service(
1606                        CreateDatasetImportJobError::ResourceAlreadyExists(err.msg),
1607                    )
1608                }
1609                "ResourceInUseException" => {
1610                    return RusotoError::Service(CreateDatasetImportJobError::ResourceInUse(
1611                        err.msg,
1612                    ))
1613                }
1614                "ResourceNotFoundException" => {
1615                    return RusotoError::Service(CreateDatasetImportJobError::ResourceNotFound(
1616                        err.msg,
1617                    ))
1618                }
1619                "ValidationException" => return RusotoError::Validation(err.msg),
1620                _ => {}
1621            }
1622        }
1623        RusotoError::Unknown(res)
1624    }
1625}
1626impl fmt::Display for CreateDatasetImportJobError {
1627    #[allow(unused_variables)]
1628    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1629        match *self {
1630            CreateDatasetImportJobError::InvalidInput(ref cause) => write!(f, "{}", cause),
1631            CreateDatasetImportJobError::LimitExceeded(ref cause) => write!(f, "{}", cause),
1632            CreateDatasetImportJobError::ResourceAlreadyExists(ref cause) => write!(f, "{}", cause),
1633            CreateDatasetImportJobError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1634            CreateDatasetImportJobError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1635        }
1636    }
1637}
1638impl Error for CreateDatasetImportJobError {}
1639/// Errors returned by CreateForecast
1640#[derive(Debug, PartialEq)]
1641pub enum CreateForecastError {
1642    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1643    InvalidInput(String),
1644    /// <p>The limit on the number of resources per account has been exceeded.</p>
1645    LimitExceeded(String),
1646    /// <p>There is already a resource with this name. Try again with a different name.</p>
1647    ResourceAlreadyExists(String),
1648    /// <p>The specified resource is in use.</p>
1649    ResourceInUse(String),
1650    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1651    ResourceNotFound(String),
1652}
1653
1654impl CreateForecastError {
1655    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateForecastError> {
1656        if let Some(err) = proto::json::Error::parse(&res) {
1657            match err.typ.as_str() {
1658                "InvalidInputException" => {
1659                    return RusotoError::Service(CreateForecastError::InvalidInput(err.msg))
1660                }
1661                "LimitExceededException" => {
1662                    return RusotoError::Service(CreateForecastError::LimitExceeded(err.msg))
1663                }
1664                "ResourceAlreadyExistsException" => {
1665                    return RusotoError::Service(CreateForecastError::ResourceAlreadyExists(
1666                        err.msg,
1667                    ))
1668                }
1669                "ResourceInUseException" => {
1670                    return RusotoError::Service(CreateForecastError::ResourceInUse(err.msg))
1671                }
1672                "ResourceNotFoundException" => {
1673                    return RusotoError::Service(CreateForecastError::ResourceNotFound(err.msg))
1674                }
1675                "ValidationException" => return RusotoError::Validation(err.msg),
1676                _ => {}
1677            }
1678        }
1679        RusotoError::Unknown(res)
1680    }
1681}
1682impl fmt::Display for CreateForecastError {
1683    #[allow(unused_variables)]
1684    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1685        match *self {
1686            CreateForecastError::InvalidInput(ref cause) => write!(f, "{}", cause),
1687            CreateForecastError::LimitExceeded(ref cause) => write!(f, "{}", cause),
1688            CreateForecastError::ResourceAlreadyExists(ref cause) => write!(f, "{}", cause),
1689            CreateForecastError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1690            CreateForecastError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1691        }
1692    }
1693}
1694impl Error for CreateForecastError {}
1695/// Errors returned by CreateForecastExportJob
1696#[derive(Debug, PartialEq)]
1697pub enum CreateForecastExportJobError {
1698    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1699    InvalidInput(String),
1700    /// <p>The limit on the number of resources per account has been exceeded.</p>
1701    LimitExceeded(String),
1702    /// <p>There is already a resource with this name. Try again with a different name.</p>
1703    ResourceAlreadyExists(String),
1704    /// <p>The specified resource is in use.</p>
1705    ResourceInUse(String),
1706    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1707    ResourceNotFound(String),
1708}
1709
1710impl CreateForecastExportJobError {
1711    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateForecastExportJobError> {
1712        if let Some(err) = proto::json::Error::parse(&res) {
1713            match err.typ.as_str() {
1714                "InvalidInputException" => {
1715                    return RusotoError::Service(CreateForecastExportJobError::InvalidInput(
1716                        err.msg,
1717                    ))
1718                }
1719                "LimitExceededException" => {
1720                    return RusotoError::Service(CreateForecastExportJobError::LimitExceeded(
1721                        err.msg,
1722                    ))
1723                }
1724                "ResourceAlreadyExistsException" => {
1725                    return RusotoError::Service(
1726                        CreateForecastExportJobError::ResourceAlreadyExists(err.msg),
1727                    )
1728                }
1729                "ResourceInUseException" => {
1730                    return RusotoError::Service(CreateForecastExportJobError::ResourceInUse(
1731                        err.msg,
1732                    ))
1733                }
1734                "ResourceNotFoundException" => {
1735                    return RusotoError::Service(CreateForecastExportJobError::ResourceNotFound(
1736                        err.msg,
1737                    ))
1738                }
1739                "ValidationException" => return RusotoError::Validation(err.msg),
1740                _ => {}
1741            }
1742        }
1743        RusotoError::Unknown(res)
1744    }
1745}
1746impl fmt::Display for CreateForecastExportJobError {
1747    #[allow(unused_variables)]
1748    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1749        match *self {
1750            CreateForecastExportJobError::InvalidInput(ref cause) => write!(f, "{}", cause),
1751            CreateForecastExportJobError::LimitExceeded(ref cause) => write!(f, "{}", cause),
1752            CreateForecastExportJobError::ResourceAlreadyExists(ref cause) => {
1753                write!(f, "{}", cause)
1754            }
1755            CreateForecastExportJobError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1756            CreateForecastExportJobError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1757        }
1758    }
1759}
1760impl Error for CreateForecastExportJobError {}
1761/// Errors returned by CreatePredictor
1762#[derive(Debug, PartialEq)]
1763pub enum CreatePredictorError {
1764    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1765    InvalidInput(String),
1766    /// <p>The limit on the number of resources per account has been exceeded.</p>
1767    LimitExceeded(String),
1768    /// <p>There is already a resource with this name. Try again with a different name.</p>
1769    ResourceAlreadyExists(String),
1770    /// <p>The specified resource is in use.</p>
1771    ResourceInUse(String),
1772    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1773    ResourceNotFound(String),
1774}
1775
1776impl CreatePredictorError {
1777    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreatePredictorError> {
1778        if let Some(err) = proto::json::Error::parse(&res) {
1779            match err.typ.as_str() {
1780                "InvalidInputException" => {
1781                    return RusotoError::Service(CreatePredictorError::InvalidInput(err.msg))
1782                }
1783                "LimitExceededException" => {
1784                    return RusotoError::Service(CreatePredictorError::LimitExceeded(err.msg))
1785                }
1786                "ResourceAlreadyExistsException" => {
1787                    return RusotoError::Service(CreatePredictorError::ResourceAlreadyExists(
1788                        err.msg,
1789                    ))
1790                }
1791                "ResourceInUseException" => {
1792                    return RusotoError::Service(CreatePredictorError::ResourceInUse(err.msg))
1793                }
1794                "ResourceNotFoundException" => {
1795                    return RusotoError::Service(CreatePredictorError::ResourceNotFound(err.msg))
1796                }
1797                "ValidationException" => return RusotoError::Validation(err.msg),
1798                _ => {}
1799            }
1800        }
1801        RusotoError::Unknown(res)
1802    }
1803}
1804impl fmt::Display for CreatePredictorError {
1805    #[allow(unused_variables)]
1806    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1807        match *self {
1808            CreatePredictorError::InvalidInput(ref cause) => write!(f, "{}", cause),
1809            CreatePredictorError::LimitExceeded(ref cause) => write!(f, "{}", cause),
1810            CreatePredictorError::ResourceAlreadyExists(ref cause) => write!(f, "{}", cause),
1811            CreatePredictorError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1812            CreatePredictorError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1813        }
1814    }
1815}
1816impl Error for CreatePredictorError {}
1817/// Errors returned by DeleteDataset
1818#[derive(Debug, PartialEq)]
1819pub enum DeleteDatasetError {
1820    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1821    InvalidInput(String),
1822    /// <p>The specified resource is in use.</p>
1823    ResourceInUse(String),
1824    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1825    ResourceNotFound(String),
1826}
1827
1828impl DeleteDatasetError {
1829    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteDatasetError> {
1830        if let Some(err) = proto::json::Error::parse(&res) {
1831            match err.typ.as_str() {
1832                "InvalidInputException" => {
1833                    return RusotoError::Service(DeleteDatasetError::InvalidInput(err.msg))
1834                }
1835                "ResourceInUseException" => {
1836                    return RusotoError::Service(DeleteDatasetError::ResourceInUse(err.msg))
1837                }
1838                "ResourceNotFoundException" => {
1839                    return RusotoError::Service(DeleteDatasetError::ResourceNotFound(err.msg))
1840                }
1841                "ValidationException" => return RusotoError::Validation(err.msg),
1842                _ => {}
1843            }
1844        }
1845        RusotoError::Unknown(res)
1846    }
1847}
1848impl fmt::Display for DeleteDatasetError {
1849    #[allow(unused_variables)]
1850    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1851        match *self {
1852            DeleteDatasetError::InvalidInput(ref cause) => write!(f, "{}", cause),
1853            DeleteDatasetError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1854            DeleteDatasetError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1855        }
1856    }
1857}
1858impl Error for DeleteDatasetError {}
1859/// Errors returned by DeleteDatasetGroup
1860#[derive(Debug, PartialEq)]
1861pub enum DeleteDatasetGroupError {
1862    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1863    InvalidInput(String),
1864    /// <p>The specified resource is in use.</p>
1865    ResourceInUse(String),
1866    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1867    ResourceNotFound(String),
1868}
1869
1870impl DeleteDatasetGroupError {
1871    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteDatasetGroupError> {
1872        if let Some(err) = proto::json::Error::parse(&res) {
1873            match err.typ.as_str() {
1874                "InvalidInputException" => {
1875                    return RusotoError::Service(DeleteDatasetGroupError::InvalidInput(err.msg))
1876                }
1877                "ResourceInUseException" => {
1878                    return RusotoError::Service(DeleteDatasetGroupError::ResourceInUse(err.msg))
1879                }
1880                "ResourceNotFoundException" => {
1881                    return RusotoError::Service(DeleteDatasetGroupError::ResourceNotFound(err.msg))
1882                }
1883                "ValidationException" => return RusotoError::Validation(err.msg),
1884                _ => {}
1885            }
1886        }
1887        RusotoError::Unknown(res)
1888    }
1889}
1890impl fmt::Display for DeleteDatasetGroupError {
1891    #[allow(unused_variables)]
1892    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1893        match *self {
1894            DeleteDatasetGroupError::InvalidInput(ref cause) => write!(f, "{}", cause),
1895            DeleteDatasetGroupError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1896            DeleteDatasetGroupError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1897        }
1898    }
1899}
1900impl Error for DeleteDatasetGroupError {}
1901/// Errors returned by DeleteDatasetImportJob
1902#[derive(Debug, PartialEq)]
1903pub enum DeleteDatasetImportJobError {
1904    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1905    InvalidInput(String),
1906    /// <p>The specified resource is in use.</p>
1907    ResourceInUse(String),
1908    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1909    ResourceNotFound(String),
1910}
1911
1912impl DeleteDatasetImportJobError {
1913    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteDatasetImportJobError> {
1914        if let Some(err) = proto::json::Error::parse(&res) {
1915            match err.typ.as_str() {
1916                "InvalidInputException" => {
1917                    return RusotoError::Service(DeleteDatasetImportJobError::InvalidInput(err.msg))
1918                }
1919                "ResourceInUseException" => {
1920                    return RusotoError::Service(DeleteDatasetImportJobError::ResourceInUse(
1921                        err.msg,
1922                    ))
1923                }
1924                "ResourceNotFoundException" => {
1925                    return RusotoError::Service(DeleteDatasetImportJobError::ResourceNotFound(
1926                        err.msg,
1927                    ))
1928                }
1929                "ValidationException" => return RusotoError::Validation(err.msg),
1930                _ => {}
1931            }
1932        }
1933        RusotoError::Unknown(res)
1934    }
1935}
1936impl fmt::Display for DeleteDatasetImportJobError {
1937    #[allow(unused_variables)]
1938    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1939        match *self {
1940            DeleteDatasetImportJobError::InvalidInput(ref cause) => write!(f, "{}", cause),
1941            DeleteDatasetImportJobError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1942            DeleteDatasetImportJobError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1943        }
1944    }
1945}
1946impl Error for DeleteDatasetImportJobError {}
1947/// Errors returned by DeleteForecast
1948#[derive(Debug, PartialEq)]
1949pub enum DeleteForecastError {
1950    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1951    InvalidInput(String),
1952    /// <p>The specified resource is in use.</p>
1953    ResourceInUse(String),
1954    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1955    ResourceNotFound(String),
1956}
1957
1958impl DeleteForecastError {
1959    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteForecastError> {
1960        if let Some(err) = proto::json::Error::parse(&res) {
1961            match err.typ.as_str() {
1962                "InvalidInputException" => {
1963                    return RusotoError::Service(DeleteForecastError::InvalidInput(err.msg))
1964                }
1965                "ResourceInUseException" => {
1966                    return RusotoError::Service(DeleteForecastError::ResourceInUse(err.msg))
1967                }
1968                "ResourceNotFoundException" => {
1969                    return RusotoError::Service(DeleteForecastError::ResourceNotFound(err.msg))
1970                }
1971                "ValidationException" => return RusotoError::Validation(err.msg),
1972                _ => {}
1973            }
1974        }
1975        RusotoError::Unknown(res)
1976    }
1977}
1978impl fmt::Display for DeleteForecastError {
1979    #[allow(unused_variables)]
1980    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1981        match *self {
1982            DeleteForecastError::InvalidInput(ref cause) => write!(f, "{}", cause),
1983            DeleteForecastError::ResourceInUse(ref cause) => write!(f, "{}", cause),
1984            DeleteForecastError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
1985        }
1986    }
1987}
1988impl Error for DeleteForecastError {}
1989/// Errors returned by DeleteForecastExportJob
1990#[derive(Debug, PartialEq)]
1991pub enum DeleteForecastExportJobError {
1992    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
1993    InvalidInput(String),
1994    /// <p>The specified resource is in use.</p>
1995    ResourceInUse(String),
1996    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
1997    ResourceNotFound(String),
1998}
1999
2000impl DeleteForecastExportJobError {
2001    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteForecastExportJobError> {
2002        if let Some(err) = proto::json::Error::parse(&res) {
2003            match err.typ.as_str() {
2004                "InvalidInputException" => {
2005                    return RusotoError::Service(DeleteForecastExportJobError::InvalidInput(
2006                        err.msg,
2007                    ))
2008                }
2009                "ResourceInUseException" => {
2010                    return RusotoError::Service(DeleteForecastExportJobError::ResourceInUse(
2011                        err.msg,
2012                    ))
2013                }
2014                "ResourceNotFoundException" => {
2015                    return RusotoError::Service(DeleteForecastExportJobError::ResourceNotFound(
2016                        err.msg,
2017                    ))
2018                }
2019                "ValidationException" => return RusotoError::Validation(err.msg),
2020                _ => {}
2021            }
2022        }
2023        RusotoError::Unknown(res)
2024    }
2025}
2026impl fmt::Display for DeleteForecastExportJobError {
2027    #[allow(unused_variables)]
2028    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2029        match *self {
2030            DeleteForecastExportJobError::InvalidInput(ref cause) => write!(f, "{}", cause),
2031            DeleteForecastExportJobError::ResourceInUse(ref cause) => write!(f, "{}", cause),
2032            DeleteForecastExportJobError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2033        }
2034    }
2035}
2036impl Error for DeleteForecastExportJobError {}
2037/// Errors returned by DeletePredictor
2038#[derive(Debug, PartialEq)]
2039pub enum DeletePredictorError {
2040    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2041    InvalidInput(String),
2042    /// <p>The specified resource is in use.</p>
2043    ResourceInUse(String),
2044    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2045    ResourceNotFound(String),
2046}
2047
2048impl DeletePredictorError {
2049    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeletePredictorError> {
2050        if let Some(err) = proto::json::Error::parse(&res) {
2051            match err.typ.as_str() {
2052                "InvalidInputException" => {
2053                    return RusotoError::Service(DeletePredictorError::InvalidInput(err.msg))
2054                }
2055                "ResourceInUseException" => {
2056                    return RusotoError::Service(DeletePredictorError::ResourceInUse(err.msg))
2057                }
2058                "ResourceNotFoundException" => {
2059                    return RusotoError::Service(DeletePredictorError::ResourceNotFound(err.msg))
2060                }
2061                "ValidationException" => return RusotoError::Validation(err.msg),
2062                _ => {}
2063            }
2064        }
2065        RusotoError::Unknown(res)
2066    }
2067}
2068impl fmt::Display for DeletePredictorError {
2069    #[allow(unused_variables)]
2070    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2071        match *self {
2072            DeletePredictorError::InvalidInput(ref cause) => write!(f, "{}", cause),
2073            DeletePredictorError::ResourceInUse(ref cause) => write!(f, "{}", cause),
2074            DeletePredictorError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2075        }
2076    }
2077}
2078impl Error for DeletePredictorError {}
2079/// Errors returned by DescribeDataset
2080#[derive(Debug, PartialEq)]
2081pub enum DescribeDatasetError {
2082    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2083    InvalidInput(String),
2084    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2085    ResourceNotFound(String),
2086}
2087
2088impl DescribeDatasetError {
2089    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeDatasetError> {
2090        if let Some(err) = proto::json::Error::parse(&res) {
2091            match err.typ.as_str() {
2092                "InvalidInputException" => {
2093                    return RusotoError::Service(DescribeDatasetError::InvalidInput(err.msg))
2094                }
2095                "ResourceNotFoundException" => {
2096                    return RusotoError::Service(DescribeDatasetError::ResourceNotFound(err.msg))
2097                }
2098                "ValidationException" => return RusotoError::Validation(err.msg),
2099                _ => {}
2100            }
2101        }
2102        RusotoError::Unknown(res)
2103    }
2104}
2105impl fmt::Display for DescribeDatasetError {
2106    #[allow(unused_variables)]
2107    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2108        match *self {
2109            DescribeDatasetError::InvalidInput(ref cause) => write!(f, "{}", cause),
2110            DescribeDatasetError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2111        }
2112    }
2113}
2114impl Error for DescribeDatasetError {}
2115/// Errors returned by DescribeDatasetGroup
2116#[derive(Debug, PartialEq)]
2117pub enum DescribeDatasetGroupError {
2118    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2119    InvalidInput(String),
2120    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2121    ResourceNotFound(String),
2122}
2123
2124impl DescribeDatasetGroupError {
2125    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeDatasetGroupError> {
2126        if let Some(err) = proto::json::Error::parse(&res) {
2127            match err.typ.as_str() {
2128                "InvalidInputException" => {
2129                    return RusotoError::Service(DescribeDatasetGroupError::InvalidInput(err.msg))
2130                }
2131                "ResourceNotFoundException" => {
2132                    return RusotoError::Service(DescribeDatasetGroupError::ResourceNotFound(
2133                        err.msg,
2134                    ))
2135                }
2136                "ValidationException" => return RusotoError::Validation(err.msg),
2137                _ => {}
2138            }
2139        }
2140        RusotoError::Unknown(res)
2141    }
2142}
2143impl fmt::Display for DescribeDatasetGroupError {
2144    #[allow(unused_variables)]
2145    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2146        match *self {
2147            DescribeDatasetGroupError::InvalidInput(ref cause) => write!(f, "{}", cause),
2148            DescribeDatasetGroupError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2149        }
2150    }
2151}
2152impl Error for DescribeDatasetGroupError {}
2153/// Errors returned by DescribeDatasetImportJob
2154#[derive(Debug, PartialEq)]
2155pub enum DescribeDatasetImportJobError {
2156    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2157    InvalidInput(String),
2158    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2159    ResourceNotFound(String),
2160}
2161
2162impl DescribeDatasetImportJobError {
2163    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeDatasetImportJobError> {
2164        if let Some(err) = proto::json::Error::parse(&res) {
2165            match err.typ.as_str() {
2166                "InvalidInputException" => {
2167                    return RusotoError::Service(DescribeDatasetImportJobError::InvalidInput(
2168                        err.msg,
2169                    ))
2170                }
2171                "ResourceNotFoundException" => {
2172                    return RusotoError::Service(DescribeDatasetImportJobError::ResourceNotFound(
2173                        err.msg,
2174                    ))
2175                }
2176                "ValidationException" => return RusotoError::Validation(err.msg),
2177                _ => {}
2178            }
2179        }
2180        RusotoError::Unknown(res)
2181    }
2182}
2183impl fmt::Display for DescribeDatasetImportJobError {
2184    #[allow(unused_variables)]
2185    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2186        match *self {
2187            DescribeDatasetImportJobError::InvalidInput(ref cause) => write!(f, "{}", cause),
2188            DescribeDatasetImportJobError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2189        }
2190    }
2191}
2192impl Error for DescribeDatasetImportJobError {}
2193/// Errors returned by DescribeForecast
2194#[derive(Debug, PartialEq)]
2195pub enum DescribeForecastError {
2196    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2197    InvalidInput(String),
2198    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2199    ResourceNotFound(String),
2200}
2201
2202impl DescribeForecastError {
2203    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeForecastError> {
2204        if let Some(err) = proto::json::Error::parse(&res) {
2205            match err.typ.as_str() {
2206                "InvalidInputException" => {
2207                    return RusotoError::Service(DescribeForecastError::InvalidInput(err.msg))
2208                }
2209                "ResourceNotFoundException" => {
2210                    return RusotoError::Service(DescribeForecastError::ResourceNotFound(err.msg))
2211                }
2212                "ValidationException" => return RusotoError::Validation(err.msg),
2213                _ => {}
2214            }
2215        }
2216        RusotoError::Unknown(res)
2217    }
2218}
2219impl fmt::Display for DescribeForecastError {
2220    #[allow(unused_variables)]
2221    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2222        match *self {
2223            DescribeForecastError::InvalidInput(ref cause) => write!(f, "{}", cause),
2224            DescribeForecastError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2225        }
2226    }
2227}
2228impl Error for DescribeForecastError {}
2229/// Errors returned by DescribeForecastExportJob
2230#[derive(Debug, PartialEq)]
2231pub enum DescribeForecastExportJobError {
2232    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2233    InvalidInput(String),
2234    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2235    ResourceNotFound(String),
2236}
2237
2238impl DescribeForecastExportJobError {
2239    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeForecastExportJobError> {
2240        if let Some(err) = proto::json::Error::parse(&res) {
2241            match err.typ.as_str() {
2242                "InvalidInputException" => {
2243                    return RusotoError::Service(DescribeForecastExportJobError::InvalidInput(
2244                        err.msg,
2245                    ))
2246                }
2247                "ResourceNotFoundException" => {
2248                    return RusotoError::Service(DescribeForecastExportJobError::ResourceNotFound(
2249                        err.msg,
2250                    ))
2251                }
2252                "ValidationException" => return RusotoError::Validation(err.msg),
2253                _ => {}
2254            }
2255        }
2256        RusotoError::Unknown(res)
2257    }
2258}
2259impl fmt::Display for DescribeForecastExportJobError {
2260    #[allow(unused_variables)]
2261    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2262        match *self {
2263            DescribeForecastExportJobError::InvalidInput(ref cause) => write!(f, "{}", cause),
2264            DescribeForecastExportJobError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2265        }
2266    }
2267}
2268impl Error for DescribeForecastExportJobError {}
2269/// Errors returned by DescribePredictor
2270#[derive(Debug, PartialEq)]
2271pub enum DescribePredictorError {
2272    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2273    InvalidInput(String),
2274    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2275    ResourceNotFound(String),
2276}
2277
2278impl DescribePredictorError {
2279    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribePredictorError> {
2280        if let Some(err) = proto::json::Error::parse(&res) {
2281            match err.typ.as_str() {
2282                "InvalidInputException" => {
2283                    return RusotoError::Service(DescribePredictorError::InvalidInput(err.msg))
2284                }
2285                "ResourceNotFoundException" => {
2286                    return RusotoError::Service(DescribePredictorError::ResourceNotFound(err.msg))
2287                }
2288                "ValidationException" => return RusotoError::Validation(err.msg),
2289                _ => {}
2290            }
2291        }
2292        RusotoError::Unknown(res)
2293    }
2294}
2295impl fmt::Display for DescribePredictorError {
2296    #[allow(unused_variables)]
2297    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2298        match *self {
2299            DescribePredictorError::InvalidInput(ref cause) => write!(f, "{}", cause),
2300            DescribePredictorError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2301        }
2302    }
2303}
2304impl Error for DescribePredictorError {}
2305/// Errors returned by GetAccuracyMetrics
2306#[derive(Debug, PartialEq)]
2307pub enum GetAccuracyMetricsError {
2308    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2309    InvalidInput(String),
2310    /// <p>The specified resource is in use.</p>
2311    ResourceInUse(String),
2312    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2313    ResourceNotFound(String),
2314}
2315
2316impl GetAccuracyMetricsError {
2317    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GetAccuracyMetricsError> {
2318        if let Some(err) = proto::json::Error::parse(&res) {
2319            match err.typ.as_str() {
2320                "InvalidInputException" => {
2321                    return RusotoError::Service(GetAccuracyMetricsError::InvalidInput(err.msg))
2322                }
2323                "ResourceInUseException" => {
2324                    return RusotoError::Service(GetAccuracyMetricsError::ResourceInUse(err.msg))
2325                }
2326                "ResourceNotFoundException" => {
2327                    return RusotoError::Service(GetAccuracyMetricsError::ResourceNotFound(err.msg))
2328                }
2329                "ValidationException" => return RusotoError::Validation(err.msg),
2330                _ => {}
2331            }
2332        }
2333        RusotoError::Unknown(res)
2334    }
2335}
2336impl fmt::Display for GetAccuracyMetricsError {
2337    #[allow(unused_variables)]
2338    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2339        match *self {
2340            GetAccuracyMetricsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2341            GetAccuracyMetricsError::ResourceInUse(ref cause) => write!(f, "{}", cause),
2342            GetAccuracyMetricsError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2343        }
2344    }
2345}
2346impl Error for GetAccuracyMetricsError {}
2347/// Errors returned by ListDatasetGroups
2348#[derive(Debug, PartialEq)]
2349pub enum ListDatasetGroupsError {
2350    /// <p>The token is not valid. Tokens expire after 24 hours.</p>
2351    InvalidNextToken(String),
2352}
2353
2354impl ListDatasetGroupsError {
2355    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListDatasetGroupsError> {
2356        if let Some(err) = proto::json::Error::parse(&res) {
2357            match err.typ.as_str() {
2358                "InvalidNextTokenException" => {
2359                    return RusotoError::Service(ListDatasetGroupsError::InvalidNextToken(err.msg))
2360                }
2361                "ValidationException" => return RusotoError::Validation(err.msg),
2362                _ => {}
2363            }
2364        }
2365        RusotoError::Unknown(res)
2366    }
2367}
2368impl fmt::Display for ListDatasetGroupsError {
2369    #[allow(unused_variables)]
2370    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2371        match *self {
2372            ListDatasetGroupsError::InvalidNextToken(ref cause) => write!(f, "{}", cause),
2373        }
2374    }
2375}
2376impl Error for ListDatasetGroupsError {}
2377/// Errors returned by ListDatasetImportJobs
2378#[derive(Debug, PartialEq)]
2379pub enum ListDatasetImportJobsError {
2380    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2381    InvalidInput(String),
2382    /// <p>The token is not valid. Tokens expire after 24 hours.</p>
2383    InvalidNextToken(String),
2384}
2385
2386impl ListDatasetImportJobsError {
2387    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListDatasetImportJobsError> {
2388        if let Some(err) = proto::json::Error::parse(&res) {
2389            match err.typ.as_str() {
2390                "InvalidInputException" => {
2391                    return RusotoError::Service(ListDatasetImportJobsError::InvalidInput(err.msg))
2392                }
2393                "InvalidNextTokenException" => {
2394                    return RusotoError::Service(ListDatasetImportJobsError::InvalidNextToken(
2395                        err.msg,
2396                    ))
2397                }
2398                "ValidationException" => return RusotoError::Validation(err.msg),
2399                _ => {}
2400            }
2401        }
2402        RusotoError::Unknown(res)
2403    }
2404}
2405impl fmt::Display for ListDatasetImportJobsError {
2406    #[allow(unused_variables)]
2407    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2408        match *self {
2409            ListDatasetImportJobsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2410            ListDatasetImportJobsError::InvalidNextToken(ref cause) => write!(f, "{}", cause),
2411        }
2412    }
2413}
2414impl Error for ListDatasetImportJobsError {}
2415/// Errors returned by ListDatasets
2416#[derive(Debug, PartialEq)]
2417pub enum ListDatasetsError {
2418    /// <p>The token is not valid. Tokens expire after 24 hours.</p>
2419    InvalidNextToken(String),
2420}
2421
2422impl ListDatasetsError {
2423    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListDatasetsError> {
2424        if let Some(err) = proto::json::Error::parse(&res) {
2425            match err.typ.as_str() {
2426                "InvalidNextTokenException" => {
2427                    return RusotoError::Service(ListDatasetsError::InvalidNextToken(err.msg))
2428                }
2429                "ValidationException" => return RusotoError::Validation(err.msg),
2430                _ => {}
2431            }
2432        }
2433        RusotoError::Unknown(res)
2434    }
2435}
2436impl fmt::Display for ListDatasetsError {
2437    #[allow(unused_variables)]
2438    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2439        match *self {
2440            ListDatasetsError::InvalidNextToken(ref cause) => write!(f, "{}", cause),
2441        }
2442    }
2443}
2444impl Error for ListDatasetsError {}
2445/// Errors returned by ListForecastExportJobs
2446#[derive(Debug, PartialEq)]
2447pub enum ListForecastExportJobsError {
2448    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2449    InvalidInput(String),
2450    /// <p>The token is not valid. Tokens expire after 24 hours.</p>
2451    InvalidNextToken(String),
2452}
2453
2454impl ListForecastExportJobsError {
2455    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListForecastExportJobsError> {
2456        if let Some(err) = proto::json::Error::parse(&res) {
2457            match err.typ.as_str() {
2458                "InvalidInputException" => {
2459                    return RusotoError::Service(ListForecastExportJobsError::InvalidInput(err.msg))
2460                }
2461                "InvalidNextTokenException" => {
2462                    return RusotoError::Service(ListForecastExportJobsError::InvalidNextToken(
2463                        err.msg,
2464                    ))
2465                }
2466                "ValidationException" => return RusotoError::Validation(err.msg),
2467                _ => {}
2468            }
2469        }
2470        RusotoError::Unknown(res)
2471    }
2472}
2473impl fmt::Display for ListForecastExportJobsError {
2474    #[allow(unused_variables)]
2475    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2476        match *self {
2477            ListForecastExportJobsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2478            ListForecastExportJobsError::InvalidNextToken(ref cause) => write!(f, "{}", cause),
2479        }
2480    }
2481}
2482impl Error for ListForecastExportJobsError {}
2483/// Errors returned by ListForecasts
2484#[derive(Debug, PartialEq)]
2485pub enum ListForecastsError {
2486    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2487    InvalidInput(String),
2488    /// <p>The token is not valid. Tokens expire after 24 hours.</p>
2489    InvalidNextToken(String),
2490}
2491
2492impl ListForecastsError {
2493    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListForecastsError> {
2494        if let Some(err) = proto::json::Error::parse(&res) {
2495            match err.typ.as_str() {
2496                "InvalidInputException" => {
2497                    return RusotoError::Service(ListForecastsError::InvalidInput(err.msg))
2498                }
2499                "InvalidNextTokenException" => {
2500                    return RusotoError::Service(ListForecastsError::InvalidNextToken(err.msg))
2501                }
2502                "ValidationException" => return RusotoError::Validation(err.msg),
2503                _ => {}
2504            }
2505        }
2506        RusotoError::Unknown(res)
2507    }
2508}
2509impl fmt::Display for ListForecastsError {
2510    #[allow(unused_variables)]
2511    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2512        match *self {
2513            ListForecastsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2514            ListForecastsError::InvalidNextToken(ref cause) => write!(f, "{}", cause),
2515        }
2516    }
2517}
2518impl Error for ListForecastsError {}
2519/// Errors returned by ListPredictors
2520#[derive(Debug, PartialEq)]
2521pub enum ListPredictorsError {
2522    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2523    InvalidInput(String),
2524    /// <p>The token is not valid. Tokens expire after 24 hours.</p>
2525    InvalidNextToken(String),
2526}
2527
2528impl ListPredictorsError {
2529    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListPredictorsError> {
2530        if let Some(err) = proto::json::Error::parse(&res) {
2531            match err.typ.as_str() {
2532                "InvalidInputException" => {
2533                    return RusotoError::Service(ListPredictorsError::InvalidInput(err.msg))
2534                }
2535                "InvalidNextTokenException" => {
2536                    return RusotoError::Service(ListPredictorsError::InvalidNextToken(err.msg))
2537                }
2538                "ValidationException" => return RusotoError::Validation(err.msg),
2539                _ => {}
2540            }
2541        }
2542        RusotoError::Unknown(res)
2543    }
2544}
2545impl fmt::Display for ListPredictorsError {
2546    #[allow(unused_variables)]
2547    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2548        match *self {
2549            ListPredictorsError::InvalidInput(ref cause) => write!(f, "{}", cause),
2550            ListPredictorsError::InvalidNextToken(ref cause) => write!(f, "{}", cause),
2551        }
2552    }
2553}
2554impl Error for ListPredictorsError {}
2555/// Errors returned by ListTagsForResource
2556#[derive(Debug, PartialEq)]
2557pub enum ListTagsForResourceError {
2558    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2559    InvalidInput(String),
2560    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2561    ResourceNotFound(String),
2562}
2563
2564impl ListTagsForResourceError {
2565    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
2566        if let Some(err) = proto::json::Error::parse(&res) {
2567            match err.typ.as_str() {
2568                "InvalidInputException" => {
2569                    return RusotoError::Service(ListTagsForResourceError::InvalidInput(err.msg))
2570                }
2571                "ResourceNotFoundException" => {
2572                    return RusotoError::Service(ListTagsForResourceError::ResourceNotFound(
2573                        err.msg,
2574                    ))
2575                }
2576                "ValidationException" => return RusotoError::Validation(err.msg),
2577                _ => {}
2578            }
2579        }
2580        RusotoError::Unknown(res)
2581    }
2582}
2583impl fmt::Display for ListTagsForResourceError {
2584    #[allow(unused_variables)]
2585    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2586        match *self {
2587            ListTagsForResourceError::InvalidInput(ref cause) => write!(f, "{}", cause),
2588            ListTagsForResourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2589        }
2590    }
2591}
2592impl Error for ListTagsForResourceError {}
2593/// Errors returned by TagResource
2594#[derive(Debug, PartialEq)]
2595pub enum TagResourceError {
2596    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2597    InvalidInput(String),
2598    /// <p>The limit on the number of resources per account has been exceeded.</p>
2599    LimitExceeded(String),
2600    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2601    ResourceNotFound(String),
2602}
2603
2604impl TagResourceError {
2605    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> {
2606        if let Some(err) = proto::json::Error::parse(&res) {
2607            match err.typ.as_str() {
2608                "InvalidInputException" => {
2609                    return RusotoError::Service(TagResourceError::InvalidInput(err.msg))
2610                }
2611                "LimitExceededException" => {
2612                    return RusotoError::Service(TagResourceError::LimitExceeded(err.msg))
2613                }
2614                "ResourceNotFoundException" => {
2615                    return RusotoError::Service(TagResourceError::ResourceNotFound(err.msg))
2616                }
2617                "ValidationException" => return RusotoError::Validation(err.msg),
2618                _ => {}
2619            }
2620        }
2621        RusotoError::Unknown(res)
2622    }
2623}
2624impl fmt::Display for TagResourceError {
2625    #[allow(unused_variables)]
2626    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2627        match *self {
2628            TagResourceError::InvalidInput(ref cause) => write!(f, "{}", cause),
2629            TagResourceError::LimitExceeded(ref cause) => write!(f, "{}", cause),
2630            TagResourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2631        }
2632    }
2633}
2634impl Error for TagResourceError {}
2635/// Errors returned by UntagResource
2636#[derive(Debug, PartialEq)]
2637pub enum UntagResourceError {
2638    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2639    InvalidInput(String),
2640    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2641    ResourceNotFound(String),
2642}
2643
2644impl UntagResourceError {
2645    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> {
2646        if let Some(err) = proto::json::Error::parse(&res) {
2647            match err.typ.as_str() {
2648                "InvalidInputException" => {
2649                    return RusotoError::Service(UntagResourceError::InvalidInput(err.msg))
2650                }
2651                "ResourceNotFoundException" => {
2652                    return RusotoError::Service(UntagResourceError::ResourceNotFound(err.msg))
2653                }
2654                "ValidationException" => return RusotoError::Validation(err.msg),
2655                _ => {}
2656            }
2657        }
2658        RusotoError::Unknown(res)
2659    }
2660}
2661impl fmt::Display for UntagResourceError {
2662    #[allow(unused_variables)]
2663    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2664        match *self {
2665            UntagResourceError::InvalidInput(ref cause) => write!(f, "{}", cause),
2666            UntagResourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2667        }
2668    }
2669}
2670impl Error for UntagResourceError {}
2671/// Errors returned by UpdateDatasetGroup
2672#[derive(Debug, PartialEq)]
2673pub enum UpdateDatasetGroupError {
2674    /// <p>We can't process the request because it includes an invalid value or a value that exceeds the valid range.</p>
2675    InvalidInput(String),
2676    /// <p>The specified resource is in use.</p>
2677    ResourceInUse(String),
2678    /// <p>We can't find a resource with that Amazon Resource Name (ARN). Check the ARN and try again.</p>
2679    ResourceNotFound(String),
2680}
2681
2682impl UpdateDatasetGroupError {
2683    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateDatasetGroupError> {
2684        if let Some(err) = proto::json::Error::parse(&res) {
2685            match err.typ.as_str() {
2686                "InvalidInputException" => {
2687                    return RusotoError::Service(UpdateDatasetGroupError::InvalidInput(err.msg))
2688                }
2689                "ResourceInUseException" => {
2690                    return RusotoError::Service(UpdateDatasetGroupError::ResourceInUse(err.msg))
2691                }
2692                "ResourceNotFoundException" => {
2693                    return RusotoError::Service(UpdateDatasetGroupError::ResourceNotFound(err.msg))
2694                }
2695                "ValidationException" => return RusotoError::Validation(err.msg),
2696                _ => {}
2697            }
2698        }
2699        RusotoError::Unknown(res)
2700    }
2701}
2702impl fmt::Display for UpdateDatasetGroupError {
2703    #[allow(unused_variables)]
2704    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2705        match *self {
2706            UpdateDatasetGroupError::InvalidInput(ref cause) => write!(f, "{}", cause),
2707            UpdateDatasetGroupError::ResourceInUse(ref cause) => write!(f, "{}", cause),
2708            UpdateDatasetGroupError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
2709        }
2710    }
2711}
2712impl Error for UpdateDatasetGroupError {}
2713/// Trait representing the capabilities of the Amazon Forecast Service API. Amazon Forecast Service clients implement this trait.
2714#[async_trait]
2715pub trait Forecast {
2716    /// <p><p>Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:</p> <ul> <li> <p> <i> <code>DataFrequency</code> </i> - How frequently your historical time-series data is collected.</p> </li> <li> <p> <i> <code>Domain</code> </i> and <i> <code>DatasetType</code> </i> - Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.</p> </li> <li> <p> <i> <code>Schema</code> </i> - A schema specifies the fields in the dataset, including the field name and data type.</p> </li> </ul> <p>After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see <a>howitworks-datasets-groups</a>.</p> <p>To get a list of all your datasets, use the <a>ListDatasets</a> operation.</p> <p>For example Forecast datasets, see the <a href="https://github.com/aws-samples/amazon-forecast-samples">Amazon Forecast Sample GitHub repository</a>.</p> <note> <p>The <code>Status</code> of a dataset must be <code>ACTIVE</code> before you can import training data. Use the <a>DescribeDataset</a> operation to get the status.</p> </note></p>
2717    async fn create_dataset(
2718        &self,
2719        input: CreateDatasetRequest,
2720    ) -> Result<CreateDatasetResponse, RusotoError<CreateDatasetError>>;
2721
2722    /// <p><p>Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the <a>UpdateDatasetGroup</a> operation.</p> <p>After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see <a>howitworks-datasets-groups</a>.</p> <p>To get a list of all your datasets groups, use the <a>ListDatasetGroups</a> operation.</p> <note> <p>The <code>Status</code> of a dataset group must be <code>ACTIVE</code> before you can create use the dataset group to create a predictor. To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> </note></p>
2723    async fn create_dataset_group(
2724        &self,
2725        input: CreateDatasetGroupRequest,
2726    ) -> Result<CreateDatasetGroupResponse, RusotoError<CreateDatasetGroupError>>;
2727
2728    /// <p>Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.</p> <p>You must specify a <a>DataSource</a> object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see <a>aws-forecast-iam-roles</a>.</p> <p>The training data must be in CSV format. The delimiter must be a comma (,).</p> <p>You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.</p> <p>Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.</p> <p>To get a list of all your dataset import jobs, filtered by specified criteria, use the <a>ListDatasetImportJobs</a> operation.</p>
2729    async fn create_dataset_import_job(
2730        &self,
2731        input: CreateDatasetImportJobRequest,
2732    ) -> Result<CreateDatasetImportJobResponse, RusotoError<CreateDatasetImportJobError>>;
2733
2734    /// <p><p>Creates a forecast for each item in the <code>TARGET<em>TIME</em>SERIES</code> dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the <a>CreateForecastExportJob</a> operation.</p> <p>The range of the forecast is determined by the <code>ForecastHorizon</code> value, which you specify in the <a>CreatePredictor</a> request. When you query a forecast, you can request a specific date range within the forecast.</p> <p>To get a list of all your forecasts, use the <a>ListForecasts</a> operation.</p> <note> <p>The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.</p> </note> <p>For more information, see <a>howitworks-forecast</a>.</p> <note> <p>The <code>Status</code> of the forecast must be <code>ACTIVE</code> before you can query or export the forecast. Use the <a>DescribeForecast</a> operation to get the status.</p> </note></p>
2735    async fn create_forecast(
2736        &self,
2737        input: CreateForecastRequest,
2738    ) -> Result<CreateForecastResponse, RusotoError<CreateForecastError>>;
2739
2740    /// <p><p>Exports a forecast created by the <a>CreateForecast</a> operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:</p> <p>&lt;ForecastExportJobName&gt;<em>&lt;ExportTimestamp&gt;</em>&lt;PartNumber&gt;</p> <p>where the &lt;ExportTimestamp&gt; component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see <a>aws-forecast-iam-roles</a>.</p> <p>For more information, see <a>howitworks-forecast</a>.</p> <p>To get a list of all your forecast export jobs, use the <a>ListForecastExportJobs</a> operation.</p> <note> <p>The <code>Status</code> of the forecast export job must be <code>ACTIVE</code> before you can access the forecast in your Amazon S3 bucket. To get the status, use the <a>DescribeForecastExportJob</a> operation.</p> </note></p>
2741    async fn create_forecast_export_job(
2742        &self,
2743        input: CreateForecastExportJobRequest,
2744    ) -> Result<CreateForecastExportJobResponse, RusotoError<CreateForecastExportJobError>>;
2745
2746    /// <p><p>Creates an Amazon Forecast predictor.</p> <p>In the request, you provide a dataset group and either specify an algorithm or let Amazon Forecast choose the algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.</p> <p>Amazon Forecast uses the chosen algorithm to train a model using the latest version of the datasets in the specified dataset group. The result is called a predictor. You then generate a forecast using the <a>CreateForecast</a> operation.</p> <p>After training a model, the <code>CreatePredictor</code> operation also evaluates it. To see the evaluation metrics, use the <a>GetAccuracyMetrics</a> operation. Always review the evaluation metrics before deciding to use the predictor to generate a forecast.</p> <p>Optionally, you can specify a featurization configuration to fill and aggregate the data fields in the <code>TARGET<em>TIME</em>SERIES</code> dataset to improve model training. For more information, see <a>FeaturizationConfig</a>.</p> <p>For RELATED<em>TIME</em>SERIES datasets, <code>CreatePredictor</code> verifies that the <code>DataFrequency</code> specified when the dataset was created matches the <code>ForecastFrequency</code>. TARGET<em>TIME</em>SERIES datasets don&#39;t have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see <a>howitworks-datasets-groups</a>.</p> <p> <b>AutoML</b> </p> <p>If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the <code>objective function</code>, set <code>PerformAutoML</code> to <code>true</code>. The <code>objective function</code> is defined as the mean of the weighted p10, p50, and p90 quantile losses. 
For more information, see <a>EvaluationResult</a>.</p> <p>When AutoML is enabled, the following properties are disallowed:</p> <ul> <li> <p> <code>AlgorithmArn</code> </p> </li> <li> <p> <code>HPOConfig</code> </p> </li> <li> <p> <code>PerformHPO</code> </p> </li> <li> <p> <code>TrainingParameters</code> </p> </li> </ul> <p>To get a list of all of your predictors, use the <a>ListPredictors</a> operation.</p> <note> <p>Before you can use the predictor to create a forecast, the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying that training has completed. To get the status, use the <a>DescribePredictor</a> operation.</p> </note></p>
2747    async fn create_predictor(
2748        &self,
2749        input: CreatePredictorRequest,
2750    ) -> Result<CreatePredictorResponse, RusotoError<CreatePredictorError>>;
2751
2752    /// <p><p>Deletes an Amazon Forecast dataset that was created using the <a>CreateDataset</a> operation. You can only delete datasets that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status use the <a>DescribeDataset</a> operation.</p> <note> <p>Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the operation, omitting the deleted dataset&#39;s ARN.</p> </note></p>
2753    async fn delete_dataset(
2754        &self,
2755        input: DeleteDatasetRequest,
2756    ) -> Result<(), RusotoError<DeleteDatasetError>>;
2757
2758    /// <p>Deletes a dataset group created using the <a>CreateDatasetGroup</a> operation. You can only delete dataset groups that have a status of <code>ACTIVE</code>, <code>CREATE_FAILED</code>, or <code>UPDATE_FAILED</code>. To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> <p>This operation deletes only the dataset group, not the datasets in the group.</p>
2759    async fn delete_dataset_group(
2760        &self,
2761        input: DeleteDatasetGroupRequest,
2762    ) -> Result<(), RusotoError<DeleteDatasetGroupError>>;
2763
2764    /// <p>Deletes a dataset import job created using the <a>CreateDatasetImportJob</a> operation. You can delete only dataset import jobs that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeDatasetImportJob</a> operation.</p>
2765    async fn delete_dataset_import_job(
2766        &self,
2767        input: DeleteDatasetImportJobRequest,
2768    ) -> Result<(), RusotoError<DeleteDatasetImportJobError>>;
2769
2770    /// <p>Deletes a forecast created using the <a>CreateForecast</a> operation. You can delete only forecasts that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecast</a> operation.</p> <p>You can't delete a forecast while it is being exported. After a forecast is deleted, you can no longer query the forecast.</p>
2771    async fn delete_forecast(
2772        &self,
2773        input: DeleteForecastRequest,
2774    ) -> Result<(), RusotoError<DeleteForecastError>>;
2775
2776    /// <p>Deletes a forecast export job created using the <a>CreateForecastExportJob</a> operation. You can delete only export jobs that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecastExportJob</a> operation.</p>
2777    async fn delete_forecast_export_job(
2778        &self,
2779        input: DeleteForecastExportJobRequest,
2780    ) -> Result<(), RusotoError<DeleteForecastExportJobError>>;
2781
2782    /// <p>Deletes a predictor created using the <a>CreatePredictor</a> operation. You can delete only predictor that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribePredictor</a> operation.</p>
2783    async fn delete_predictor(
2784        &self,
2785        input: DeletePredictorRequest,
2786    ) -> Result<(), RusotoError<DeletePredictorError>>;
2787
2788    /// <p><p>Describes an Amazon Forecast dataset created using the <a>CreateDataset</a> operation.</p> <p>In addition to listing the parameters specified in the <code>CreateDataset</code> request, this operation includes the following dataset properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> </ul></p>
2789    async fn describe_dataset(
2790        &self,
2791        input: DescribeDatasetRequest,
2792    ) -> Result<DescribeDatasetResponse, RusotoError<DescribeDatasetError>>;
2793
2794    /// <p><p>Describes a dataset group created using the <a>CreateDatasetGroup</a> operation.</p> <p>In addition to listing the parameters provided in the <code>CreateDatasetGroup</code> request, this operation includes the following properties:</p> <ul> <li> <p> <code>DatasetArns</code> - The datasets belonging to the group.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> </ul></p>
2795    async fn describe_dataset_group(
2796        &self,
2797        input: DescribeDatasetGroupRequest,
2798    ) -> Result<DescribeDatasetGroupResponse, RusotoError<DescribeDatasetGroupError>>;
2799
2800    /// <p><p>Describes a dataset import job created using the <a>CreateDatasetImportJob</a> operation.</p> <p>In addition to listing the parameters provided in the <code>CreateDatasetImportJob</code> request, this operation includes the following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>DataSize</code> </p> </li> <li> <p> <code>FieldStatistics</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
2801    async fn describe_dataset_import_job(
2802        &self,
2803        input: DescribeDatasetImportJobRequest,
2804    ) -> Result<DescribeDatasetImportJobResponse, RusotoError<DescribeDatasetImportJobError>>;
2805
2806    /// <p><p>Describes a forecast created using the <a>CreateForecast</a> operation.</p> <p>In addition to listing the properties provided in the <code>CreateForecast</code> request, this operation lists the following properties:</p> <ul> <li> <p> <code>DatasetGroupArn</code> - The dataset group that provided the training data.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
2807    async fn describe_forecast(
2808        &self,
2809        input: DescribeForecastRequest,
2810    ) -> Result<DescribeForecastResponse, RusotoError<DescribeForecastError>>;
2811
2812    /// <p><p>Describes a forecast export job created using the <a>CreateForecastExportJob</a> operation.</p> <p>In addition to listing the properties provided by the user in the <code>CreateForecastExportJob</code> request, this operation lists the following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
2813    async fn describe_forecast_export_job(
2814        &self,
2815        input: DescribeForecastExportJobRequest,
2816    ) -> Result<DescribeForecastExportJobResponse, RusotoError<DescribeForecastExportJobError>>;
2817
2818    /// <p><p>Describes a predictor created using the <a>CreatePredictor</a> operation.</p> <p>In addition to listing the properties provided in the <code>CreatePredictor</code> request, this operation lists the following properties:</p> <ul> <li> <p> <code>DatasetImportJobArns</code> - The dataset import jobs used to import training data.</p> </li> <li> <p> <code>AutoMLAlgorithmArns</code> - If AutoML is performed, the algorithms that were evaluated.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
2819    async fn describe_predictor(
2820        &self,
2821        input: DescribePredictorRequest,
2822    ) -> Result<DescribePredictorResponse, RusotoError<DescribePredictorError>>;
2823
2824    /// <p><p>Provides metrics on the accuracy of the models that were trained by the <a>CreatePredictor</a> operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see <a>metrics</a>.</p> <p>This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (<code>NumberOfBacktestWindows</code>) is specified using the <a>EvaluationParameters</a> object, which is optionally included in the <code>CreatePredictor</code> request. If <code>NumberOfBacktestWindows</code> isn&#39;t specified, the number defaults to one.</p> <p>The parameters of the <code>filling</code> method determine which items contribute to the metrics. If you want all items to contribute, specify <code>zero</code>. If you want only those items that have complete data in the range being evaluated to contribute, specify <code>nan</code>. For more information, see <a>FeaturizationMethod</a>.</p> <note> <p>Before you can get accuracy metrics, the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying that training has completed. To get the status, use the <a>DescribePredictor</a> operation.</p> </note></p>
2825    async fn get_accuracy_metrics(
2826        &self,
2827        input: GetAccuracyMetricsRequest,
2828    ) -> Result<GetAccuracyMetricsResponse, RusotoError<GetAccuracyMetricsError>>;
2829
2830    /// <p>Returns a list of dataset groups created using the <a>CreateDatasetGroup</a> operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the <a>DescribeDatasetGroup</a> operation.</p>
2831    async fn list_dataset_groups(
2832        &self,
2833        input: ListDatasetGroupsRequest,
2834    ) -> Result<ListDatasetGroupsResponse, RusotoError<ListDatasetGroupsError>>;
2835
2836    /// <p>Returns a list of dataset import jobs created using the <a>CreateDatasetImportJob</a> operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the <a>DescribeDatasetImportJob</a> operation. You can filter the list by providing an array of <a>Filter</a> objects.</p>
2837    async fn list_dataset_import_jobs(
2838        &self,
2839        input: ListDatasetImportJobsRequest,
2840    ) -> Result<ListDatasetImportJobsResponse, RusotoError<ListDatasetImportJobsError>>;
2841
2842    /// <p>Returns a list of datasets created using the <a>CreateDataset</a> operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the <a>DescribeDataset</a> operation.</p>
2843    async fn list_datasets(
2844        &self,
2845        input: ListDatasetsRequest,
2846    ) -> Result<ListDatasetsResponse, RusotoError<ListDatasetsError>>;
2847
2848    /// <p>Returns a list of forecast export jobs created using the <a>CreateForecastExportJob</a> operation. For each forecast export job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, use the ARN with the <a>DescribeForecastExportJob</a> operation. You can filter the list using an array of <a>Filter</a> objects.</p>
2849    async fn list_forecast_export_jobs(
2850        &self,
2851        input: ListForecastExportJobsRequest,
2852    ) -> Result<ListForecastExportJobsResponse, RusotoError<ListForecastExportJobsError>>;
2853
2854    /// <p>Returns a list of forecasts created using the <a>CreateForecast</a> operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the <a>DescribeForecast</a> operation. You can filter the list using an array of <a>Filter</a> objects.</p>
2855    async fn list_forecasts(
2856        &self,
2857        input: ListForecastsRequest,
2858    ) -> Result<ListForecastsResponse, RusotoError<ListForecastsError>>;
2859
2860    /// <p>Returns a list of predictors created using the <a>CreatePredictor</a> operation. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the <a>DescribePredictor</a> operation. You can filter the list using an array of <a>Filter</a> objects.</p>
2861    async fn list_predictors(
2862        &self,
2863        input: ListPredictorsRequest,
2864    ) -> Result<ListPredictorsResponse, RusotoError<ListPredictorsError>>;
2865
2866    /// <p>Lists the tags for an Amazon Forecast resource.</p>
2867    async fn list_tags_for_resource(
2868        &self,
2869        input: ListTagsForResourceRequest,
2870    ) -> Result<ListTagsForResourceResponse, RusotoError<ListTagsForResourceError>>;
2871
2872    /// <p>Associates the specified tags to a resource with the specified <code>resourceArn</code>. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.</p>
2873    async fn tag_resource(
2874        &self,
2875        input: TagResourceRequest,
2876    ) -> Result<TagResourceResponse, RusotoError<TagResourceError>>;
2877
2878    /// <p>Deletes the specified tags from a resource.</p>
2879    async fn untag_resource(
2880        &self,
2881        input: UntagResourceRequest,
2882    ) -> Result<UntagResourceResponse, RusotoError<UntagResourceError>>;
2883
2884    /// <p><p>Replaces the datasets in a dataset group with the specified datasets.</p> <note> <p>The <code>Status</code> of the dataset group must be <code>ACTIVE</code> before you can use the dataset group to create a predictor. Use the <a>DescribeDatasetGroup</a> operation to get the status.</p> </note></p>
2885    async fn update_dataset_group(
2886        &self,
2887        input: UpdateDatasetGroupRequest,
2888    ) -> Result<UpdateDatasetGroupResponse, RusotoError<UpdateDatasetGroupError>>;
2889}
/// A client for the Amazon Forecast Service API.
#[derive(Clone)]
pub struct ForecastClient {
    // Underlying rusoto client (credentials provider + HTTP dispatcher)
    // used to sign and send every request.
    client: Client,
    // AWS region that requests are signed for and dispatched to.
    region: region::Region,
}
2896
2897impl ForecastClient {
2898    /// Creates a client backed by the default tokio event loop.
2899    ///
2900    /// The client will use the default credentials provider and tls client.
2901    pub fn new(region: region::Region) -> ForecastClient {
2902        ForecastClient {
2903            client: Client::shared(),
2904            region,
2905        }
2906    }
2907
2908    pub fn new_with<P, D>(
2909        request_dispatcher: D,
2910        credentials_provider: P,
2911        region: region::Region,
2912    ) -> ForecastClient
2913    where
2914        P: ProvideAwsCredentials + Send + Sync + 'static,
2915        D: DispatchSignedRequest + Send + Sync + 'static,
2916    {
2917        ForecastClient {
2918            client: Client::new_with(credentials_provider, request_dispatcher),
2919            region,
2920        }
2921    }
2922
2923    pub fn new_with_client(client: Client, region: region::Region) -> ForecastClient {
2924        ForecastClient { client, region }
2925    }
2926}
2927
2928#[async_trait]
2929impl Forecast for ForecastClient {
2930    /// <p><p>Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:</p> <ul> <li> <p> <i> <code>DataFrequency</code> </i> - How frequently your historical time-series data is collected.</p> </li> <li> <p> <i> <code>Domain</code> </i> and <i> <code>DatasetType</code> </i> - Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.</p> </li> <li> <p> <i> <code>Schema</code> </i> - A schema specifies the fields in the dataset, including the field name and data type.</p> </li> </ul> <p>After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see <a>howitworks-datasets-groups</a>.</p> <p>To get a list of all your datasets, use the <a>ListDatasets</a> operation.</p> <p>For example Forecast datasets, see the <a href="https://github.com/aws-samples/amazon-forecast-samples">Amazon Forecast Sample GitHub repository</a>.</p> <note> <p>The <code>Status</code> of a dataset must be <code>ACTIVE</code> before you can import training data. Use the <a>DescribeDataset</a> operation to get the status.</p> </note></p>
2931    async fn create_dataset(
2932        &self,
2933        input: CreateDatasetRequest,
2934    ) -> Result<CreateDatasetResponse, RusotoError<CreateDatasetError>> {
2935        let mut request = self.new_signed_request("POST", "/");
2936        request.add_header("x-amz-target", "AmazonForecast.CreateDataset");
2937        let encoded = serde_json::to_string(&input).unwrap();
2938        request.set_payload(Some(encoded));
2939
2940        let response = self
2941            .sign_and_dispatch(request, CreateDatasetError::from_response)
2942            .await?;
2943        let mut response = response;
2944        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
2945        proto::json::ResponsePayload::new(&response).deserialize::<CreateDatasetResponse, _>()
2946    }
2947
2948    /// <p><p>Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the <a>UpdateDatasetGroup</a> operation.</p> <p>After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see <a>howitworks-datasets-groups</a>.</p> <p>To get a list of all your datasets groups, use the <a>ListDatasetGroups</a> operation.</p> <note> <p>The <code>Status</code> of a dataset group must be <code>ACTIVE</code> before you can create use the dataset group to create a predictor. To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> </note></p>
2949    async fn create_dataset_group(
2950        &self,
2951        input: CreateDatasetGroupRequest,
2952    ) -> Result<CreateDatasetGroupResponse, RusotoError<CreateDatasetGroupError>> {
2953        let mut request = self.new_signed_request("POST", "/");
2954        request.add_header("x-amz-target", "AmazonForecast.CreateDatasetGroup");
2955        let encoded = serde_json::to_string(&input).unwrap();
2956        request.set_payload(Some(encoded));
2957
2958        let response = self
2959            .sign_and_dispatch(request, CreateDatasetGroupError::from_response)
2960            .await?;
2961        let mut response = response;
2962        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
2963        proto::json::ResponsePayload::new(&response).deserialize::<CreateDatasetGroupResponse, _>()
2964    }
2965
2966    /// <p>Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.</p> <p>You must specify a <a>DataSource</a> object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see <a>aws-forecast-iam-roles</a>.</p> <p>The training data must be in CSV format. The delimiter must be a comma (,).</p> <p>You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.</p> <p>Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.</p> <p>To get a list of all your dataset import jobs, filtered by specified criteria, use the <a>ListDatasetImportJobs</a> operation.</p>
2967    async fn create_dataset_import_job(
2968        &self,
2969        input: CreateDatasetImportJobRequest,
2970    ) -> Result<CreateDatasetImportJobResponse, RusotoError<CreateDatasetImportJobError>> {
2971        let mut request = self.new_signed_request("POST", "/");
2972        request.add_header("x-amz-target", "AmazonForecast.CreateDatasetImportJob");
2973        let encoded = serde_json::to_string(&input).unwrap();
2974        request.set_payload(Some(encoded));
2975
2976        let response = self
2977            .sign_and_dispatch(request, CreateDatasetImportJobError::from_response)
2978            .await?;
2979        let mut response = response;
2980        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
2981        proto::json::ResponsePayload::new(&response)
2982            .deserialize::<CreateDatasetImportJobResponse, _>()
2983    }
2984
2985    /// <p><p>Creates a forecast for each item in the <code>TARGET<em>TIME</em>SERIES</code> dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the <a>CreateForecastExportJob</a> operation.</p> <p>The range of the forecast is determined by the <code>ForecastHorizon</code> value, which you specify in the <a>CreatePredictor</a> request. When you query a forecast, you can request a specific date range within the forecast.</p> <p>To get a list of all your forecasts, use the <a>ListForecasts</a> operation.</p> <note> <p>The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.</p> </note> <p>For more information, see <a>howitworks-forecast</a>.</p> <note> <p>The <code>Status</code> of the forecast must be <code>ACTIVE</code> before you can query or export the forecast. Use the <a>DescribeForecast</a> operation to get the status.</p> </note></p>
2986    async fn create_forecast(
2987        &self,
2988        input: CreateForecastRequest,
2989    ) -> Result<CreateForecastResponse, RusotoError<CreateForecastError>> {
2990        let mut request = self.new_signed_request("POST", "/");
2991        request.add_header("x-amz-target", "AmazonForecast.CreateForecast");
2992        let encoded = serde_json::to_string(&input).unwrap();
2993        request.set_payload(Some(encoded));
2994
2995        let response = self
2996            .sign_and_dispatch(request, CreateForecastError::from_response)
2997            .await?;
2998        let mut response = response;
2999        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3000        proto::json::ResponsePayload::new(&response).deserialize::<CreateForecastResponse, _>()
3001    }
3002
3003    /// <p><p>Exports a forecast created by the <a>CreateForecast</a> operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:</p> <p>&lt;ForecastExportJobName&gt;<em>&lt;ExportTimestamp&gt;</em>&lt;PartNumber&gt;</p> <p>where the &lt;ExportTimestamp&gt; component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).</p> <p>You must specify a <a>DataDestination</a> object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see <a>aws-forecast-iam-roles</a>.</p> <p>For more information, see <a>howitworks-forecast</a>.</p> <p>To get a list of all your forecast export jobs, use the <a>ListForecastExportJobs</a> operation.</p> <note> <p>The <code>Status</code> of the forecast export job must be <code>ACTIVE</code> before you can access the forecast in your Amazon S3 bucket. To get the status, use the <a>DescribeForecastExportJob</a> operation.</p> </note></p>
3004    async fn create_forecast_export_job(
3005        &self,
3006        input: CreateForecastExportJobRequest,
3007    ) -> Result<CreateForecastExportJobResponse, RusotoError<CreateForecastExportJobError>> {
3008        let mut request = self.new_signed_request("POST", "/");
3009        request.add_header("x-amz-target", "AmazonForecast.CreateForecastExportJob");
3010        let encoded = serde_json::to_string(&input).unwrap();
3011        request.set_payload(Some(encoded));
3012
3013        let response = self
3014            .sign_and_dispatch(request, CreateForecastExportJobError::from_response)
3015            .await?;
3016        let mut response = response;
3017        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3018        proto::json::ResponsePayload::new(&response)
3019            .deserialize::<CreateForecastExportJobResponse, _>()
3020    }
3021
3022    /// <p><p>Creates an Amazon Forecast predictor.</p> <p>In the request, you provide a dataset group and either specify an algorithm or let Amazon Forecast choose the algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.</p> <p>Amazon Forecast uses the chosen algorithm to train a model using the latest version of the datasets in the specified dataset group. The result is called a predictor. You then generate a forecast using the <a>CreateForecast</a> operation.</p> <p>After training a model, the <code>CreatePredictor</code> operation also evaluates it. To see the evaluation metrics, use the <a>GetAccuracyMetrics</a> operation. Always review the evaluation metrics before deciding to use the predictor to generate a forecast.</p> <p>Optionally, you can specify a featurization configuration to fill and aggregate the data fields in the <code>TARGET<em>TIME</em>SERIES</code> dataset to improve model training. For more information, see <a>FeaturizationConfig</a>.</p> <p>For RELATED<em>TIME</em>SERIES datasets, <code>CreatePredictor</code> verifies that the <code>DataFrequency</code> specified when the dataset was created matches the <code>ForecastFrequency</code>. TARGET<em>TIME</em>SERIES datasets don&#39;t have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see <a>howitworks-datasets-groups</a>.</p> <p> <b>AutoML</b> </p> <p>If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the <code>objective function</code>, set <code>PerformAutoML</code> to <code>true</code>. The <code>objective function</code> is defined as the mean of the weighted p10, p50, and p90 quantile losses. 
For more information, see <a>EvaluationResult</a>.</p> <p>When AutoML is enabled, the following properties are disallowed:</p> <ul> <li> <p> <code>AlgorithmArn</code> </p> </li> <li> <p> <code>HPOConfig</code> </p> </li> <li> <p> <code>PerformHPO</code> </p> </li> <li> <p> <code>TrainingParameters</code> </p> </li> </ul> <p>To get a list of all of your predictors, use the <a>ListPredictors</a> operation.</p> <note> <p>Before you can use the predictor to create a forecast, the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying that training has completed. To get the status, use the <a>DescribePredictor</a> operation.</p> </note></p>
3023    async fn create_predictor(
3024        &self,
3025        input: CreatePredictorRequest,
3026    ) -> Result<CreatePredictorResponse, RusotoError<CreatePredictorError>> {
3027        let mut request = self.new_signed_request("POST", "/");
3028        request.add_header("x-amz-target", "AmazonForecast.CreatePredictor");
3029        let encoded = serde_json::to_string(&input).unwrap();
3030        request.set_payload(Some(encoded));
3031
3032        let response = self
3033            .sign_and_dispatch(request, CreatePredictorError::from_response)
3034            .await?;
3035        let mut response = response;
3036        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3037        proto::json::ResponsePayload::new(&response).deserialize::<CreatePredictorResponse, _>()
3038    }
3039
3040    /// <p><p>Deletes an Amazon Forecast dataset that was created using the <a>CreateDataset</a> operation. You can only delete datasets that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status use the <a>DescribeDataset</a> operation.</p> <note> <p>Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the operation, omitting the deleted dataset&#39;s ARN.</p> </note></p>
3041    async fn delete_dataset(
3042        &self,
3043        input: DeleteDatasetRequest,
3044    ) -> Result<(), RusotoError<DeleteDatasetError>> {
3045        let mut request = self.new_signed_request("POST", "/");
3046        request.add_header("x-amz-target", "AmazonForecast.DeleteDataset");
3047        let encoded = serde_json::to_string(&input).unwrap();
3048        request.set_payload(Some(encoded));
3049
3050        let response = self
3051            .sign_and_dispatch(request, DeleteDatasetError::from_response)
3052            .await?;
3053        std::mem::drop(response);
3054        Ok(())
3055    }
3056
3057    /// <p>Deletes a dataset group created using the <a>CreateDatasetGroup</a> operation. You can only delete dataset groups that have a status of <code>ACTIVE</code>, <code>CREATE_FAILED</code>, or <code>UPDATE_FAILED</code>. To get the status, use the <a>DescribeDatasetGroup</a> operation.</p> <p>This operation deletes only the dataset group, not the datasets in the group.</p>
3058    async fn delete_dataset_group(
3059        &self,
3060        input: DeleteDatasetGroupRequest,
3061    ) -> Result<(), RusotoError<DeleteDatasetGroupError>> {
3062        let mut request = self.new_signed_request("POST", "/");
3063        request.add_header("x-amz-target", "AmazonForecast.DeleteDatasetGroup");
3064        let encoded = serde_json::to_string(&input).unwrap();
3065        request.set_payload(Some(encoded));
3066
3067        let response = self
3068            .sign_and_dispatch(request, DeleteDatasetGroupError::from_response)
3069            .await?;
3070        std::mem::drop(response);
3071        Ok(())
3072    }
3073
3074    /// <p>Deletes a dataset import job created using the <a>CreateDatasetImportJob</a> operation. You can delete only dataset import jobs that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeDatasetImportJob</a> operation.</p>
3075    async fn delete_dataset_import_job(
3076        &self,
3077        input: DeleteDatasetImportJobRequest,
3078    ) -> Result<(), RusotoError<DeleteDatasetImportJobError>> {
3079        let mut request = self.new_signed_request("POST", "/");
3080        request.add_header("x-amz-target", "AmazonForecast.DeleteDatasetImportJob");
3081        let encoded = serde_json::to_string(&input).unwrap();
3082        request.set_payload(Some(encoded));
3083
3084        let response = self
3085            .sign_and_dispatch(request, DeleteDatasetImportJobError::from_response)
3086            .await?;
3087        std::mem::drop(response);
3088        Ok(())
3089    }
3090
3091    /// <p>Deletes a forecast created using the <a>CreateForecast</a> operation. You can delete only forecasts that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecast</a> operation.</p> <p>You can't delete a forecast while it is being exported. After a forecast is deleted, you can no longer query the forecast.</p>
3092    async fn delete_forecast(
3093        &self,
3094        input: DeleteForecastRequest,
3095    ) -> Result<(), RusotoError<DeleteForecastError>> {
3096        let mut request = self.new_signed_request("POST", "/");
3097        request.add_header("x-amz-target", "AmazonForecast.DeleteForecast");
3098        let encoded = serde_json::to_string(&input).unwrap();
3099        request.set_payload(Some(encoded));
3100
3101        let response = self
3102            .sign_and_dispatch(request, DeleteForecastError::from_response)
3103            .await?;
3104        std::mem::drop(response);
3105        Ok(())
3106    }
3107
3108    /// <p>Deletes a forecast export job created using the <a>CreateForecastExportJob</a> operation. You can delete only export jobs that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribeForecastExportJob</a> operation.</p>
3109    async fn delete_forecast_export_job(
3110        &self,
3111        input: DeleteForecastExportJobRequest,
3112    ) -> Result<(), RusotoError<DeleteForecastExportJobError>> {
3113        let mut request = self.new_signed_request("POST", "/");
3114        request.add_header("x-amz-target", "AmazonForecast.DeleteForecastExportJob");
3115        let encoded = serde_json::to_string(&input).unwrap();
3116        request.set_payload(Some(encoded));
3117
3118        let response = self
3119            .sign_and_dispatch(request, DeleteForecastExportJobError::from_response)
3120            .await?;
3121        std::mem::drop(response);
3122        Ok(())
3123    }
3124
3125    /// <p>Deletes a predictor created using the <a>CreatePredictor</a> operation. You can delete only predictor that have a status of <code>ACTIVE</code> or <code>CREATE_FAILED</code>. To get the status, use the <a>DescribePredictor</a> operation.</p>
3126    async fn delete_predictor(
3127        &self,
3128        input: DeletePredictorRequest,
3129    ) -> Result<(), RusotoError<DeletePredictorError>> {
3130        let mut request = self.new_signed_request("POST", "/");
3131        request.add_header("x-amz-target", "AmazonForecast.DeletePredictor");
3132        let encoded = serde_json::to_string(&input).unwrap();
3133        request.set_payload(Some(encoded));
3134
3135        let response = self
3136            .sign_and_dispatch(request, DeletePredictorError::from_response)
3137            .await?;
3138        std::mem::drop(response);
3139        Ok(())
3140    }
3141
3142    /// <p><p>Describes an Amazon Forecast dataset created using the <a>CreateDataset</a> operation.</p> <p>In addition to listing the parameters specified in the <code>CreateDataset</code> request, this operation includes the following dataset properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> </ul></p>
3143    async fn describe_dataset(
3144        &self,
3145        input: DescribeDatasetRequest,
3146    ) -> Result<DescribeDatasetResponse, RusotoError<DescribeDatasetError>> {
3147        let mut request = self.new_signed_request("POST", "/");
3148        request.add_header("x-amz-target", "AmazonForecast.DescribeDataset");
3149        let encoded = serde_json::to_string(&input).unwrap();
3150        request.set_payload(Some(encoded));
3151
3152        let response = self
3153            .sign_and_dispatch(request, DescribeDatasetError::from_response)
3154            .await?;
3155        let mut response = response;
3156        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3157        proto::json::ResponsePayload::new(&response).deserialize::<DescribeDatasetResponse, _>()
3158    }
3159
3160    /// <p><p>Describes a dataset group created using the <a>CreateDatasetGroup</a> operation.</p> <p>In addition to listing the parameters provided in the <code>CreateDatasetGroup</code> request, this operation includes the following properties:</p> <ul> <li> <p> <code>DatasetArns</code> - The datasets belonging to the group.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> </ul></p>
3161    async fn describe_dataset_group(
3162        &self,
3163        input: DescribeDatasetGroupRequest,
3164    ) -> Result<DescribeDatasetGroupResponse, RusotoError<DescribeDatasetGroupError>> {
3165        let mut request = self.new_signed_request("POST", "/");
3166        request.add_header("x-amz-target", "AmazonForecast.DescribeDatasetGroup");
3167        let encoded = serde_json::to_string(&input).unwrap();
3168        request.set_payload(Some(encoded));
3169
3170        let response = self
3171            .sign_and_dispatch(request, DescribeDatasetGroupError::from_response)
3172            .await?;
3173        let mut response = response;
3174        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3175        proto::json::ResponsePayload::new(&response)
3176            .deserialize::<DescribeDatasetGroupResponse, _>()
3177    }
3178
3179    /// <p><p>Describes a dataset import job created using the <a>CreateDatasetImportJob</a> operation.</p> <p>In addition to listing the parameters provided in the <code>CreateDatasetImportJob</code> request, this operation includes the following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>DataSize</code> </p> </li> <li> <p> <code>FieldStatistics</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
3180    async fn describe_dataset_import_job(
3181        &self,
3182        input: DescribeDatasetImportJobRequest,
3183    ) -> Result<DescribeDatasetImportJobResponse, RusotoError<DescribeDatasetImportJobError>> {
3184        let mut request = self.new_signed_request("POST", "/");
3185        request.add_header("x-amz-target", "AmazonForecast.DescribeDatasetImportJob");
3186        let encoded = serde_json::to_string(&input).unwrap();
3187        request.set_payload(Some(encoded));
3188
3189        let response = self
3190            .sign_and_dispatch(request, DescribeDatasetImportJobError::from_response)
3191            .await?;
3192        let mut response = response;
3193        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3194        proto::json::ResponsePayload::new(&response)
3195            .deserialize::<DescribeDatasetImportJobResponse, _>()
3196    }
3197
3198    /// <p><p>Describes a forecast created using the <a>CreateForecast</a> operation.</p> <p>In addition to listing the properties provided in the <code>CreateForecast</code> request, this operation lists the following properties:</p> <ul> <li> <p> <code>DatasetGroupArn</code> - The dataset group that provided the training data.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
3199    async fn describe_forecast(
3200        &self,
3201        input: DescribeForecastRequest,
3202    ) -> Result<DescribeForecastResponse, RusotoError<DescribeForecastError>> {
3203        let mut request = self.new_signed_request("POST", "/");
3204        request.add_header("x-amz-target", "AmazonForecast.DescribeForecast");
3205        let encoded = serde_json::to_string(&input).unwrap();
3206        request.set_payload(Some(encoded));
3207
3208        let response = self
3209            .sign_and_dispatch(request, DescribeForecastError::from_response)
3210            .await?;
3211        let mut response = response;
3212        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3213        proto::json::ResponsePayload::new(&response).deserialize::<DescribeForecastResponse, _>()
3214    }
3215
3216    /// <p><p>Describes a forecast export job created using the <a>CreateForecastExportJob</a> operation.</p> <p>In addition to listing the properties provided by the user in the <code>CreateForecastExportJob</code> request, this operation lists the following properties:</p> <ul> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
3217    async fn describe_forecast_export_job(
3218        &self,
3219        input: DescribeForecastExportJobRequest,
3220    ) -> Result<DescribeForecastExportJobResponse, RusotoError<DescribeForecastExportJobError>>
3221    {
3222        let mut request = self.new_signed_request("POST", "/");
3223        request.add_header("x-amz-target", "AmazonForecast.DescribeForecastExportJob");
3224        let encoded = serde_json::to_string(&input).unwrap();
3225        request.set_payload(Some(encoded));
3226
3227        let response = self
3228            .sign_and_dispatch(request, DescribeForecastExportJobError::from_response)
3229            .await?;
3230        let mut response = response;
3231        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3232        proto::json::ResponsePayload::new(&response)
3233            .deserialize::<DescribeForecastExportJobResponse, _>()
3234    }
3235
3236    /// <p><p>Describes a predictor created using the <a>CreatePredictor</a> operation.</p> <p>In addition to listing the properties provided in the <code>CreatePredictor</code> request, this operation lists the following properties:</p> <ul> <li> <p> <code>DatasetImportJobArns</code> - The dataset import jobs used to import training data.</p> </li> <li> <p> <code>AutoMLAlgorithmArns</code> - If AutoML is performed, the algorithms that were evaluated.</p> </li> <li> <p> <code>CreationTime</code> </p> </li> <li> <p> <code>LastModificationTime</code> </p> </li> <li> <p> <code>Status</code> </p> </li> <li> <p> <code>Message</code> - If an error occurred, information about the error.</p> </li> </ul></p>
3237    async fn describe_predictor(
3238        &self,
3239        input: DescribePredictorRequest,
3240    ) -> Result<DescribePredictorResponse, RusotoError<DescribePredictorError>> {
3241        let mut request = self.new_signed_request("POST", "/");
3242        request.add_header("x-amz-target", "AmazonForecast.DescribePredictor");
3243        let encoded = serde_json::to_string(&input).unwrap();
3244        request.set_payload(Some(encoded));
3245
3246        let response = self
3247            .sign_and_dispatch(request, DescribePredictorError::from_response)
3248            .await?;
3249        let mut response = response;
3250        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3251        proto::json::ResponsePayload::new(&response).deserialize::<DescribePredictorResponse, _>()
3252    }
3253
3254    /// <p><p>Provides metrics on the accuracy of the models that were trained by the <a>CreatePredictor</a> operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see <a>metrics</a>.</p> <p>This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (<code>NumberOfBacktestWindows</code>) is specified using the <a>EvaluationParameters</a> object, which is optionally included in the <code>CreatePredictor</code> request. If <code>NumberOfBacktestWindows</code> isn&#39;t specified, the number defaults to one.</p> <p>The parameters of the <code>filling</code> method determine which items contribute to the metrics. If you want all items to contribute, specify <code>zero</code>. If you want only those items that have complete data in the range being evaluated to contribute, specify <code>nan</code>. For more information, see <a>FeaturizationMethod</a>.</p> <note> <p>Before you can get accuracy metrics, the <code>Status</code> of the predictor must be <code>ACTIVE</code>, signifying that training has completed. To get the status, use the <a>DescribePredictor</a> operation.</p> </note></p>
3255    async fn get_accuracy_metrics(
3256        &self,
3257        input: GetAccuracyMetricsRequest,
3258    ) -> Result<GetAccuracyMetricsResponse, RusotoError<GetAccuracyMetricsError>> {
3259        let mut request = self.new_signed_request("POST", "/");
3260        request.add_header("x-amz-target", "AmazonForecast.GetAccuracyMetrics");
3261        let encoded = serde_json::to_string(&input).unwrap();
3262        request.set_payload(Some(encoded));
3263
3264        let response = self
3265            .sign_and_dispatch(request, GetAccuracyMetricsError::from_response)
3266            .await?;
3267        let mut response = response;
3268        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3269        proto::json::ResponsePayload::new(&response).deserialize::<GetAccuracyMetricsResponse, _>()
3270    }
3271
3272    /// <p>Returns a list of dataset groups created using the <a>CreateDatasetGroup</a> operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the <a>DescribeDatasetGroup</a> operation.</p>
3273    async fn list_dataset_groups(
3274        &self,
3275        input: ListDatasetGroupsRequest,
3276    ) -> Result<ListDatasetGroupsResponse, RusotoError<ListDatasetGroupsError>> {
3277        let mut request = self.new_signed_request("POST", "/");
3278        request.add_header("x-amz-target", "AmazonForecast.ListDatasetGroups");
3279        let encoded = serde_json::to_string(&input).unwrap();
3280        request.set_payload(Some(encoded));
3281
3282        let response = self
3283            .sign_and_dispatch(request, ListDatasetGroupsError::from_response)
3284            .await?;
3285        let mut response = response;
3286        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3287        proto::json::ResponsePayload::new(&response).deserialize::<ListDatasetGroupsResponse, _>()
3288    }
3289
3290    /// <p>Returns a list of dataset import jobs created using the <a>CreateDatasetImportJob</a> operation. For each import job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the <a>DescribeDatasetImportJob</a> operation. You can filter the list by providing an array of <a>Filter</a> objects.</p>
3291    async fn list_dataset_import_jobs(
3292        &self,
3293        input: ListDatasetImportJobsRequest,
3294    ) -> Result<ListDatasetImportJobsResponse, RusotoError<ListDatasetImportJobsError>> {
3295        let mut request = self.new_signed_request("POST", "/");
3296        request.add_header("x-amz-target", "AmazonForecast.ListDatasetImportJobs");
3297        let encoded = serde_json::to_string(&input).unwrap();
3298        request.set_payload(Some(encoded));
3299
3300        let response = self
3301            .sign_and_dispatch(request, ListDatasetImportJobsError::from_response)
3302            .await?;
3303        let mut response = response;
3304        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3305        proto::json::ResponsePayload::new(&response)
3306            .deserialize::<ListDatasetImportJobsResponse, _>()
3307    }
3308
3309    /// <p>Returns a list of datasets created using the <a>CreateDataset</a> operation. For each dataset, a summary of its properties, including its Amazon Resource Name (ARN), is returned. To retrieve the complete set of properties, use the ARN with the <a>DescribeDataset</a> operation.</p>
3310    async fn list_datasets(
3311        &self,
3312        input: ListDatasetsRequest,
3313    ) -> Result<ListDatasetsResponse, RusotoError<ListDatasetsError>> {
3314        let mut request = self.new_signed_request("POST", "/");
3315        request.add_header("x-amz-target", "AmazonForecast.ListDatasets");
3316        let encoded = serde_json::to_string(&input).unwrap();
3317        request.set_payload(Some(encoded));
3318
3319        let response = self
3320            .sign_and_dispatch(request, ListDatasetsError::from_response)
3321            .await?;
3322        let mut response = response;
3323        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3324        proto::json::ResponsePayload::new(&response).deserialize::<ListDatasetsResponse, _>()
3325    }
3326
3327    /// <p>Returns a list of forecast export jobs created using the <a>CreateForecastExportJob</a> operation. For each forecast export job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, use the ARN with the <a>DescribeForecastExportJob</a> operation. You can filter the list using an array of <a>Filter</a> objects.</p>
3328    async fn list_forecast_export_jobs(
3329        &self,
3330        input: ListForecastExportJobsRequest,
3331    ) -> Result<ListForecastExportJobsResponse, RusotoError<ListForecastExportJobsError>> {
3332        let mut request = self.new_signed_request("POST", "/");
3333        request.add_header("x-amz-target", "AmazonForecast.ListForecastExportJobs");
3334        let encoded = serde_json::to_string(&input).unwrap();
3335        request.set_payload(Some(encoded));
3336
3337        let response = self
3338            .sign_and_dispatch(request, ListForecastExportJobsError::from_response)
3339            .await?;
3340        let mut response = response;
3341        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3342        proto::json::ResponsePayload::new(&response)
3343            .deserialize::<ListForecastExportJobsResponse, _>()
3344    }
3345
3346    /// <p>Returns a list of forecasts created using the <a>CreateForecast</a> operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the <a>DescribeForecast</a> operation. You can filter the list using an array of <a>Filter</a> objects.</p>
3347    async fn list_forecasts(
3348        &self,
3349        input: ListForecastsRequest,
3350    ) -> Result<ListForecastsResponse, RusotoError<ListForecastsError>> {
3351        let mut request = self.new_signed_request("POST", "/");
3352        request.add_header("x-amz-target", "AmazonForecast.ListForecasts");
3353        let encoded = serde_json::to_string(&input).unwrap();
3354        request.set_payload(Some(encoded));
3355
3356        let response = self
3357            .sign_and_dispatch(request, ListForecastsError::from_response)
3358            .await?;
3359        let mut response = response;
3360        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3361        proto::json::ResponsePayload::new(&response).deserialize::<ListForecastsResponse, _>()
3362    }
3363
3364    /// <p>Returns a list of predictors created using the <a>CreatePredictor</a> operation. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the <a>DescribePredictor</a> operation. You can filter the list using an array of <a>Filter</a> objects.</p>
3365    async fn list_predictors(
3366        &self,
3367        input: ListPredictorsRequest,
3368    ) -> Result<ListPredictorsResponse, RusotoError<ListPredictorsError>> {
3369        let mut request = self.new_signed_request("POST", "/");
3370        request.add_header("x-amz-target", "AmazonForecast.ListPredictors");
3371        let encoded = serde_json::to_string(&input).unwrap();
3372        request.set_payload(Some(encoded));
3373
3374        let response = self
3375            .sign_and_dispatch(request, ListPredictorsError::from_response)
3376            .await?;
3377        let mut response = response;
3378        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3379        proto::json::ResponsePayload::new(&response).deserialize::<ListPredictorsResponse, _>()
3380    }
3381
3382    /// <p>Lists the tags for an Amazon Forecast resource.</p>
3383    async fn list_tags_for_resource(
3384        &self,
3385        input: ListTagsForResourceRequest,
3386    ) -> Result<ListTagsForResourceResponse, RusotoError<ListTagsForResourceError>> {
3387        let mut request = self.new_signed_request("POST", "/");
3388        request.add_header("x-amz-target", "AmazonForecast.ListTagsForResource");
3389        let encoded = serde_json::to_string(&input).unwrap();
3390        request.set_payload(Some(encoded));
3391
3392        let response = self
3393            .sign_and_dispatch(request, ListTagsForResourceError::from_response)
3394            .await?;
3395        let mut response = response;
3396        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3397        proto::json::ResponsePayload::new(&response).deserialize::<ListTagsForResourceResponse, _>()
3398    }
3399
3400    /// <p>Associates the specified tags to a resource with the specified <code>resourceArn</code>. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.</p>
3401    async fn tag_resource(
3402        &self,
3403        input: TagResourceRequest,
3404    ) -> Result<TagResourceResponse, RusotoError<TagResourceError>> {
3405        let mut request = self.new_signed_request("POST", "/");
3406        request.add_header("x-amz-target", "AmazonForecast.TagResource");
3407        let encoded = serde_json::to_string(&input).unwrap();
3408        request.set_payload(Some(encoded));
3409
3410        let response = self
3411            .sign_and_dispatch(request, TagResourceError::from_response)
3412            .await?;
3413        let mut response = response;
3414        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3415        proto::json::ResponsePayload::new(&response).deserialize::<TagResourceResponse, _>()
3416    }
3417
3418    /// <p>Deletes the specified tags from a resource.</p>
3419    async fn untag_resource(
3420        &self,
3421        input: UntagResourceRequest,
3422    ) -> Result<UntagResourceResponse, RusotoError<UntagResourceError>> {
3423        let mut request = self.new_signed_request("POST", "/");
3424        request.add_header("x-amz-target", "AmazonForecast.UntagResource");
3425        let encoded = serde_json::to_string(&input).unwrap();
3426        request.set_payload(Some(encoded));
3427
3428        let response = self
3429            .sign_and_dispatch(request, UntagResourceError::from_response)
3430            .await?;
3431        let mut response = response;
3432        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3433        proto::json::ResponsePayload::new(&response).deserialize::<UntagResourceResponse, _>()
3434    }
3435
3436    /// <p><p>Replaces the datasets in a dataset group with the specified datasets.</p> <note> <p>The <code>Status</code> of the dataset group must be <code>ACTIVE</code> before you can use the dataset group to create a predictor. Use the <a>DescribeDatasetGroup</a> operation to get the status.</p> </note></p>
3437    async fn update_dataset_group(
3438        &self,
3439        input: UpdateDatasetGroupRequest,
3440    ) -> Result<UpdateDatasetGroupResponse, RusotoError<UpdateDatasetGroupError>> {
3441        let mut request = self.new_signed_request("POST", "/");
3442        request.add_header("x-amz-target", "AmazonForecast.UpdateDatasetGroup");
3443        let encoded = serde_json::to_string(&input).unwrap();
3444        request.set_payload(Some(encoded));
3445
3446        let response = self
3447            .sign_and_dispatch(request, UpdateDatasetGroupError::from_response)
3448            .await?;
3449        let mut response = response;
3450        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
3451        proto::json::ResponsePayload::new(&response).deserialize::<UpdateDatasetGroupResponse, _>()
3452    }
3453}