rusoto_firehose/generated.rs
// =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are
// required to the generated code, the service_crategen project
// must be updated to generate the changes.
//
// =================================================================

use std::error::Error;
use std::fmt;

use async_trait::async_trait;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError};

use rusoto_core::proto;
use rusoto_core::request::HttpResponse;
use rusoto_core::signature::SignedRequest;
#[allow(unused_imports)]
use serde::{Deserialize, Serialize};

impl KinesisFirehoseClient {
    /// Builds a request signed for the Firehose service with the JSON 1.1 content type.
    fn new_signed_request(&self, http_method: &str, request_uri: &str) -> SignedRequest {
        let mut request = SignedRequest::new(http_method, "firehose", &self.region, request_uri);

        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request
    }

    /// Dispatches a signed request and maps any non-success response through `from_response`.
    async fn sign_and_dispatch<E>(
        &self,
        request: SignedRequest,
        from_response: fn(BufferedHttpResponse) -> RusotoError<E>,
    ) -> Result<HttpResponse, RusotoError<E>> {
        let mut response = self.client.sign_and_dispatch(request).await?;
        if !response.status.is_success() {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            return Err(from_response(response));
        }

        Ok(response)
    }
}

use serde_json;
/// <p>Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal. The <code>SizeInMBs</code> and <code>IntervalInSeconds</code> parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.</p>
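///
/// <p>A minimal construction sketch: buffer up to 10 MiB or 300 seconds of data, whichever is reached first. Both fields are set together, as required above.</p>
///
/// ```
/// use rusoto_firehose::BufferingHints;
///
/// let hints = BufferingHints {
///     interval_in_seconds: Some(300),
///     size_in_m_bs: Some(10),
/// };
/// assert_eq!(hints.interval_in_seconds, Some(300));
/// ```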
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct BufferingHints {
    /// <p>Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for <code>SizeInMBs</code>, and vice versa.</p>
    #[serde(rename = "IntervalInSeconds")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub interval_in_seconds: Option<i64>,
    /// <p>Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for <code>IntervalInSeconds</code>, and vice versa.</p> <p>We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.</p>
    #[serde(rename = "SizeInMBs")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size_in_m_bs: Option<i64>,
}

/// <p>Describes the Amazon CloudWatch logging options for your delivery stream.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct CloudWatchLoggingOptions {
    /// <p>Enables or disables CloudWatch logging.</p>
    #[serde(rename = "Enabled")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    /// <p>The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.</p>
    #[serde(rename = "LogGroupName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub log_group_name: Option<String>,
    /// <p>The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.</p>
    #[serde(rename = "LogStreamName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub log_stream_name: Option<String>,
}

/// <p>Describes a <code>COPY</code> command for Amazon Redshift.</p>
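///
/// <p>A minimal construction sketch (the table name is only a placeholder): a <code>COPY</code> command that loads TAB-delimited, lzop-compressed data into an existing table.</p>
///
/// ```
/// use rusoto_firehose::CopyCommand;
///
/// let copy_command = CopyCommand {
///     // Placeholder table; the table must already exist in Redshift.
///     data_table_name: "firehose_events".to_owned(),
///     data_table_columns: None,
///     // Matches the first example in the field documentation below.
///     copy_options: Some("delimiter '\\t' lzop;".to_owned()),
/// };
/// ```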
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct CopyCommand {
    /// <p>Optional parameters to use with the Amazon Redshift <code>COPY</code> command. For more information, see the "Optional Parameters" section of <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html">Amazon Redshift COPY command</a>. Some possible examples that would apply to Kinesis Data Firehose are as follows:</p> <p> <code>delimiter '\t' lzop;</code> - fields are delimited with "\t" (TAB character) and compressed using lzop.</p> <p> <code>delimiter '|'</code> - fields are delimited with "|" (this is the default delimiter).</p> <p> <code>delimiter '|' escape</code> - the delimiter should be escaped.</p> <p> <code>fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'</code> - fields are fixed width in the source, with each width specified after every column in the table.</p> <p> <code>JSON 's3://mybucket/jsonpaths.txt'</code> - data is in JSON format, and the path specified is the format of the data.</p> <p>For more examples, see <a href="https://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html">Amazon Redshift COPY command examples</a>.</p>
    #[serde(rename = "CopyOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub copy_options: Option<String>,
    /// <p>A comma-separated list of column names.</p>
    #[serde(rename = "DataTableColumns")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_table_columns: Option<String>,
    /// <p>The name of the target table. The table must already exist in the database.</p>
    #[serde(rename = "DataTableName")]
    pub data_table_name: String,
}

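/// <p>The request shape for <code>CreateDeliveryStream</code>. A minimal sketch (the ARNs and names below are placeholders): a <code>DirectPut</code> stream that delivers to Amazon S3 through the extended S3 destination.</p>
///
/// ```
/// use rusoto_firehose::{CreateDeliveryStreamInput, ExtendedS3DestinationConfiguration};
///
/// let input = CreateDeliveryStreamInput {
///     delivery_stream_name: "example-stream".to_owned(),
///     delivery_stream_type: Some("DirectPut".to_owned()),
///     extended_s3_destination_configuration: Some(ExtendedS3DestinationConfiguration {
///         // Placeholder ARNs for the target bucket and the delivery role.
///         bucket_arn: "arn:aws:s3:::example-bucket".to_owned(),
///         role_arn: "arn:aws:iam::111122223333:role/firehose-delivery-role".to_owned(),
///         ..Default::default()
///     }),
///     ..Default::default()
/// };
/// ```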
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateDeliveryStreamInput {
    /// <p>Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).</p>
    #[serde(rename = "DeliveryStreamEncryptionConfigurationInput")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delivery_stream_encryption_configuration_input:
        Option<DeliveryStreamEncryptionConfigurationInput>,
    /// <p>The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.</p>
    #[serde(rename = "DeliveryStreamName")]
    pub delivery_stream_name: String,
    /// <p>The delivery stream type. This parameter can be one of the following values:</p> <ul> <li> <p> <code>DirectPut</code>: Provider applications access the delivery stream directly.</p> </li> <li> <p> <code>KinesisStreamAsSource</code>: The delivery stream uses a Kinesis data stream as a source.</p> </li> </ul>
    #[serde(rename = "DeliveryStreamType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delivery_stream_type: Option<String>,
    /// <p>The destination in Amazon ES. You can specify only one destination.</p>
    #[serde(rename = "ElasticsearchDestinationConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub elasticsearch_destination_configuration: Option<ElasticsearchDestinationConfiguration>,
    /// <p>The destination in Amazon S3. You can specify only one destination.</p>
    #[serde(rename = "ExtendedS3DestinationConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extended_s3_destination_configuration: Option<ExtendedS3DestinationConfiguration>,
    /// <p>When a Kinesis data stream is used as the source for the delivery stream, a <a>KinesisStreamSourceConfiguration</a> containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.</p>
    #[serde(rename = "KinesisStreamSourceConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kinesis_stream_source_configuration: Option<KinesisStreamSourceConfiguration>,
    /// <p>The destination in Amazon Redshift. You can specify only one destination.</p>
    #[serde(rename = "RedshiftDestinationConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub redshift_destination_configuration: Option<RedshiftDestinationConfiguration>,
    /// <p>The destination in Splunk. You can specify only one destination.</p>
    #[serde(rename = "SplunkDestinationConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub splunk_destination_configuration: Option<SplunkDestinationConfiguration>,
    /// <p>A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see <a href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Using Cost Allocation Tags</a> in the AWS Billing and Cost Management User Guide.</p> <p>You can specify up to 50 tags when creating a delivery stream.</p>
    #[serde(rename = "Tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateDeliveryStreamOutput {
    /// <p>The ARN of the delivery stream.</p>
    #[serde(rename = "DeliveryStreamARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delivery_stream_arn: Option<String>,
}

/// <p>Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the AWS Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html">Kinesis Data Firehose Record Format Conversion</a>.</p>
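///
/// <p>A partial sketch of enabling JSON-to-columnar conversion with the Hive JSON SerDe as the deserializer. A complete configuration also needs <code>OutputFormatConfiguration</code> and <code>SchemaConfiguration</code>, which are left at their defaults here.</p>
///
/// ```
/// use rusoto_firehose::{
///     DataFormatConversionConfiguration, Deserializer, HiveJsonSerDe, InputFormatConfiguration,
/// };
///
/// let conversion = DataFormatConversionConfiguration {
///     enabled: Some(true),
///     input_format_configuration: Some(InputFormatConfiguration {
///         deserializer: Some(Deserializer {
///             hive_json_ser_de: Some(HiveJsonSerDe::default()),
///             open_x_json_ser_de: None,
///         }),
///     }),
///     // Output format and schema configuration are required in a real
///     // request when `enabled` is true; they are elided in this sketch.
///     ..Default::default()
/// };
/// ```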
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct DataFormatConversionConfiguration {
    /// <p>Defaults to <code>true</code>. Set it to <code>false</code> if you want to disable format conversion while preserving the configuration details.</p>
    #[serde(rename = "Enabled")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    /// <p>Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if <code>Enabled</code> is set to true.</p>
    #[serde(rename = "InputFormatConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub input_format_configuration: Option<InputFormatConfiguration>,
    /// <p>Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if <code>Enabled</code> is set to true.</p>
    #[serde(rename = "OutputFormatConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_format_configuration: Option<OutputFormatConfiguration>,
    /// <p>Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if <code>Enabled</code> is set to true.</p>
    #[serde(rename = "SchemaConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schema_configuration: Option<SchemaConfiguration>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteDeliveryStreamInput {
    /// <p>Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html">RevokeGrant</a> operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an AWS KMS issue, Kinesis Data Firehose keeps retrying the delete operation.</p> <p>The default value is false.</p>
    #[serde(rename = "AllowForceDelete")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allow_force_delete: Option<bool>,
    /// <p>The name of the delivery stream.</p>
    #[serde(rename = "DeliveryStreamName")]
    pub delivery_stream_name: String,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteDeliveryStreamOutput {}

/// <p>Contains information about a delivery stream.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeliveryStreamDescription {
    /// <p>The date and time that the delivery stream was created.</p>
    #[serde(rename = "CreateTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub create_timestamp: Option<f64>,
    /// <p>The Amazon Resource Name (ARN) of the delivery stream. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "DeliveryStreamARN")]
    pub delivery_stream_arn: String,
    /// <p>Indicates the server-side encryption (SSE) status for the delivery stream.</p>
    #[serde(rename = "DeliveryStreamEncryptionConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delivery_stream_encryption_configuration: Option<DeliveryStreamEncryptionConfiguration>,
    /// <p>The name of the delivery stream.</p>
    #[serde(rename = "DeliveryStreamName")]
    pub delivery_stream_name: String,
    /// <p>The status of the delivery stream. If the status of a delivery stream is <code>CREATING_FAILED</code>, this status doesn't change, and you can't invoke <code>CreateDeliveryStream</code> again on it. However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete it.</p>
    #[serde(rename = "DeliveryStreamStatus")]
    pub delivery_stream_status: String,
    /// <p>The delivery stream type. This can be one of the following values:</p> <ul> <li> <p> <code>DirectPut</code>: Provider applications access the delivery stream directly.</p> </li> <li> <p> <code>KinesisStreamAsSource</code>: The delivery stream uses a Kinesis data stream as a source.</p> </li> </ul>
    #[serde(rename = "DeliveryStreamType")]
    pub delivery_stream_type: String,
    /// <p>The destinations.</p>
    #[serde(rename = "Destinations")]
    pub destinations: Vec<DestinationDescription>,
    /// <p>Provides details in case one of the following operations fails due to an error related to KMS: <a>CreateDeliveryStream</a>, <a>DeleteDeliveryStream</a>, <a>StartDeliveryStreamEncryption</a>, <a>StopDeliveryStreamEncryption</a>.</p>
    #[serde(rename = "FailureDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub failure_description: Option<FailureDescription>,
    /// <p>Indicates whether there are more destinations available to list.</p>
    #[serde(rename = "HasMoreDestinations")]
    pub has_more_destinations: bool,
    /// <p>The date and time that the delivery stream was last updated.</p>
    #[serde(rename = "LastUpdateTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_update_timestamp: Option<f64>,
    /// <p>If the <code>DeliveryStreamType</code> parameter is <code>KinesisStreamAsSource</code>, a <a>SourceDescription</a> object describing the source Kinesis data stream.</p>
    #[serde(rename = "Source")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub source: Option<SourceDescription>,
    /// <p>Each time the destination is updated for a delivery stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.</p>
    #[serde(rename = "VersionId")]
    pub version_id: String,
}

/// <p>Contains information about the server-side encryption (SSE) status for the delivery stream, the type of customer master key (CMK) in use, if any, and the ARN of the CMK. You can get <code>DeliveryStreamEncryptionConfiguration</code> by invoking the <a>DescribeDeliveryStream</a> operation.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeliveryStreamEncryptionConfiguration {
    /// <p>Provides details in case one of the following operations fails due to an error related to KMS: <a>CreateDeliveryStream</a>, <a>DeleteDeliveryStream</a>, <a>StartDeliveryStreamEncryption</a>, <a>StopDeliveryStreamEncryption</a>.</p>
    #[serde(rename = "FailureDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub failure_description: Option<FailureDescription>,
    /// <p>If <code>KeyType</code> is <code>CUSTOMER_MANAGED_CMK</code>, this field contains the ARN of the customer managed CMK. If <code>KeyType</code> is <code>AWS_OWNED_CMK</code>, <code>DeliveryStreamEncryptionConfiguration</code> doesn't contain a value for <code>KeyARN</code>.</p>
    #[serde(rename = "KeyARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key_arn: Option<String>,
    /// <p>Indicates the type of customer master key (CMK) that is used for encryption. The default setting is <code>AWS_OWNED_CMK</code>. For more information about CMKs, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys">Customer Master Keys (CMKs)</a>.</p>
    #[serde(rename = "KeyType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key_type: Option<String>,
    /// <p>This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see <a>StartDeliveryStreamEncryption</a> and <a>StopDeliveryStreamEncryption</a>. If this status is <code>ENABLING_FAILED</code> or <code>DISABLING_FAILED</code>, it is the status of the most recent attempt to enable or disable SSE, respectively.</p>
    #[serde(rename = "Status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

/// <p>Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE). </p>
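///
/// <p>A minimal sketch (the key ARN is a placeholder): SSE with a customer managed CMK. For <code>AWS_OWNED_CMK</code>, leave <code>KeyARN</code> unset.</p>
///
/// ```
/// use rusoto_firehose::DeliveryStreamEncryptionConfigurationInput;
///
/// let sse_input = DeliveryStreamEncryptionConfigurationInput {
///     key_type: "CUSTOMER_MANAGED_CMK".to_owned(),
///     // Placeholder ARN of a symmetric CMK.
///     key_arn: Some(
///         "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
///             .to_owned(),
///     ),
/// };
/// ```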
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeliveryStreamEncryptionConfigurationInput {
    /// <p>If you set <code>KeyType</code> to <code>CUSTOMER_MANAGED_CMK</code>, you must specify the Amazon Resource Name (ARN) of the CMK. If you set <code>KeyType</code> to <code>AWS_OWNED_CMK</code>, Kinesis Data Firehose uses a service-account CMK.</p>
    #[serde(rename = "KeyARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key_arn: Option<String>,
    /// <p>Indicates the type of customer master key (CMK) to use for encryption. The default setting is <code>AWS_OWNED_CMK</code>. For more information about CMKs, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys">Customer Master Keys (CMKs)</a>. When you invoke <a>CreateDeliveryStream</a> or <a>StartDeliveryStreamEncryption</a> with <code>KeyType</code> set to <code>CUSTOMER_MANAGED_CMK</code>, Kinesis Data Firehose invokes the Amazon KMS operation <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html">CreateGrant</a> to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.</p> <p>When you invoke <a>StartDeliveryStreamEncryption</a> to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.</p> <p>You can use a CMK of type <code>CUSTOMER_MANAGED_CMK</code> to encrypt up to 500 delivery streams. If a <a>CreateDeliveryStream</a> or <a>StartDeliveryStreamEncryption</a> operation exceeds this limit, Kinesis Data Firehose throws a <code>LimitExceededException</code>.</p> <important> <p>To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html">About Symmetric and Asymmetric CMKs</a> in the AWS Key Management Service developer guide.</p> </important>
    #[serde(rename = "KeyType")]
    pub key_type: String,
}


#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeDeliveryStreamInput {
    /// <p>The name of the delivery stream.</p>
    #[serde(rename = "DeliveryStreamName")]
    pub delivery_stream_name: String,
    /// <p>The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.</p>
    #[serde(rename = "ExclusiveStartDestinationId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exclusive_start_destination_id: Option<String>,
    /// <p>The limit on the number of destinations to return. You can have one destination per delivery stream.</p>
    #[serde(rename = "Limit")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeDeliveryStreamOutput {
    /// <p>Information about the delivery stream.</p>
    #[serde(rename = "DeliveryStreamDescription")]
    pub delivery_stream_description: DeliveryStreamDescription,
}

/// <p>The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the <a>Serializer</a>. Kinesis Data Firehose supports two types of deserializers: the <a href="https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON">Apache Hive JSON SerDe</a> and the <a href="https://github.com/rcongiu/Hive-JSON-Serde">OpenX JSON SerDe</a>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct Deserializer {
    /// <p>The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.</p>
    #[serde(rename = "HiveJsonSerDe")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hive_json_ser_de: Option<HiveJsonSerDe>,
    /// <p>The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.</p>
    #[serde(rename = "OpenXJsonSerDe")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub open_x_json_ser_de: Option<OpenXJsonSerDe>,
}

/// <p>Describes the destination for a delivery stream.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DestinationDescription {
    /// <p>The ID of the destination.</p>
    #[serde(rename = "DestinationId")]
    pub destination_id: String,
    /// <p>The destination in Amazon ES.</p>
    #[serde(rename = "ElasticsearchDestinationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub elasticsearch_destination_description: Option<ElasticsearchDestinationDescription>,
    /// <p>The destination in Amazon S3.</p>
    #[serde(rename = "ExtendedS3DestinationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extended_s3_destination_description: Option<ExtendedS3DestinationDescription>,
    /// <p>The destination in Amazon Redshift.</p>
    #[serde(rename = "RedshiftDestinationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub redshift_destination_description: Option<RedshiftDestinationDescription>,
    /// <p>[Deprecated] The destination in Amazon S3.</p>
    #[serde(rename = "S3DestinationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_destination_description: Option<S3DestinationDescription>,
    /// <p>The destination in Splunk.</p>
    #[serde(rename = "SplunkDestinationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub splunk_destination_description: Option<SplunkDestinationDescription>,
}

/// <p>Describes the buffering to perform before delivering data to the Amazon ES destination.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct ElasticsearchBufferingHints {
    /// <p>Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).</p>
    #[serde(rename = "IntervalInSeconds")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub interval_in_seconds: Option<i64>,
    /// <p>Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.</p> <p>We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.</p>
    #[serde(rename = "SizeInMBs")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size_in_m_bs: Option<i64>,
}

/// <p>Describes the configuration of a destination in Amazon ES.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ElasticsearchDestinationConfiguration {
    /// <p>The buffering options. If no value is specified, the default values for <code>ElasticsearchBufferingHints</code> are used.</p>
    #[serde(rename = "BufferingHints")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub buffering_hints: Option<ElasticsearchBufferingHints>,
    /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
    #[serde(rename = "CloudWatchLoggingOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
    /// <p>The endpoint to use when communicating with the cluster. Specify either this <code>ClusterEndpoint</code> or the <code>DomainARN</code> field.</p>
    #[serde(rename = "ClusterEndpoint")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cluster_endpoint: Option<String>,
    /// <p>The ARN of the Amazon ES domain. The IAM role must have permissions for <code>DescribeElasticsearchDomain</code>, <code>DescribeElasticsearchDomains</code>, and <code>DescribeElasticsearchDomainConfig</code> after assuming the role specified in <b>RoleARN</b>. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p> <p>Specify either <code>ClusterEndpoint</code> or <code>DomainARN</code>.</p>
    #[serde(rename = "DomainARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain_arn: Option<String>,
    /// <p>The Elasticsearch index name.</p>
    #[serde(rename = "IndexName")]
    pub index_name: String,
    /// <p>The Elasticsearch index rotation period. Index rotation appends a timestamp to the <code>IndexName</code> to facilitate the expiration of old data. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation">Index Rotation for the Amazon ES Destination</a>. The default value is <code>OneDay</code>.</p>
    #[serde(rename = "IndexRotationPeriod")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub index_rotation_period: Option<String>,
    /// <p>The data processing configuration.</p>
    #[serde(rename = "ProcessingConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub processing_configuration: Option<ProcessingConfiguration>,
    /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).</p>
    #[serde(rename = "RetryOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retry_options: Option<ElasticsearchRetryOptions>,
    /// <p>The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3">Grant Kinesis Data Firehose Access to an Amazon S3 Destination</a> and <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "RoleARN")]
    pub role_arn: String,
    /// <p>Defines how documents should be delivered to Amazon S3. When it is set to <code>FailedDocumentsOnly</code>, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with <code>elasticsearch-failed/</code> appended to the key prefix. When set to <code>AllDocuments</code>, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with <code>elasticsearch-failed/</code> appended to the prefix. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup">Amazon S3 Backup for the Amazon ES Destination</a>. Default value is <code>FailedDocumentsOnly</code>.</p>
    #[serde(rename = "S3BackupMode")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_mode: Option<String>,
    /// <p>The configuration for the backup Amazon S3 location.</p>
    #[serde(rename = "S3Configuration")]
    pub s3_configuration: S3DestinationConfiguration,
    /// <p>The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.</p> <p>For Elasticsearch 7.x, don't specify a <code>TypeName</code>.</p>
    #[serde(rename = "TypeName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub type_name: Option<String>,
    /// <p>The details of the VPC of the Amazon ES destination.</p>
    #[serde(rename = "VpcConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vpc_configuration: Option<VpcConfiguration>,
}

/// <p>The destination description in Amazon ES.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ElasticsearchDestinationDescription {
    /// <p>The buffering options.</p>
    #[serde(rename = "BufferingHints")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub buffering_hints: Option<ElasticsearchBufferingHints>,
    /// <p>The Amazon CloudWatch logging options.</p>
    #[serde(rename = "CloudWatchLoggingOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
    /// <p>The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this <code>ClusterEndpoint</code> or the <code>DomainARN</code> field to send data to Amazon ES.</p>
    #[serde(rename = "ClusterEndpoint")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cluster_endpoint: Option<String>,
    /// <p>The ARN of the Amazon ES domain. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p> <p>Kinesis Data Firehose uses either <code>ClusterEndpoint</code> or <code>DomainARN</code> to send data to Amazon ES.</p>
    #[serde(rename = "DomainARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub domain_arn: Option<String>,
    /// <p>The Elasticsearch index name.</p>
    #[serde(rename = "IndexName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub index_name: Option<String>,
    /// <p>The Elasticsearch index rotation period</p>
    #[serde(rename = "IndexRotationPeriod")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub index_rotation_period: Option<String>,
    /// <p>The data processing configuration.</p>
    #[serde(rename = "ProcessingConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub processing_configuration: Option<ProcessingConfiguration>,
    /// <p>The Amazon ES retry options.</p>
    #[serde(rename = "RetryOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retry_options: Option<ElasticsearchRetryOptions>,
    /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "RoleARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role_arn: Option<String>,
    /// <p>The Amazon S3 backup mode.</p>
    #[serde(rename = "S3BackupMode")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_mode: Option<String>,
    /// <p>The Amazon S3 destination.</p>
    #[serde(rename = "S3DestinationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_destination_description: Option<S3DestinationDescription>,
    /// <p>The Elasticsearch type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x, there's no value for <code>TypeName</code>.</p>
    #[serde(rename = "TypeName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub type_name: Option<String>,
    /// <p>The details of the VPC of the Amazon ES destination.</p>
    #[serde(rename = "VpcConfigurationDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vpc_configuration_description: Option<VpcConfigurationDescription>,
}

459
460/// <p>Describes an update for a destination in Amazon ES.</p>
461#[derive(Clone, Debug, Default, PartialEq, Serialize)]
462#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
463pub struct ElasticsearchDestinationUpdate {
464 /// <p>The buffering options. If no value is specified, <code>ElasticsearchBufferingHints</code> object default values are used. </p>
465 #[serde(rename = "BufferingHints")]
466 #[serde(skip_serializing_if = "Option::is_none")]
467 pub buffering_hints: Option<ElasticsearchBufferingHints>,
468 /// <p>The CloudWatch logging options for your delivery stream.</p>
469 #[serde(rename = "CloudWatchLoggingOptions")]
470 #[serde(skip_serializing_if = "Option::is_none")]
471 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
472 /// <p>The endpoint to use when communicating with the cluster. Specify either this <code>ClusterEndpoint</code> or the <code>DomainARN</code> field.</p>
473 #[serde(rename = "ClusterEndpoint")]
474 #[serde(skip_serializing_if = "Option::is_none")]
475 pub cluster_endpoint: Option<String>,
476 /// <p>The ARN of the Amazon ES domain. The IAM role must have permissions for <code>DescribeElasticsearchDomain</code>, <code>DescribeElasticsearchDomains</code>, and <code>DescribeElasticsearchDomainConfig</code> after assuming the IAM role specified in <code>RoleARN</code>. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p> <p>Specify either <code>ClusterEndpoint</code> or <code>DomainARN</code>.</p>
477 #[serde(rename = "DomainARN")]
478 #[serde(skip_serializing_if = "Option::is_none")]
479 pub domain_arn: Option<String>,
480 /// <p>The Elasticsearch index name.</p>
481 #[serde(rename = "IndexName")]
482 #[serde(skip_serializing_if = "Option::is_none")]
483 pub index_name: Option<String>,
484 /// <p>The Elasticsearch index rotation period. Index rotation appends a timestamp to <code>IndexName</code> to facilitate the expiration of old data. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation">Index Rotation for the Amazon ES Destination</a>. Default value is <code>OneDay</code>.</p>
485 #[serde(rename = "IndexRotationPeriod")]
486 #[serde(skip_serializing_if = "Option::is_none")]
487 pub index_rotation_period: Option<String>,
488 /// <p>The data processing configuration.</p>
489 #[serde(rename = "ProcessingConfiguration")]
490 #[serde(skip_serializing_if = "Option::is_none")]
491 pub processing_configuration: Option<ProcessingConfiguration>,
492 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).</p>
493 #[serde(rename = "RetryOptions")]
494 #[serde(skip_serializing_if = "Option::is_none")]
495 pub retry_options: Option<ElasticsearchRetryOptions>,
496 /// <p>The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3">Grant Kinesis Data Firehose Access to an Amazon S3 Destination</a> and <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
497 #[serde(rename = "RoleARN")]
498 #[serde(skip_serializing_if = "Option::is_none")]
499 pub role_arn: Option<String>,
500 /// <p>The Amazon S3 destination.</p>
501 #[serde(rename = "S3Update")]
502 #[serde(skip_serializing_if = "Option::is_none")]
503 pub s3_update: Option<S3DestinationUpdate>,
504 /// <p>The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.</p> <p>If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for <code>TypeName</code>. </p>
505 #[serde(rename = "TypeName")]
506 #[serde(skip_serializing_if = "Option::is_none")]
507 pub type_name: Option<String>,
508}
509
510/// <p>Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.</p>
511#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
512pub struct ElasticsearchRetryOptions {
513 /// <p>After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.</p>
514 #[serde(rename = "DurationInSeconds")]
515 #[serde(skip_serializing_if = "Option::is_none")]
516 pub duration_in_seconds: Option<i64>,
517}
518
/// <p>Describes the encryption for a destination in Amazon S3.</p>
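///
/// <p>A minimal sketch (the key ARN is a placeholder): server-side encryption of delivered S3 objects with an AWS KMS key. Set <code>NoEncryptionConfig</code> instead to explicitly disable encryption.</p>
///
/// ```
/// use rusoto_firehose::{EncryptionConfiguration, KMSEncryptionConfig};
///
/// let encryption = EncryptionConfiguration {
///     kms_encryption_config: Some(KMSEncryptionConfig {
///         // Placeholder ARN; the key must be in the same Region as the bucket.
///         awskms_key_arn:
///             "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
///                 .to_owned(),
///     }),
///     no_encryption_config: None,
/// };
/// ```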
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct EncryptionConfiguration {
    /// <p>The encryption key.</p>
    #[serde(rename = "KMSEncryptionConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kms_encryption_config: Option<KMSEncryptionConfig>,
    /// <p>Specifically override existing encryption information to ensure that no encryption is used.</p>
    #[serde(rename = "NoEncryptionConfig")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub no_encryption_config: Option<String>,
}

/// <p>Describes the configuration of a destination in Amazon S3.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ExtendedS3DestinationConfiguration {
    /// <p>The ARN of the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "BucketARN")]
    pub bucket_arn: String,
    /// <p>The buffering option.</p>
    #[serde(rename = "BufferingHints")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub buffering_hints: Option<BufferingHints>,
    /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
    #[serde(rename = "CloudWatchLoggingOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
    /// <p>The compression format. If no value is specified, the default is UNCOMPRESSED.</p>
    #[serde(rename = "CompressionFormat")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compression_format: Option<String>,
    /// <p>The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.</p>
    #[serde(rename = "DataFormatConversionConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_format_conversion_configuration: Option<DataFormatConversionConfiguration>,
    /// <p>The encryption configuration. If no value is specified, the default is no encryption.</p>
    #[serde(rename = "EncryptionConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_configuration: Option<EncryptionConfiguration>,
    /// <p>A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
    #[serde(rename = "ErrorOutputPrefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_output_prefix: Option<String>,
    /// <p>The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
    #[serde(rename = "Prefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    /// <p>The data processing configuration.</p>
    #[serde(rename = "ProcessingConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub processing_configuration: Option<ProcessingConfiguration>,
    /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "RoleARN")]
    pub role_arn: String,
    /// <p>The configuration for backup in Amazon S3.</p>
    #[serde(rename = "S3BackupConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_configuration: Option<S3DestinationConfiguration>,
    /// <p>The Amazon S3 backup mode.</p>
    #[serde(rename = "S3BackupMode")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_mode: Option<String>,
}

/// <p>Describes a destination in Amazon S3.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ExtendedS3DestinationDescription {
    /// <p>The ARN of the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "BucketARN")]
    pub bucket_arn: String,
    /// <p>The buffering option.</p>
    #[serde(rename = "BufferingHints")]
    pub buffering_hints: BufferingHints,
    /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
    #[serde(rename = "CloudWatchLoggingOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
    /// <p>The compression format. If no value is specified, the default is <code>UNCOMPRESSED</code>.</p>
    #[serde(rename = "CompressionFormat")]
    pub compression_format: String,
    /// <p>The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.</p>
    #[serde(rename = "DataFormatConversionConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_format_conversion_configuration: Option<DataFormatConversionConfiguration>,
    /// <p>The encryption configuration. If no value is specified, the default is no encryption.</p>
    #[serde(rename = "EncryptionConfiguration")]
    pub encryption_configuration: EncryptionConfiguration,
    /// <p>A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
    #[serde(rename = "ErrorOutputPrefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_output_prefix: Option<String>,
    /// <p>The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
    #[serde(rename = "Prefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    /// <p>The data processing configuration.</p>
    #[serde(rename = "ProcessingConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub processing_configuration: Option<ProcessingConfiguration>,
    /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "RoleARN")]
    pub role_arn: String,
    /// <p>The configuration for backup in Amazon S3.</p>
    #[serde(rename = "S3BackupDescription")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_description: Option<S3DestinationDescription>,
    /// <p>The Amazon S3 backup mode.</p>
    #[serde(rename = "S3BackupMode")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_mode: Option<String>,
}

/// <p>Describes an update for a destination in Amazon S3.</p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ExtendedS3DestinationUpdate {
    /// <p>The ARN of the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "BucketARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bucket_arn: Option<String>,
    /// <p>The buffering option.</p>
    #[serde(rename = "BufferingHints")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub buffering_hints: Option<BufferingHints>,
    /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
    #[serde(rename = "CloudWatchLoggingOptions")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
    /// <p>The compression format. If no value is specified, the default is <code>UNCOMPRESSED</code>. </p>
    #[serde(rename = "CompressionFormat")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compression_format: Option<String>,
    /// <p>The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.</p>
    #[serde(rename = "DataFormatConversionConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_format_conversion_configuration: Option<DataFormatConversionConfiguration>,
    /// <p>The encryption configuration. If no value is specified, the default is no encryption.</p>
    #[serde(rename = "EncryptionConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_configuration: Option<EncryptionConfiguration>,
    /// <p>A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
    #[serde(rename = "ErrorOutputPrefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_output_prefix: Option<String>,
    /// <p>The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
    #[serde(rename = "Prefix")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    /// <p>The data processing configuration.</p>
    #[serde(rename = "ProcessingConfiguration")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub processing_configuration: Option<ProcessingConfiguration>,
    /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "RoleARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role_arn: Option<String>,
    /// <p>Enables or disables Amazon S3 backup mode.</p>
    #[serde(rename = "S3BackupMode")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_mode: Option<String>,
    /// <p>The Amazon S3 destination for backup.</p>
    #[serde(rename = "S3BackupUpdate")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub s3_backup_update: Option<S3DestinationUpdate>,
}

/// <p>Provides details in case one of the following operations fails due to an error related to KMS: <a>CreateDeliveryStream</a>, <a>DeleteDeliveryStream</a>, <a>StartDeliveryStreamEncryption</a>, <a>StopDeliveryStreamEncryption</a>.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct FailureDescription {
    /// <p>A message providing details about the error that caused the failure.</p>
    #[serde(rename = "Details")]
    pub details: String,
    /// <p>The type of error that caused the failure.</p>
    #[serde(rename = "Type")]
    pub type_: String,
}

/// <p>The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.</p>
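///
/// <p>A minimal sketch: accept epoch-millisecond timestamps plus one JodaTime pattern. Without <code>TimestampFormats</code>, <code>java.sql.Timestamp::valueOf</code> is used.</p>
///
/// ```
/// use rusoto_firehose::HiveJsonSerDe;
///
/// let hive_serde = HiveJsonSerDe {
///     timestamp_formats: Some(vec![
///         "millis".to_owned(),
///         "yyyy-MM-dd'T'HH:mm:ss".to_owned(),
///     ]),
/// };
/// ```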
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct HiveJsonSerDe {
    /// <p>Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see <a href="https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html">Class DateTimeFormat</a>. You can also use the special value <code>millis</code> to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses <code>java.sql.Timestamp::valueOf</code> by default.</p>
    #[serde(rename = "TimestampFormats")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timestamp_formats: Option<Vec<String>>,
}

/// <p>Specifies the deserializer you want to use to convert the format of the input data. This parameter is required if <code>Enabled</code> is set to true.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct InputFormatConfiguration {
    /// <p>Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.</p>
    #[serde(rename = "Deserializer")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deserializer: Option<Deserializer>,
}

/// <p>Describes an encryption key for a destination in Amazon S3.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct KMSEncryptionConfig {
    /// <p>The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
    #[serde(rename = "AWSKMSKeyARN")]
    pub awskms_key_arn: String,
}

/// <p>The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream.</p>
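///
/// <p>A minimal sketch (both ARNs are placeholders): read from an existing Kinesis data stream using a role that grants Kinesis Data Firehose access to it.</p>
///
/// ```
/// use rusoto_firehose::KinesisStreamSourceConfiguration;
///
/// let source = KinesisStreamSourceConfiguration {
///     kinesis_stream_arn: "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream"
///         .to_owned(),
///     role_arn: "arn:aws:iam::111122223333:role/firehose-source-role".to_owned(),
/// };
/// ```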
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct KinesisStreamSourceConfiguration {
    /// <p>The ARN of the source Kinesis data stream. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams">Amazon Kinesis Data Streams ARN Format</a>.</p>
    #[serde(rename = "KinesisStreamARN")]
    pub kinesis_stream_arn: String,
    /// <p>The ARN of the role that provides access to the source Kinesis data stream. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam">AWS Identity and Access Management (IAM) ARN Format</a>.</p>
    #[serde(rename = "RoleARN")]
    pub role_arn: String,
}

/// <p>Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.</p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct KinesisStreamSourceDescription {
    /// <p>Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.</p>
    #[serde(rename = "DeliveryStartTimestamp")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delivery_start_timestamp: Option<f64>,
    /// <p>The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams">Amazon Kinesis Data Streams ARN Format</a>.</p>
    #[serde(rename = "KinesisStreamARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kinesis_stream_arn: Option<String>,
    /// <p>The ARN of the role used by the source Kinesis data stream. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam">AWS Identity and Access Management (IAM) ARN Format</a>.</p>
    #[serde(rename = "RoleARN")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role_arn: Option<String>,
}

755#[derive(Clone, Debug, Default, PartialEq, Serialize)]
756#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
757pub struct ListDeliveryStreamsInput {
758 /// <p>The delivery stream type. This can be one of the following values:</p> <ul> <li> <p> <code>DirectPut</code>: Provider applications access the delivery stream directly.</p> </li> <li> <p> <code>KinesisStreamAsSource</code>: The delivery stream uses a Kinesis data stream as a source.</p> </li> </ul> <p>This parameter is optional. If this parameter is omitted, delivery streams of all types are returned.</p>
759 #[serde(rename = "DeliveryStreamType")]
760 #[serde(skip_serializing_if = "Option::is_none")]
761 pub delivery_stream_type: Option<String>,
762 /// <p>The list of delivery streams returned by this call to <code>ListDeliveryStreams</code> will start with the delivery stream whose name comes alphabetically immediately after the name you specify in <code>ExclusiveStartDeliveryStreamName</code>.</p>
763 #[serde(rename = "ExclusiveStartDeliveryStreamName")]
764 #[serde(skip_serializing_if = "Option::is_none")]
765 pub exclusive_start_delivery_stream_name: Option<String>,
766 /// <p>The maximum number of delivery streams to list. The default value is 10.</p>
767 #[serde(rename = "Limit")]
768 #[serde(skip_serializing_if = "Option::is_none")]
769 pub limit: Option<i64>,
770}
771
772#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
773#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
774pub struct ListDeliveryStreamsOutput {
775 /// <p>The names of the delivery streams.</p>
776 #[serde(rename = "DeliveryStreamNames")]
777 pub delivery_stream_names: Vec<String>,
778 /// <p>Indicates whether there are more delivery streams available to list.</p>
779 #[serde(rename = "HasMoreDeliveryStreams")]
780 pub has_more_delivery_streams: bool,
781}
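
// Editor's illustrative pagination sketch (not generated code). It assumes the
// `KinesisFirehose` trait and `ListDeliveryStreamsError` type generated later
// in this file. Streams come back in pages; while `has_more_delivery_streams`
// is true, the last name returned is fed back in as
// `ExclusiveStartDeliveryStreamName`.
#[allow(dead_code)]
async fn example_list_all_streams(
    client: &impl KinesisFirehose,
) -> Result<Vec<String>, RusotoError<ListDeliveryStreamsError>> {
    let mut names = Vec::new();
    let mut start: Option<String> = None;
    loop {
        let page = client
            .list_delivery_streams(ListDeliveryStreamsInput {
                exclusive_start_delivery_stream_name: start.clone(),
                ..Default::default()
            })
            .await?;
        names.extend(page.delivery_stream_names);
        if !page.has_more_delivery_streams {
            return Ok(names);
        }
        // Resume after the last stream name seen so far.
        start = names.last().cloned();
    }
}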
782
783#[derive(Clone, Debug, Default, PartialEq, Serialize)]
784#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
785pub struct ListTagsForDeliveryStreamInput {
786 /// <p>The name of the delivery stream whose tags you want to list.</p>
787 #[serde(rename = "DeliveryStreamName")]
788 pub delivery_stream_name: String,
789 /// <p>The key to use as the starting point for the list of tags. If you set this parameter, <code>ListTagsForDeliveryStream</code> gets all tags that occur after <code>ExclusiveStartTagKey</code>.</p>
790 #[serde(rename = "ExclusiveStartTagKey")]
791 #[serde(skip_serializing_if = "Option::is_none")]
792 pub exclusive_start_tag_key: Option<String>,
793 /// <p>The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, <code>HasMoreTags</code> is set to <code>true</code> in the response. To list additional tags, set <code>ExclusiveStartTagKey</code> to the last key in the response. </p>
794 #[serde(rename = "Limit")]
795 #[serde(skip_serializing_if = "Option::is_none")]
796 pub limit: Option<i64>,
797}
798
799#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
800#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
801pub struct ListTagsForDeliveryStreamOutput {
802 /// <p>If this is <code>true</code> in the response, more tags are available. To list the remaining tags, set <code>ExclusiveStartTagKey</code> to the key of the last tag returned and call <code>ListTagsForDeliveryStream</code> again.</p>
803 #[serde(rename = "HasMoreTags")]
804 pub has_more_tags: bool,
805 /// <p>A list of tags associated with <code>DeliveryStreamName</code>, starting with the first tag after <code>ExclusiveStartTagKey</code> and up to the specified <code>Limit</code>.</p>
806 #[serde(rename = "Tags")]
807 pub tags: Vec<Tag>,
808}
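
// Editor's illustrative sketch (not generated code): requesting the next page
// of tags after a previous `ListTagsForDeliveryStream` call, using the key of
// the last tag returned. The stream name is a placeholder.
#[allow(dead_code)]
fn example_next_tags_page(last_key: String) -> ListTagsForDeliveryStreamInput {
    ListTagsForDeliveryStreamInput {
        delivery_stream_name: "example-stream".to_owned(), // placeholder
        exclusive_start_tag_key: Some(last_key),
        limit: Some(50),
    }
}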
809
810/// <p>The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.</p>
811#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
812pub struct OpenXJsonSerDe {
813 /// <p>When set to <code>true</code>, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.</p>
814 #[serde(rename = "CaseInsensitive")]
815 #[serde(skip_serializing_if = "Option::is_none")]
816 pub case_insensitive: Option<bool>,
817 /// <p>Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, <code>timestamp</code> is a Hive keyword. If you have a JSON key named <code>timestamp</code>, set this parameter to <code>{"ts": "timestamp"}</code> to map this key to a column named <code>ts</code>.</p>
818 #[serde(rename = "ColumnToJsonKeyMappings")]
819 #[serde(skip_serializing_if = "Option::is_none")]
820 pub column_to_json_key_mappings: Option<::std::collections::HashMap<String, String>>,
821 /// <p>When set to <code>true</code>, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option.</p> <p>The default is <code>false</code>.</p>
822 #[serde(rename = "ConvertDotsInJsonKeysToUnderscores")]
823 #[serde(skip_serializing_if = "Option::is_none")]
824 pub convert_dots_in_json_keys_to_underscores: Option<bool>,
825}
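
// Editor's illustrative sketch (not generated code): an OpenX JSON SerDe that
// maps the Hive keyword `timestamp` to a column named `ts` and rewrites dotted
// keys such as "a.b" to "a_b".
#[allow(dead_code)]
fn example_openx_serde() -> OpenXJsonSerDe {
    let mut mappings = ::std::collections::HashMap::new();
    mappings.insert("ts".to_owned(), "timestamp".to_owned());
    OpenXJsonSerDe {
        case_insensitive: Some(true),
        column_to_json_key_mappings: Some(mappings),
        convert_dots_in_json_keys_to_underscores: Some(true),
    }
}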
826
827/// <p>A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see <a href="https://orc.apache.org/docs/">Apache ORC</a>.</p>
828#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
829pub struct OrcSerDe {
830 /// <p>The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.</p>
831 #[serde(rename = "BlockSizeBytes")]
832 #[serde(skip_serializing_if = "Option::is_none")]
833 pub block_size_bytes: Option<i64>,
834 /// <p>The column names for which you want Kinesis Data Firehose to create bloom filters. The default is <code>null</code>.</p>
835 #[serde(rename = "BloomFilterColumns")]
836 #[serde(skip_serializing_if = "Option::is_none")]
837 pub bloom_filter_columns: Option<Vec<String>>,
838 /// <p>The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.</p>
839 #[serde(rename = "BloomFilterFalsePositiveProbability")]
840 #[serde(skip_serializing_if = "Option::is_none")]
841 pub bloom_filter_false_positive_probability: Option<f64>,
842 /// <p>The compression codec to use over data blocks. The default is <code>SNAPPY</code>.</p>
843 #[serde(rename = "Compression")]
844 #[serde(skip_serializing_if = "Option::is_none")]
845 pub compression: Option<String>,
846 /// <p>Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.</p>
847 #[serde(rename = "DictionaryKeyThreshold")]
848 #[serde(skip_serializing_if = "Option::is_none")]
849 pub dictionary_key_threshold: Option<f64>,
850 /// <p>Set this to <code>true</code> to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is <code>false</code>.</p>
851 #[serde(rename = "EnablePadding")]
852 #[serde(skip_serializing_if = "Option::is_none")]
853 pub enable_padding: Option<bool>,
854 /// <p>The version of the file to write. The possible values are <code>V0_11</code> and <code>V0_12</code>. The default is <code>V0_12</code>.</p>
855 #[serde(rename = "FormatVersion")]
856 #[serde(skip_serializing_if = "Option::is_none")]
857 pub format_version: Option<String>,
858 /// <p>A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.</p> <p>For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.</p> <p>Kinesis Data Firehose ignores this parameter when <a>OrcSerDe$EnablePadding</a> is <code>false</code>.</p>
859 #[serde(rename = "PaddingTolerance")]
860 #[serde(skip_serializing_if = "Option::is_none")]
861 pub padding_tolerance: Option<f64>,
862 /// <p>The number of rows between index entries. The default is 10,000 and the minimum is 1,000.</p>
863 #[serde(rename = "RowIndexStride")]
864 #[serde(skip_serializing_if = "Option::is_none")]
865 pub row_index_stride: Option<i64>,
866 /// <p>The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.</p>
867 #[serde(rename = "StripeSizeBytes")]
868 #[serde(skip_serializing_if = "Option::is_none")]
869 pub stripe_size_bytes: Option<i64>,
870}
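
// Editor's illustrative sketch (not generated code): an ORC serializer with
// the documented defaults made explicit, plus block padding enabled for later
// S3-to-HDFS copies. Fields left unset keep their service-side defaults.
#[allow(dead_code)]
fn example_orc_serde() -> OrcSerDe {
    OrcSerDe {
        block_size_bytes: Some(256 * 1024 * 1024), // 256 MiB HDFS block
        compression: Some("SNAPPY".to_owned()),
        enable_padding: Some(true),
        padding_tolerance: Some(0.05), // 5 percent of stripe size
        stripe_size_bytes: Some(64 * 1024 * 1024), // 64 MiB stripes
        ..Default::default()
    }
}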
871
872/// <p>Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if <code>Enabled</code> is set to true.</p>
873#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
874pub struct OutputFormatConfiguration {
875 /// <p>Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.</p>
876 #[serde(rename = "Serializer")]
877 #[serde(skip_serializing_if = "Option::is_none")]
878 pub serializer: Option<Serializer>,
879}
880
881/// <p>A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see <a href="https://parquet.apache.org/documentation/latest/">Apache Parquet</a>.</p>
882#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
883pub struct ParquetSerDe {
884 /// <p>The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.</p>
885 #[serde(rename = "BlockSizeBytes")]
886 #[serde(skip_serializing_if = "Option::is_none")]
887 pub block_size_bytes: Option<i64>,
888 /// <p>The compression codec to use over data blocks. The possible values are <code>UNCOMPRESSED</code>, <code>SNAPPY</code>, and <code>GZIP</code>, with the default being <code>SNAPPY</code>. Use <code>SNAPPY</code> for higher decompression speed. Use <code>GZIP</code> if the compression ratio is more important than speed.</p>
889 #[serde(rename = "Compression")]
890 #[serde(skip_serializing_if = "Option::is_none")]
891 pub compression: Option<String>,
892 /// <p>Indicates whether to enable dictionary compression.</p>
893 #[serde(rename = "EnableDictionaryCompression")]
894 #[serde(skip_serializing_if = "Option::is_none")]
895 pub enable_dictionary_compression: Option<bool>,
896 /// <p>The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.</p>
897 #[serde(rename = "MaxPaddingBytes")]
898 #[serde(skip_serializing_if = "Option::is_none")]
899 pub max_padding_bytes: Option<i64>,
900 /// <p>The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.</p>
901 #[serde(rename = "PageSizeBytes")]
902 #[serde(skip_serializing_if = "Option::is_none")]
903 pub page_size_bytes: Option<i64>,
904 /// <p>Indicates the version of row format to output. The possible values are <code>V1</code> and <code>V2</code>. The default is <code>V1</code>.</p>
905 #[serde(rename = "WriterVersion")]
906 #[serde(skip_serializing_if = "Option::is_none")]
907 pub writer_version: Option<String>,
908}
909
910/// <p>Describes a data processing configuration.</p>
911#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
912pub struct ProcessingConfiguration {
913 /// <p>Enables or disables data processing.</p>
914 #[serde(rename = "Enabled")]
915 #[serde(skip_serializing_if = "Option::is_none")]
916 pub enabled: Option<bool>,
917 /// <p>The data processors.</p>
918 #[serde(rename = "Processors")]
919 #[serde(skip_serializing_if = "Option::is_none")]
920 pub processors: Option<Vec<Processor>>,
921}
922
923/// <p>Describes a data processor.</p>
924#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
925pub struct Processor {
926 /// <p>The processor parameters.</p>
927 #[serde(rename = "Parameters")]
928 #[serde(skip_serializing_if = "Option::is_none")]
929 pub parameters: Option<Vec<ProcessorParameter>>,
930 /// <p>The type of processor.</p>
931 #[serde(rename = "Type")]
932 pub type_: String,
933}
934
935/// <p>Describes the processor parameter.</p>
936#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
937pub struct ProcessorParameter {
938 /// <p>The name of the parameter.</p>
939 #[serde(rename = "ParameterName")]
940 pub parameter_name: String,
941 /// <p>The parameter value.</p>
942 #[serde(rename = "ParameterValue")]
943 pub parameter_value: String,
944}
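
// Editor's illustrative sketch (not generated code): a processing
// configuration that invokes an AWS Lambda transform. "Lambda" and "LambdaArn"
// are the processor type and parameter name documented by the service; the
// function ARN is a placeholder.
#[allow(dead_code)]
fn example_lambda_processing() -> ProcessingConfiguration {
    ProcessingConfiguration {
        enabled: Some(true),
        processors: Some(vec![Processor {
            type_: "Lambda".to_owned(),
            parameters: Some(vec![ProcessorParameter {
                parameter_name: "LambdaArn".to_owned(),
                parameter_value:
                    "arn:aws:lambda:us-east-1:111122223333:function:example-transform"
                        .to_owned(), // placeholder
            }]),
        }]),
    }
}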
945
946#[derive(Clone, Debug, Default, PartialEq, Serialize)]
947#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
948pub struct PutRecordBatchInput {
949 /// <p>The name of the delivery stream.</p>
950 #[serde(rename = "DeliveryStreamName")]
951 pub delivery_stream_name: String,
952 /// <p>One or more records.</p>
953 #[serde(rename = "Records")]
954 pub records: Vec<Record>,
955}
956
957#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
958#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
959pub struct PutRecordBatchOutput {
960 /// <p>Indicates whether server-side encryption (SSE) was enabled during this operation.</p>
961 #[serde(rename = "Encrypted")]
962 #[serde(skip_serializing_if = "Option::is_none")]
963 pub encrypted: Option<bool>,
964 /// <p>The number of records that might have failed processing. This number might be greater than 0 even if the <a>PutRecordBatch</a> call succeeds. Check <code>FailedPutCount</code> to determine whether there are records that you need to resend.</p>
965 #[serde(rename = "FailedPutCount")]
966 pub failed_put_count: i64,
967 /// <p>The results array. For each record, the index of the response element is the same as the index used in the request array.</p>
968 #[serde(rename = "RequestResponses")]
969 pub request_responses: Vec<PutRecordBatchResponseEntry>,
970}
971
972/// <p>Contains the result for an individual record from a <a>PutRecordBatch</a> request. If the record is successfully added to your delivery stream, it receives a record ID. If the record fails to be added to your delivery stream, the result includes an error code and an error message.</p>
973#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
974#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
975pub struct PutRecordBatchResponseEntry {
976 /// <p>The error code for an individual record result.</p>
977 #[serde(rename = "ErrorCode")]
978 #[serde(skip_serializing_if = "Option::is_none")]
979 pub error_code: Option<String>,
980 /// <p>The error message for an individual record result.</p>
981 #[serde(rename = "ErrorMessage")]
982 #[serde(skip_serializing_if = "Option::is_none")]
983 pub error_message: Option<String>,
984 /// <p>The ID of the record.</p>
985 #[serde(rename = "RecordId")]
986 #[serde(skip_serializing_if = "Option::is_none")]
987 pub record_id: Option<String>,
988}
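
// Editor's illustrative sketch (not generated code): after a `PutRecordBatch`
// call, pair each response entry with the record sent at the same index and
// collect the ones that carry an error code so they can be resent.
#[allow(dead_code)]
fn example_collect_failed_records(
    sent: Vec<Record>,
    output: &PutRecordBatchOutput,
) -> Vec<Record> {
    if output.failed_put_count == 0 {
        return Vec::new();
    }
    sent.into_iter()
        .zip(output.request_responses.iter())
        .filter(|(_, entry)| entry.error_code.is_some())
        .map(|(record, _)| record)
        .collect()
}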
989
990#[derive(Clone, Debug, Default, PartialEq, Serialize)]
991#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
992pub struct PutRecordInput {
993 /// <p>The name of the delivery stream.</p>
994 #[serde(rename = "DeliveryStreamName")]
995 pub delivery_stream_name: String,
996 /// <p>The record.</p>
997 #[serde(rename = "Record")]
998 pub record: Record,
999}
1000
1001#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1002#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1003pub struct PutRecordOutput {
1004 /// <p>Indicates whether server-side encryption (SSE) was enabled during this operation.</p>
1005 #[serde(rename = "Encrypted")]
1006 #[serde(skip_serializing_if = "Option::is_none")]
1007 pub encrypted: Option<bool>,
1008 /// <p>The ID of the record.</p>
1009 #[serde(rename = "RecordId")]
1010 pub record_id: String,
1011}
1012
1013/// <p>The unit of data in a delivery stream.</p>
1014#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1015#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1016pub struct Record {
1017 /// <p>The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KiB.</p>
1018 #[serde(rename = "Data")]
1019 #[serde(
1020 deserialize_with = "::rusoto_core::serialization::SerdeBlob::deserialize_blob",
1021 serialize_with = "::rusoto_core::serialization::SerdeBlob::serialize_blob",
1022 default
1023 )]
1024 pub data: bytes::Bytes,
1025}
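
// Editor's illustrative sketch (not generated code): a single
// newline-delimited JSON record wrapped in a `PutRecordInput`. The blob is
// base64-encoded on the wire by the serializer; callers pass raw bytes. The
// stream name is a placeholder.
#[allow(dead_code)]
fn example_put_record_input() -> PutRecordInput {
    PutRecordInput {
        delivery_stream_name: "example-stream".to_owned(), // placeholder
        record: Record {
            data: bytes::Bytes::from_static(b"{\"event\":\"click\"}\n"),
        },
    }
}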
1026
1027/// <p>Describes the configuration of a destination in Amazon Redshift.</p>
1028#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1029#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1030pub struct RedshiftDestinationConfiguration {
1031 /// <p>The CloudWatch logging options for your delivery stream.</p>
1032 #[serde(rename = "CloudWatchLoggingOptions")]
1033 #[serde(skip_serializing_if = "Option::is_none")]
1034 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1035 /// <p>The database connection string.</p>
1036 #[serde(rename = "ClusterJDBCURL")]
1037 pub cluster_jdbcurl: String,
1038 /// <p>The <code>COPY</code> command.</p>
1039 #[serde(rename = "CopyCommand")]
1040 pub copy_command: CopyCommand,
1041 /// <p>The user password.</p>
1042 #[serde(rename = "Password")]
1043 pub password: String,
1044 /// <p>The data processing configuration.</p>
1045 #[serde(rename = "ProcessingConfiguration")]
1046 #[serde(skip_serializing_if = "Option::is_none")]
1047 pub processing_configuration: Option<ProcessingConfiguration>,
1048 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. The default value is 3600 seconds (60 minutes).</p>
1049 #[serde(rename = "RetryOptions")]
1050 #[serde(skip_serializing_if = "Option::is_none")]
1051 pub retry_options: Option<RedshiftRetryOptions>,
1052 /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1053 #[serde(rename = "RoleARN")]
1054 pub role_arn: String,
1055 /// <p>The configuration for backup in Amazon S3.</p>
1056 #[serde(rename = "S3BackupConfiguration")]
1057 #[serde(skip_serializing_if = "Option::is_none")]
1058 pub s3_backup_configuration: Option<S3DestinationConfiguration>,
1059 /// <p>The Amazon S3 backup mode.</p>
1060 #[serde(rename = "S3BackupMode")]
1061 #[serde(skip_serializing_if = "Option::is_none")]
1062 pub s3_backup_mode: Option<String>,
1063 /// <p>The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for <a>CreateDeliveryStream</a>.</p> <p>The compression formats <code>SNAPPY</code> or <code>ZIP</code> cannot be specified in <code>RedshiftDestinationConfiguration.S3Configuration</code> because the Amazon Redshift <code>COPY</code> operation that reads from the S3 bucket doesn't support these compression formats.</p>
1064 #[serde(rename = "S3Configuration")]
1065 pub s3_configuration: S3DestinationConfiguration,
1066 /// <p>The name of the user.</p>
1067 #[serde(rename = "Username")]
1068 pub username: String,
1069}
1070
1071/// <p>Describes a destination in Amazon Redshift.</p>
1072#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1073#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1074pub struct RedshiftDestinationDescription {
1075 /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
1076 #[serde(rename = "CloudWatchLoggingOptions")]
1077 #[serde(skip_serializing_if = "Option::is_none")]
1078 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1079 /// <p>The database connection string.</p>
1080 #[serde(rename = "ClusterJDBCURL")]
1081 pub cluster_jdbcurl: String,
1082 /// <p>The <code>COPY</code> command.</p>
1083 #[serde(rename = "CopyCommand")]
1084 pub copy_command: CopyCommand,
1085 /// <p>The data processing configuration.</p>
1086 #[serde(rename = "ProcessingConfiguration")]
1087 #[serde(skip_serializing_if = "Option::is_none")]
1088 pub processing_configuration: Option<ProcessingConfiguration>,
1089 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. The default value is 3600 seconds (60 minutes).</p>
1090 #[serde(rename = "RetryOptions")]
1091 #[serde(skip_serializing_if = "Option::is_none")]
1092 pub retry_options: Option<RedshiftRetryOptions>,
1093 /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1094 #[serde(rename = "RoleARN")]
1095 pub role_arn: String,
1096 /// <p>The configuration for backup in Amazon S3.</p>
1097 #[serde(rename = "S3BackupDescription")]
1098 #[serde(skip_serializing_if = "Option::is_none")]
1099 pub s3_backup_description: Option<S3DestinationDescription>,
1100 /// <p>The Amazon S3 backup mode.</p>
1101 #[serde(rename = "S3BackupMode")]
1102 #[serde(skip_serializing_if = "Option::is_none")]
1103 pub s3_backup_mode: Option<String>,
1104 /// <p>The Amazon S3 destination.</p>
1105 #[serde(rename = "S3DestinationDescription")]
1106 pub s3_destination_description: S3DestinationDescription,
1107 /// <p>The name of the user.</p>
1108 #[serde(rename = "Username")]
1109 pub username: String,
1110}
1111
1112/// <p>Describes an update for a destination in Amazon Redshift.</p>
1113#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1114#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1115pub struct RedshiftDestinationUpdate {
1116 /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
1117 #[serde(rename = "CloudWatchLoggingOptions")]
1118 #[serde(skip_serializing_if = "Option::is_none")]
1119 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1120 /// <p>The database connection string.</p>
1121 #[serde(rename = "ClusterJDBCURL")]
1122 #[serde(skip_serializing_if = "Option::is_none")]
1123 pub cluster_jdbcurl: Option<String>,
1124 /// <p>The <code>COPY</code> command.</p>
1125 #[serde(rename = "CopyCommand")]
1126 #[serde(skip_serializing_if = "Option::is_none")]
1127 pub copy_command: Option<CopyCommand>,
1128 /// <p>The user password.</p>
1129 #[serde(rename = "Password")]
1130 #[serde(skip_serializing_if = "Option::is_none")]
1131 pub password: Option<String>,
1132 /// <p>The data processing configuration.</p>
1133 #[serde(rename = "ProcessingConfiguration")]
1134 #[serde(skip_serializing_if = "Option::is_none")]
1135 pub processing_configuration: Option<ProcessingConfiguration>,
1136 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. The default value is 3600 seconds (60 minutes).</p>
1137 #[serde(rename = "RetryOptions")]
1138 #[serde(skip_serializing_if = "Option::is_none")]
1139 pub retry_options: Option<RedshiftRetryOptions>,
1140 /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1141 #[serde(rename = "RoleARN")]
1142 #[serde(skip_serializing_if = "Option::is_none")]
1143 pub role_arn: Option<String>,
1144 /// <p>The Amazon S3 backup mode.</p>
1145 #[serde(rename = "S3BackupMode")]
1146 #[serde(skip_serializing_if = "Option::is_none")]
1147 pub s3_backup_mode: Option<String>,
1148 /// <p>The Amazon S3 destination for backup.</p>
1149 #[serde(rename = "S3BackupUpdate")]
1150 #[serde(skip_serializing_if = "Option::is_none")]
1151 pub s3_backup_update: Option<S3DestinationUpdate>,
1152 /// <p>The Amazon S3 destination.</p> <p>The compression formats <code>SNAPPY</code> or <code>ZIP</code> cannot be specified in <code>RedshiftDestinationUpdate.S3Update</code> because the Amazon Redshift <code>COPY</code> operation that reads from the S3 bucket doesn't support these compression formats.</p>
1153 #[serde(rename = "S3Update")]
1154 #[serde(skip_serializing_if = "Option::is_none")]
1155 pub s3_update: Option<S3DestinationUpdate>,
1156 /// <p>The name of the user.</p>
1157 #[serde(rename = "Username")]
1158 #[serde(skip_serializing_if = "Option::is_none")]
1159 pub username: Option<String>,
1160}
1161
1162/// <p>Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift.</p>
1163#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1164pub struct RedshiftRetryOptions {
1165 /// <p>The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of <code>DurationInSeconds</code> is 0 (zero) or if the first delivery attempt takes longer than the current value.</p>
1166 #[serde(rename = "DurationInSeconds")]
1167 #[serde(skip_serializing_if = "Option::is_none")]
1168 pub duration_in_seconds: Option<i64>,
1169}
1170
1171/// <p>Describes the configuration of a destination in Amazon S3.</p>
1172#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1173#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1174pub struct S3DestinationConfiguration {
1175 /// <p>The ARN of the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1176 #[serde(rename = "BucketARN")]
1177 pub bucket_arn: String,
1178 /// <p>The buffering option. If no value is specified, <code>BufferingHints</code> object default values are used.</p>
1179 #[serde(rename = "BufferingHints")]
1180 #[serde(skip_serializing_if = "Option::is_none")]
1181 pub buffering_hints: Option<BufferingHints>,
1182 /// <p>The CloudWatch logging options for your delivery stream.</p>
1183 #[serde(rename = "CloudWatchLoggingOptions")]
1184 #[serde(skip_serializing_if = "Option::is_none")]
1185 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1186 /// <p>The compression format. If no value is specified, the default is <code>UNCOMPRESSED</code>.</p> <p>The compression formats <code>SNAPPY</code> or <code>ZIP</code> cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift <code>COPY</code> operation that reads from the S3 bucket.</p>
1187 #[serde(rename = "CompressionFormat")]
1188 #[serde(skip_serializing_if = "Option::is_none")]
1189 pub compression_format: Option<String>,
1190 /// <p>The encryption configuration. If no value is specified, the default is no encryption.</p>
1191 #[serde(rename = "EncryptionConfiguration")]
1192 #[serde(skip_serializing_if = "Option::is_none")]
1193 pub encryption_configuration: Option<EncryptionConfiguration>,
1194 /// <p>A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
1195 #[serde(rename = "ErrorOutputPrefix")]
1196 #[serde(skip_serializing_if = "Option::is_none")]
1197 pub error_output_prefix: Option<String>,
1198 /// <p>The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
1199 #[serde(rename = "Prefix")]
1200 #[serde(skip_serializing_if = "Option::is_none")]
1201 pub prefix: Option<String>,
1202 /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1203 #[serde(rename = "RoleARN")]
1204 pub role_arn: String,
1205}
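
// Editor's illustrative sketch (not generated code): a minimal S3 destination
// with explicit buffering hints and GZIP compression. Bucket and role ARNs are
// placeholders; unset fields fall back to the service defaults.
#[allow(dead_code)]
fn example_s3_destination() -> S3DestinationConfiguration {
    S3DestinationConfiguration {
        bucket_arn: "arn:aws:s3:::example-bucket".to_owned(), // placeholder
        role_arn: "arn:aws:iam::111122223333:role/example-firehose-role".to_owned(), // placeholder
        buffering_hints: Some(BufferingHints {
            interval_in_seconds: Some(300),
            size_in_m_bs: Some(5),
        }),
        compression_format: Some("GZIP".to_owned()),
        prefix: Some("events/".to_owned()),
        ..Default::default()
    }
}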
1206
1207/// <p>Describes a destination in Amazon S3.</p>
1208#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1209#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1210pub struct S3DestinationDescription {
1211 /// <p>The ARN of the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1212 #[serde(rename = "BucketARN")]
1213 pub bucket_arn: String,
1214 /// <p>The buffering option. If no value is specified, <code>BufferingHints</code> object default values are used.</p>
1215 #[serde(rename = "BufferingHints")]
1216 pub buffering_hints: BufferingHints,
1217 /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
1218 #[serde(rename = "CloudWatchLoggingOptions")]
1219 #[serde(skip_serializing_if = "Option::is_none")]
1220 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1221 /// <p>The compression format. If no value is specified, the default is <code>UNCOMPRESSED</code>.</p>
1222 #[serde(rename = "CompressionFormat")]
1223 pub compression_format: String,
1224 /// <p>The encryption configuration. If no value is specified, the default is no encryption.</p>
1225 #[serde(rename = "EncryptionConfiguration")]
1226 pub encryption_configuration: EncryptionConfiguration,
1227 /// <p>A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
1228 #[serde(rename = "ErrorOutputPrefix")]
1229 #[serde(skip_serializing_if = "Option::is_none")]
1230 pub error_output_prefix: Option<String>,
1231 /// <p>The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
1232 #[serde(rename = "Prefix")]
1233 #[serde(skip_serializing_if = "Option::is_none")]
1234 pub prefix: Option<String>,
1235 /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1236 #[serde(rename = "RoleARN")]
1237 pub role_arn: String,
1238}
1239
1240/// <p>Describes an update for a destination in Amazon S3.</p>
1241#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1242#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1243pub struct S3DestinationUpdate {
1244 /// <p>The ARN of the S3 bucket. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1245 #[serde(rename = "BucketARN")]
1246 #[serde(skip_serializing_if = "Option::is_none")]
1247 pub bucket_arn: Option<String>,
1248 /// <p>The buffering option. If no value is specified, <code>BufferingHints</code> object default values are used.</p>
1249 #[serde(rename = "BufferingHints")]
1250 #[serde(skip_serializing_if = "Option::is_none")]
1251 pub buffering_hints: Option<BufferingHints>,
1252 /// <p>The CloudWatch logging options for your delivery stream.</p>
1253 #[serde(rename = "CloudWatchLoggingOptions")]
1254 #[serde(skip_serializing_if = "Option::is_none")]
1255 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1256 /// <p>The compression format. If no value is specified, the default is <code>UNCOMPRESSED</code>.</p> <p>The compression formats <code>SNAPPY</code> or <code>ZIP</code> cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift <code>COPY</code> operation that reads from the S3 bucket.</p>
1257 #[serde(rename = "CompressionFormat")]
1258 #[serde(skip_serializing_if = "Option::is_none")]
1259 pub compression_format: Option<String>,
1260 /// <p>The encryption configuration. If no value is specified, the default is no encryption.</p>
1261 #[serde(rename = "EncryptionConfiguration")]
1262 #[serde(skip_serializing_if = "Option::is_none")]
1263 pub encryption_configuration: Option<EncryptionConfiguration>,
1264 /// <p>A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
1265 #[serde(rename = "ErrorOutputPrefix")]
1266 #[serde(skip_serializing_if = "Option::is_none")]
1267 pub error_output_prefix: Option<String>,
1268 /// <p>The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in <a href="https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html">Custom Prefixes for Amazon S3 Objects</a>.</p>
1269 #[serde(rename = "Prefix")]
1270 #[serde(skip_serializing_if = "Option::is_none")]
1271 pub prefix: Option<String>,
1272 /// <p>The Amazon Resource Name (ARN) of the AWS credentials. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs) and AWS Service Namespaces</a>.</p>
1273 #[serde(rename = "RoleARN")]
1274 #[serde(skip_serializing_if = "Option::is_none")]
1275 pub role_arn: Option<String>,
1276}
1277
1278/// <p>Specifies the schema that you want Kinesis Data Firehose to use to configure your data before it writes it to Amazon S3. This parameter is required if <code>Enabled</code> is set to true.</p>
1279#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1280pub struct SchemaConfiguration {
1281 /// <p>The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.</p>
1282 #[serde(rename = "CatalogId")]
1283 #[serde(skip_serializing_if = "Option::is_none")]
1284 pub catalog_id: Option<String>,
1285 /// <p>Specifies the name of the AWS Glue database that contains the schema for the output data.</p>
1286 #[serde(rename = "DatabaseName")]
1287 #[serde(skip_serializing_if = "Option::is_none")]
1288 pub database_name: Option<String>,
1289 /// <p>If you don't specify an AWS Region, the default is the current Region.</p>
1290 #[serde(rename = "Region")]
1291 #[serde(skip_serializing_if = "Option::is_none")]
1292 pub region: Option<String>,
1293 /// <p>The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.</p>
1294 #[serde(rename = "RoleARN")]
1295 #[serde(skip_serializing_if = "Option::is_none")]
1296 pub role_arn: Option<String>,
1297 /// <p>Specifies the AWS Glue table that contains the column information that constitutes your data schema.</p>
1298 #[serde(rename = "TableName")]
1299 #[serde(skip_serializing_if = "Option::is_none")]
1300 pub table_name: Option<String>,
1301 /// <p>Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to <code>LATEST</code>, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.</p>
1302 #[serde(rename = "VersionId")]
1303 #[serde(skip_serializing_if = "Option::is_none")]
1304 pub version_id: Option<String>,
1305}
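
// Editor's illustrative sketch (not generated code): pointing format
// conversion at an AWS Glue table. Database, table, and role names are
// placeholders; omitting `CatalogId` and `Region` selects the current account
// and Region.
#[allow(dead_code)]
fn example_schema_configuration() -> SchemaConfiguration {
    SchemaConfiguration {
        database_name: Some("example_db".to_owned()), // placeholder
        table_name: Some("example_table".to_owned()), // placeholder
        role_arn: Some("arn:aws:iam::111122223333:role/example-glue-role".to_owned()),
        version_id: Some("LATEST".to_owned()), // track schema updates automatically
        ..Default::default()
    }
}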
1306
1307/// <p>The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the <a href="https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html">ORC SerDe</a> and the <a href="https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.html">Parquet SerDe</a>.</p>
1308#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1309pub struct Serializer {
1310 /// <p>A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see <a href="https://orc.apache.org/docs/">Apache ORC</a>.</p>
1311 #[serde(rename = "OrcSerDe")]
1312 #[serde(skip_serializing_if = "Option::is_none")]
1313 pub orc_ser_de: Option<OrcSerDe>,
1314 /// <p>A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see <a href="https://parquet.apache.org/documentation/latest/">Apache Parquet</a>.</p>
1315 #[serde(rename = "ParquetSerDe")]
1316 #[serde(skip_serializing_if = "Option::is_none")]
1317 pub parquet_ser_de: Option<ParquetSerDe>,
1318}
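
// Editor's illustrative sketch (not generated code): an output format that
// serializes to Parquet with SNAPPY compression. Only one of the two SerDe
// fields may be set; the server rejects requests that set both.
#[allow(dead_code)]
fn example_output_format() -> OutputFormatConfiguration {
    OutputFormatConfiguration {
        serializer: Some(Serializer {
            parquet_ser_de: Some(ParquetSerDe {
                compression: Some("SNAPPY".to_owned()),
                ..Default::default()
            }),
            orc_ser_de: None,
        }),
    }
}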
1319
1320/// <p>Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.</p>
1321#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1322#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1323pub struct SourceDescription {
1324 /// <p>The <a>KinesisStreamSourceDescription</a> value for the source Kinesis data stream.</p>
1325 #[serde(rename = "KinesisStreamSourceDescription")]
1326 #[serde(skip_serializing_if = "Option::is_none")]
1327 pub kinesis_stream_source_description: Option<KinesisStreamSourceDescription>,
1328}
1329
1330/// <p>Describes the configuration of a destination in Splunk.</p>
1331#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1332#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1333pub struct SplunkDestinationConfiguration {
1334 /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
1335 #[serde(rename = "CloudWatchLoggingOptions")]
1336 #[serde(skip_serializing_if = "Option::is_none")]
1337 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1338 /// <p>The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.</p>
1339 #[serde(rename = "HECAcknowledgmentTimeoutInSeconds")]
1340 #[serde(skip_serializing_if = "Option::is_none")]
1341 pub hec_acknowledgment_timeout_in_seconds: Option<i64>,
1342 /// <p>The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.</p>
1343 #[serde(rename = "HECEndpoint")]
1344 pub hec_endpoint: String,
1345 /// <p>This type can be either "Raw" or "Event."</p>
1346 #[serde(rename = "HECEndpointType")]
1347 pub hec_endpoint_type: String,
1348 /// <p>This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.</p>
1349 #[serde(rename = "HECToken")]
1350 pub hec_token: String,
1351 /// <p>The data processing configuration.</p>
1352 #[serde(rename = "ProcessingConfiguration")]
1353 #[serde(skip_serializing_if = "Option::is_none")]
1354 pub processing_configuration: Option<ProcessingConfiguration>,
1355 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.</p>
1356 #[serde(rename = "RetryOptions")]
1357 #[serde(skip_serializing_if = "Option::is_none")]
1358 pub retry_options: Option<SplunkRetryOptions>,
1359 /// <p>Defines how documents should be delivered to Amazon S3. When set to <code>FailedDocumentsOnly</code>, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to <code>AllDocuments</code>, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is <code>FailedDocumentsOnly</code>. </p>
1360 #[serde(rename = "S3BackupMode")]
1361 #[serde(skip_serializing_if = "Option::is_none")]
1362 pub s3_backup_mode: Option<String>,
1363 /// <p>The configuration for the backup Amazon S3 location.</p>
1364 #[serde(rename = "S3Configuration")]
1365 pub s3_configuration: S3DestinationConfiguration,
1366}
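
// Editor's illustrative sketch (not generated code): a Splunk destination that
// backs up only failed documents to S3. The HEC endpoint and token are
// placeholders; `example_s3_destination` refers to the sketch above.
#[allow(dead_code)]
fn example_splunk_destination() -> SplunkDestinationConfiguration {
    SplunkDestinationConfiguration {
        hec_endpoint: "https://splunk.example.com:8088".to_owned(), // placeholder
        hec_endpoint_type: "Raw".to_owned(),
        hec_token: "00000000-0000-0000-0000-000000000000".to_owned(), // placeholder GUID
        s3_backup_mode: Some("FailedDocumentsOnly".to_owned()),
        s3_configuration: example_s3_destination(),
        ..Default::default()
    }
}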
1367
1368/// <p>Describes a destination in Splunk.</p>
1369#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1370#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1371pub struct SplunkDestinationDescription {
1372 /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
1373 #[serde(rename = "CloudWatchLoggingOptions")]
1374 #[serde(skip_serializing_if = "Option::is_none")]
1375 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1376 /// <p>The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.</p>
1377 #[serde(rename = "HECAcknowledgmentTimeoutInSeconds")]
1378 #[serde(skip_serializing_if = "Option::is_none")]
1379 pub hec_acknowledgment_timeout_in_seconds: Option<i64>,
1380 /// <p>The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.</p>
1381 #[serde(rename = "HECEndpoint")]
1382 #[serde(skip_serializing_if = "Option::is_none")]
1383 pub hec_endpoint: Option<String>,
1384 /// <p>This type can be either "Raw" or "Event."</p>
1385 #[serde(rename = "HECEndpointType")]
1386 #[serde(skip_serializing_if = "Option::is_none")]
1387 pub hec_endpoint_type: Option<String>,
1388 /// <p>A GUID you obtain from your Splunk cluster when you create a new HEC endpoint.</p>
1389 #[serde(rename = "HECToken")]
1390 #[serde(skip_serializing_if = "Option::is_none")]
1391 pub hec_token: Option<String>,
1392 /// <p>The data processing configuration.</p>
1393 #[serde(rename = "ProcessingConfiguration")]
1394 #[serde(skip_serializing_if = "Option::is_none")]
1395 pub processing_configuration: Option<ProcessingConfiguration>,
1396 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.</p>
1397 #[serde(rename = "RetryOptions")]
1398 #[serde(skip_serializing_if = "Option::is_none")]
1399 pub retry_options: Option<SplunkRetryOptions>,
1400 /// <p>Defines how documents should be delivered to Amazon S3. When set to <code>FailedDocumentsOnly</code>, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to <code>AllDocuments</code>, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is <code>FailedDocumentsOnly</code>. </p>
1401 #[serde(rename = "S3BackupMode")]
1402 #[serde(skip_serializing_if = "Option::is_none")]
1403 pub s3_backup_mode: Option<String>,
1404 /// <p>The Amazon S3 destination.</p>
1405 #[serde(rename = "S3DestinationDescription")]
1406 #[serde(skip_serializing_if = "Option::is_none")]
1407 pub s3_destination_description: Option<S3DestinationDescription>,
1408}
1409
1410/// <p>Describes an update for a destination in Splunk.</p>
1411#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1412#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1413pub struct SplunkDestinationUpdate {
1414 /// <p>The Amazon CloudWatch logging options for your delivery stream.</p>
1415 #[serde(rename = "CloudWatchLoggingOptions")]
1416 #[serde(skip_serializing_if = "Option::is_none")]
1417 pub cloud_watch_logging_options: Option<CloudWatchLoggingOptions>,
1418 /// <p>The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.</p>
1419 #[serde(rename = "HECAcknowledgmentTimeoutInSeconds")]
1420 #[serde(skip_serializing_if = "Option::is_none")]
1421 pub hec_acknowledgment_timeout_in_seconds: Option<i64>,
1422 /// <p>The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.</p>
1423 #[serde(rename = "HECEndpoint")]
1424 #[serde(skip_serializing_if = "Option::is_none")]
1425 pub hec_endpoint: Option<String>,
1426 /// <p>This type can be either "Raw" or "Event."</p>
1427 #[serde(rename = "HECEndpointType")]
1428 #[serde(skip_serializing_if = "Option::is_none")]
1429 pub hec_endpoint_type: Option<String>,
1430 /// <p>A GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.</p>
1431 #[serde(rename = "HECToken")]
1432 #[serde(skip_serializing_if = "Option::is_none")]
1433 pub hec_token: Option<String>,
1434 /// <p>The data processing configuration.</p>
1435 #[serde(rename = "ProcessingConfiguration")]
1436 #[serde(skip_serializing_if = "Option::is_none")]
1437 pub processing_configuration: Option<ProcessingConfiguration>,
1438 /// <p>The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.</p>
1439 #[serde(rename = "RetryOptions")]
1440 #[serde(skip_serializing_if = "Option::is_none")]
1441 pub retry_options: Option<SplunkRetryOptions>,
1442 /// <p>Defines how documents should be delivered to Amazon S3. When set to <code>FailedDocumentsOnly</code>, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to <code>AllDocuments</code>, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is <code>FailedDocumentsOnly</code>. </p>
1443 #[serde(rename = "S3BackupMode")]
1444 #[serde(skip_serializing_if = "Option::is_none")]
1445 pub s3_backup_mode: Option<String>,
1446 /// <p>Your update to the configuration of the backup Amazon S3 location.</p>
1447 #[serde(rename = "S3Update")]
1448 #[serde(skip_serializing_if = "Option::is_none")]
1449 pub s3_update: Option<S3DestinationUpdate>,
1450}
1451
1452/// <p>Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.</p>
1453#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1454pub struct SplunkRetryOptions {
1455 /// <p>The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.</p>
1456 #[serde(rename = "DurationInSeconds")]
1457 #[serde(skip_serializing_if = "Option::is_none")]
1458 pub duration_in_seconds: Option<i64>,
1459}
1460
1461#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1462#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1463pub struct StartDeliveryStreamEncryptionInput {
1464 /// <p>Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).</p>
1465 #[serde(rename = "DeliveryStreamEncryptionConfigurationInput")]
1466 #[serde(skip_serializing_if = "Option::is_none")]
1467 pub delivery_stream_encryption_configuration_input:
1468 Option<DeliveryStreamEncryptionConfigurationInput>,
1469 /// <p>The name of the delivery stream for which you want to enable server-side encryption (SSE).</p>
1470 #[serde(rename = "DeliveryStreamName")]
1471 pub delivery_stream_name: String,
1472}
1473
1474#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1475#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1476pub struct StartDeliveryStreamEncryptionOutput {}
1477
1478#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1479#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1480pub struct StopDeliveryStreamEncryptionInput {
1481 /// <p>The name of the delivery stream for which you want to disable server-side encryption (SSE).</p>
1482 #[serde(rename = "DeliveryStreamName")]
1483 pub delivery_stream_name: String,
1484}
1485
1486#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1487#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1488pub struct StopDeliveryStreamEncryptionOutput {}
1489
1490/// <p>Metadata that you can assign to a delivery stream, consisting of a key-value pair.</p>
1491#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
1492pub struct Tag {
1493 /// <p>A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @</p>
1494 #[serde(rename = "Key")]
1495 pub key: String,
1496 /// <p>An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @</p>
1497 #[serde(rename = "Value")]
1498 #[serde(skip_serializing_if = "Option::is_none")]
1499 pub value: Option<String>,
1500}
1501
1502#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1503#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1504pub struct TagDeliveryStreamInput {
1505 /// <p>The name of the delivery stream to which you want to add the tags.</p>
1506 #[serde(rename = "DeliveryStreamName")]
1507 pub delivery_stream_name: String,
1508 /// <p>A set of key-value pairs to use to create the tags.</p>
1509 #[serde(rename = "Tags")]
1510 pub tags: Vec<Tag>,
1511}
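
// Editor's illustrative sketch (not generated code): tagging a delivery stream
// with an environment label. The stream name is a placeholder.
#[allow(dead_code)]
fn example_tag_stream() -> TagDeliveryStreamInput {
    TagDeliveryStreamInput {
        delivery_stream_name: "example-stream".to_owned(), // placeholder
        tags: vec![Tag {
            key: "Environment".to_owned(),
            value: Some("staging".to_owned()),
        }],
    }
}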
1512
1513#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1514#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1515pub struct TagDeliveryStreamOutput {}
1516
1517#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1518#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1519pub struct UntagDeliveryStreamInput {
1520 /// <p>The name of the delivery stream.</p>
1521 #[serde(rename = "DeliveryStreamName")]
1522 pub delivery_stream_name: String,
1523 /// <p>A list of tag keys. Each corresponding tag is removed from the delivery stream.</p>
1524 #[serde(rename = "TagKeys")]
1525 pub tag_keys: Vec<String>,
1526}
1527
1528#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1529#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1530pub struct UntagDeliveryStreamOutput {}
1531
1532#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1533#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1534pub struct UpdateDestinationInput {
1535 /// <p>Obtain this value from the <code>VersionId</code> result of <a>DeliveryStreamDescription</a>. This value is required, and it helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, the destination update fails. After the update is successful, the <code>VersionId</code> value is updated. The service then performs a merge of the old configuration with the new configuration.</p>
1536 #[serde(rename = "CurrentDeliveryStreamVersionId")]
1537 pub current_delivery_stream_version_id: String,
1538 /// <p>The name of the delivery stream.</p>
1539 #[serde(rename = "DeliveryStreamName")]
1540 pub delivery_stream_name: String,
1541 /// <p>The ID of the destination.</p>
1542 #[serde(rename = "DestinationId")]
1543 pub destination_id: String,
1544 /// <p>Describes an update for a destination in Amazon ES.</p>
1545 #[serde(rename = "ElasticsearchDestinationUpdate")]
1546 #[serde(skip_serializing_if = "Option::is_none")]
1547 pub elasticsearch_destination_update: Option<ElasticsearchDestinationUpdate>,
1548 /// <p>Describes an update for a destination in Amazon S3.</p>
1549 #[serde(rename = "ExtendedS3DestinationUpdate")]
1550 #[serde(skip_serializing_if = "Option::is_none")]
1551 pub extended_s3_destination_update: Option<ExtendedS3DestinationUpdate>,
1552 /// <p>Describes an update for a destination in Amazon Redshift.</p>
1553 #[serde(rename = "RedshiftDestinationUpdate")]
1554 #[serde(skip_serializing_if = "Option::is_none")]
1555 pub redshift_destination_update: Option<RedshiftDestinationUpdate>,
1556 /// <p>Describes an update for a destination in Splunk.</p>
1557 #[serde(rename = "SplunkDestinationUpdate")]
1558 #[serde(skip_serializing_if = "Option::is_none")]
1559 pub splunk_destination_update: Option<SplunkDestinationUpdate>,
1560}
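
// Editor's illustrative sketch (not generated code): widening the retry window
// of an existing Splunk destination. The version and destination IDs come from
// a prior `DescribeDeliveryStream` call and are placeholders here; only one
// destination-update field should be set per request.
#[allow(dead_code)]
fn example_update_splunk_retry() -> UpdateDestinationInput {
    UpdateDestinationInput {
        delivery_stream_name: "example-stream".to_owned(), // placeholder
        current_delivery_stream_version_id: "1".to_owned(), // placeholder
        destination_id: "destinationId-000000000001".to_owned(), // placeholder
        splunk_destination_update: Some(SplunkDestinationUpdate {
            retry_options: Some(SplunkRetryOptions {
                duration_in_seconds: Some(300),
            }),
            ..Default::default()
        }),
        ..Default::default()
    }
}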
1561
1562#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1563#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1564pub struct UpdateDestinationOutput {}
1565
1566/// <p>The details of the VPC of the Amazon ES destination.</p>
1567#[derive(Clone, Debug, Default, PartialEq, Serialize)]
1568#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
1569pub struct VpcConfiguration {
1570 /// <p>The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC.</p>
1571 #[serde(rename = "RoleARN")]
1572 pub role_arn: String,
1573 /// <p>The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination.</p>
1574 #[serde(rename = "SecurityGroupIds")]
1575 pub security_group_ids: Vec<String>,
1576 /// <p>The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.</p> <p>The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis">Network Interfaces </a> in the Amazon VPC Quotas topic.</p>
1577 #[serde(rename = "SubnetIds")]
1578 pub subnet_ids: Vec<String>,
1579}
1580
1581/// <p>The details of the VPC of the Amazon ES destination.</p>
1582#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
1583#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
1584pub struct VpcConfigurationDescription {
1585 /// <p>The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC.</p>
1586 #[serde(rename = "RoleARN")]
1587 pub role_arn: String,
1588 /// <p>The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination.</p>
1589 #[serde(rename = "SecurityGroupIds")]
1590 pub security_group_ids: Vec<String>,
1591 /// <p>The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.</p> <p>The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis">Network Interfaces </a> in the Amazon VPC Quotas topic.</p>
1592 #[serde(rename = "SubnetIds")]
1593 pub subnet_ids: Vec<String>,
1594 /// <p>The ID of the Amazon ES destination's VPC.</p>
1595 #[serde(rename = "VpcId")]
1596 pub vpc_id: String,
1597}

/// Errors returned by CreateDeliveryStream
#[derive(Debug, PartialEq)]
pub enum CreateDeliveryStreamError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: <code>AccessDeniedException</code>, <code>InvalidStateException</code>, <code>DisabledException</code>, or <code>NotFoundException</code>.</p>
    InvalidKMSResource(String),
    /// <p>You have already reached the limit for a requested resource.</p>
    LimitExceeded(String),
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
}

impl CreateDeliveryStreamError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateDeliveryStreamError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(CreateDeliveryStreamError::InvalidArgument(
                        err.msg,
                    ))
                }
                "InvalidKMSResourceException" => {
                    return RusotoError::Service(CreateDeliveryStreamError::InvalidKMSResource(
                        err.msg,
                    ))
                }
                "LimitExceededException" => {
                    return RusotoError::Service(CreateDeliveryStreamError::LimitExceeded(err.msg))
                }
                "ResourceInUseException" => {
                    return RusotoError::Service(CreateDeliveryStreamError::ResourceInUse(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for CreateDeliveryStreamError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateDeliveryStreamError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            CreateDeliveryStreamError::InvalidKMSResource(ref cause) => write!(f, "{}", cause),
            CreateDeliveryStreamError::LimitExceeded(ref cause) => write!(f, "{}", cause),
            CreateDeliveryStreamError::ResourceInUse(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateDeliveryStreamError {}
/// Errors returned by DeleteDeliveryStream
#[derive(Debug, PartialEq)]
pub enum DeleteDeliveryStreamError {
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl DeleteDeliveryStreamError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteDeliveryStreamError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "ResourceInUseException" => {
                    return RusotoError::Service(DeleteDeliveryStreamError::ResourceInUse(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(DeleteDeliveryStreamError::ResourceNotFound(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for DeleteDeliveryStreamError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteDeliveryStreamError::ResourceInUse(ref cause) => write!(f, "{}", cause),
            DeleteDeliveryStreamError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteDeliveryStreamError {}
/// Errors returned by DescribeDeliveryStream
#[derive(Debug, PartialEq)]
pub enum DescribeDeliveryStreamError {
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl DescribeDeliveryStreamError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeDeliveryStreamError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "ResourceNotFoundException" => {
                    return RusotoError::Service(DescribeDeliveryStreamError::ResourceNotFound(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for DescribeDeliveryStreamError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DescribeDeliveryStreamError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DescribeDeliveryStreamError {}
/// Errors returned by ListDeliveryStreams
#[derive(Debug, PartialEq)]
pub enum ListDeliveryStreamsError {}

impl ListDeliveryStreamsError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListDeliveryStreamsError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for ListDeliveryStreamsError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {}
    }
}
impl Error for ListDeliveryStreamsError {}
/// Errors returned by ListTagsForDeliveryStream
#[derive(Debug, PartialEq)]
pub enum ListTagsForDeliveryStreamError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>You have already reached the limit for a requested resource.</p>
    LimitExceeded(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl ListTagsForDeliveryStreamError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForDeliveryStreamError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(ListTagsForDeliveryStreamError::InvalidArgument(
                        err.msg,
                    ))
                }
                "LimitExceededException" => {
                    return RusotoError::Service(ListTagsForDeliveryStreamError::LimitExceeded(
                        err.msg,
                    ))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(ListTagsForDeliveryStreamError::ResourceNotFound(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for ListTagsForDeliveryStreamError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ListTagsForDeliveryStreamError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            ListTagsForDeliveryStreamError::LimitExceeded(ref cause) => write!(f, "{}", cause),
            ListTagsForDeliveryStreamError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for ListTagsForDeliveryStreamError {}
/// Errors returned by PutRecord
#[derive(Debug, PartialEq)]
pub enum PutRecordError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: <code>AccessDeniedException</code>, <code>InvalidStateException</code>, <code>DisabledException</code>, or <code>NotFoundException</code>.</p>
    InvalidKMSResource(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
    /// <p>The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose Limits</a>.</p>
    ServiceUnavailable(String),
}

impl PutRecordError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<PutRecordError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(PutRecordError::InvalidArgument(err.msg))
                }
                "InvalidKMSResourceException" => {
                    return RusotoError::Service(PutRecordError::InvalidKMSResource(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(PutRecordError::ResourceNotFound(err.msg))
                }
                "ServiceUnavailableException" => {
                    return RusotoError::Service(PutRecordError::ServiceUnavailable(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for PutRecordError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            PutRecordError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            PutRecordError::InvalidKMSResource(ref cause) => write!(f, "{}", cause),
            PutRecordError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
            PutRecordError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for PutRecordError {}
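
// Hand-written sketch (not generated): the PutRecord documentation below advises
// backing off and retrying on `ServiceUnavailableException`. This is one way a
// caller might classify which PutRecord failures are worth retrying; the function
// name and the choice to also retry dispatch errors are illustrative only.
#[allow(dead_code)]
fn put_record_is_retryable(err: &RusotoError<PutRecordError>) -> bool {
    match err {
        // Throughput limits may have been exceeded: back off, then retry.
        RusotoError::Service(PutRecordError::ServiceUnavailable(_)) => true,
        // Transport-level failures (timeouts, resets) are typically transient.
        RusotoError::HttpDispatch(_) => true,
        _ => false,
    }
}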
/// Errors returned by PutRecordBatch
#[derive(Debug, PartialEq)]
pub enum PutRecordBatchError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: <code>AccessDeniedException</code>, <code>InvalidStateException</code>, <code>DisabledException</code>, or <code>NotFoundException</code>.</p>
    InvalidKMSResource(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
    /// <p>The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose Limits</a>.</p>
    ServiceUnavailable(String),
}

impl PutRecordBatchError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<PutRecordBatchError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(PutRecordBatchError::InvalidArgument(err.msg))
                }
                "InvalidKMSResourceException" => {
                    return RusotoError::Service(PutRecordBatchError::InvalidKMSResource(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(PutRecordBatchError::ResourceNotFound(err.msg))
                }
                "ServiceUnavailableException" => {
                    return RusotoError::Service(PutRecordBatchError::ServiceUnavailable(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for PutRecordBatchError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            PutRecordBatchError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            PutRecordBatchError::InvalidKMSResource(ref cause) => write!(f, "{}", cause),
            PutRecordBatchError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
            PutRecordBatchError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for PutRecordBatchError {}
/// Errors returned by StartDeliveryStreamEncryption
#[derive(Debug, PartialEq)]
pub enum StartDeliveryStreamEncryptionError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: <code>AccessDeniedException</code>, <code>InvalidStateException</code>, <code>DisabledException</code>, or <code>NotFoundException</code>.</p>
    InvalidKMSResource(String),
    /// <p>You have already reached the limit for a requested resource.</p>
    LimitExceeded(String),
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl StartDeliveryStreamEncryptionError {
    pub fn from_response(
        res: BufferedHttpResponse,
    ) -> RusotoError<StartDeliveryStreamEncryptionError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(
                        StartDeliveryStreamEncryptionError::InvalidArgument(err.msg),
                    )
                }
                "InvalidKMSResourceException" => {
                    return RusotoError::Service(
                        StartDeliveryStreamEncryptionError::InvalidKMSResource(err.msg),
                    )
                }
                "LimitExceededException" => {
                    return RusotoError::Service(StartDeliveryStreamEncryptionError::LimitExceeded(
                        err.msg,
                    ))
                }
                "ResourceInUseException" => {
                    return RusotoError::Service(StartDeliveryStreamEncryptionError::ResourceInUse(
                        err.msg,
                    ))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(
                        StartDeliveryStreamEncryptionError::ResourceNotFound(err.msg),
                    )
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for StartDeliveryStreamEncryptionError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            StartDeliveryStreamEncryptionError::InvalidArgument(ref cause) => {
                write!(f, "{}", cause)
            }
            StartDeliveryStreamEncryptionError::InvalidKMSResource(ref cause) => {
                write!(f, "{}", cause)
            }
            StartDeliveryStreamEncryptionError::LimitExceeded(ref cause) => write!(f, "{}", cause),
            StartDeliveryStreamEncryptionError::ResourceInUse(ref cause) => write!(f, "{}", cause),
            StartDeliveryStreamEncryptionError::ResourceNotFound(ref cause) => {
                write!(f, "{}", cause)
            }
        }
    }
}
impl Error for StartDeliveryStreamEncryptionError {}
/// Errors returned by StopDeliveryStreamEncryption
#[derive(Debug, PartialEq)]
pub enum StopDeliveryStreamEncryptionError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>You have already reached the limit for a requested resource.</p>
    LimitExceeded(String),
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl StopDeliveryStreamEncryptionError {
    pub fn from_response(
        res: BufferedHttpResponse,
    ) -> RusotoError<StopDeliveryStreamEncryptionError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(
                        StopDeliveryStreamEncryptionError::InvalidArgument(err.msg),
                    )
                }
                "LimitExceededException" => {
                    return RusotoError::Service(StopDeliveryStreamEncryptionError::LimitExceeded(
                        err.msg,
                    ))
                }
                "ResourceInUseException" => {
                    return RusotoError::Service(StopDeliveryStreamEncryptionError::ResourceInUse(
                        err.msg,
                    ))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(
                        StopDeliveryStreamEncryptionError::ResourceNotFound(err.msg),
                    )
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for StopDeliveryStreamEncryptionError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            StopDeliveryStreamEncryptionError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            StopDeliveryStreamEncryptionError::LimitExceeded(ref cause) => write!(f, "{}", cause),
            StopDeliveryStreamEncryptionError::ResourceInUse(ref cause) => write!(f, "{}", cause),
            StopDeliveryStreamEncryptionError::ResourceNotFound(ref cause) => {
                write!(f, "{}", cause)
            }
        }
    }
}
impl Error for StopDeliveryStreamEncryptionError {}
/// Errors returned by TagDeliveryStream
#[derive(Debug, PartialEq)]
pub enum TagDeliveryStreamError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>You have already reached the limit for a requested resource.</p>
    LimitExceeded(String),
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl TagDeliveryStreamError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagDeliveryStreamError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(TagDeliveryStreamError::InvalidArgument(err.msg))
                }
                "LimitExceededException" => {
                    return RusotoError::Service(TagDeliveryStreamError::LimitExceeded(err.msg))
                }
                "ResourceInUseException" => {
                    return RusotoError::Service(TagDeliveryStreamError::ResourceInUse(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(TagDeliveryStreamError::ResourceNotFound(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for TagDeliveryStreamError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            TagDeliveryStreamError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            TagDeliveryStreamError::LimitExceeded(ref cause) => write!(f, "{}", cause),
            TagDeliveryStreamError::ResourceInUse(ref cause) => write!(f, "{}", cause),
            TagDeliveryStreamError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for TagDeliveryStreamError {}
/// Errors returned by UntagDeliveryStream
#[derive(Debug, PartialEq)]
pub enum UntagDeliveryStreamError {
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>You have already reached the limit for a requested resource.</p>
    LimitExceeded(String),
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl UntagDeliveryStreamError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagDeliveryStreamError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "InvalidArgumentException" => {
                    return RusotoError::Service(UntagDeliveryStreamError::InvalidArgument(err.msg))
                }
                "LimitExceededException" => {
                    return RusotoError::Service(UntagDeliveryStreamError::LimitExceeded(err.msg))
                }
                "ResourceInUseException" => {
                    return RusotoError::Service(UntagDeliveryStreamError::ResourceInUse(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(UntagDeliveryStreamError::ResourceNotFound(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for UntagDeliveryStreamError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            UntagDeliveryStreamError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            UntagDeliveryStreamError::LimitExceeded(ref cause) => write!(f, "{}", cause),
            UntagDeliveryStreamError::ResourceInUse(ref cause) => write!(f, "{}", cause),
            UntagDeliveryStreamError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for UntagDeliveryStreamError {}
/// Errors returned by UpdateDestination
#[derive(Debug, PartialEq)]
pub enum UpdateDestinationError {
    /// <p>Another modification has already happened. Fetch <code>VersionId</code> again and use it to update the destination.</p>
    ConcurrentModification(String),
    /// <p>The specified input parameter has a value that is not valid.</p>
    InvalidArgument(String),
    /// <p>The resource is already in use and not available for this operation.</p>
    ResourceInUse(String),
    /// <p>The specified resource could not be found.</p>
    ResourceNotFound(String),
}

impl UpdateDestinationError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateDestinationError> {
        if let Some(err) = proto::json::Error::parse(&res) {
            match err.typ.as_str() {
                "ConcurrentModificationException" => {
                    return RusotoError::Service(UpdateDestinationError::ConcurrentModification(
                        err.msg,
                    ))
                }
                "InvalidArgumentException" => {
                    return RusotoError::Service(UpdateDestinationError::InvalidArgument(err.msg))
                }
                "ResourceInUseException" => {
                    return RusotoError::Service(UpdateDestinationError::ResourceInUse(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(UpdateDestinationError::ResourceNotFound(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for UpdateDestinationError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            UpdateDestinationError::ConcurrentModification(ref cause) => write!(f, "{}", cause),
            UpdateDestinationError::InvalidArgument(ref cause) => write!(f, "{}", cause),
            UpdateDestinationError::ResourceInUse(ref cause) => write!(f, "{}", cause),
            UpdateDestinationError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for UpdateDestinationError {}
/// Trait representing the capabilities of the Firehose API. Firehose clients implement this trait.
#[async_trait]
pub trait KinesisFirehose {
    /// <p>Creates a Kinesis Data Firehose delivery stream.</p> <p>By default, you can create up to 50 delivery streams per AWS Region.</p> <p>This is an asynchronous operation that immediately returns. The initial status of the delivery stream is <code>CREATING</code>. After the delivery stream is created, its status is <code>ACTIVE</code> and it now accepts data. If the delivery stream creation fails, the status transitions to <code>CREATING_FAILED</code>. Attempts to send data to a delivery stream that is not in the <code>ACTIVE</code> state cause an exception. To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>.</p> <p>If the status of a delivery stream is <code>CREATING_FAILED</code>, this status doesn't change, and you can't invoke <code>CreateDeliveryStream</code> again on it. However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete it.</p> <p>A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using <a>PutRecord</a> or <a>PutRecordBatch</a>, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the <code>DeliveryStreamType</code> parameter to <code>KinesisStreamAsSource</code>, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the <code>KinesisStreamSourceConfiguration</code> parameter.</p> <p>To create a delivery stream with server-side encryption (SSE) enabled, include <a>DeliveryStreamEncryptionConfigurationInput</a> in your request. This is optional. You can also invoke <a>StartDeliveryStreamEncryption</a> to turn on SSE for an existing delivery stream that doesn't have SSE enabled.</p> <p>A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: <code>ExtendedS3DestinationConfiguration</code>, <code>S3DestinationConfiguration</code>, <code>ElasticsearchDestinationConfiguration</code>, <code>RedshiftDestinationConfiguration</code>, or <code>SplunkDestinationConfiguration</code>.</p> <p>When you specify <code>S3DestinationConfiguration</code>, you can also provide the following optional values: BufferingHints, <code>EncryptionConfiguration</code>, and <code>CompressionFormat</code>. By default, if no <code>BufferingHints</code> value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. <code>BufferingHints</code> is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.</p> <p>A few notes about Amazon Redshift as a destination:</p> <ul> <li> <p>An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses <code>COPY</code> syntax to load data into an Amazon Redshift table. This is specified in the <code>RedshiftDestinationConfiguration.S3Configuration</code> parameter.</p> </li> <li> <p>The compression formats <code>SNAPPY</code> or <code>ZIP</code> cannot be specified in <code>RedshiftDestinationConfiguration.S3Configuration</code> because the Amazon Redshift <code>COPY</code> operation that reads from the S3 bucket doesn't support these compression formats.</p> </li> <li> <p>We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift <code>INSERT</code> permissions.</p> </li> </ul> <p>Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3">Grant Kinesis Data Firehose Access to an Amazon S3 Destination</a> in the <i>Amazon Kinesis Data Firehose Developer Guide</i>.</p>
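    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs), assuming
    /// the input types and `Default` derives defined earlier in this file; the
    /// stream name and ARNs are hypothetical placeholders:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{
    ///     CreateDeliveryStreamInput, ExtendedS3DestinationConfiguration, KinesisFirehose,
    ///     KinesisFirehoseClient,
    /// };
    ///
    /// async fn create_stream() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = CreateDeliveryStreamInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         extended_s3_destination_configuration: Some(ExtendedS3DestinationConfiguration {
    ///             bucket_arn: "arn:aws:s3:::example-bucket".to_owned(),
    ///             role_arn: "arn:aws:iam::123456789012:role/firehose-delivery-role".to_owned(),
    ///             ..ExtendedS3DestinationConfiguration::default()
    ///         }),
    ///         ..CreateDeliveryStreamInput::default()
    ///     };
    ///     // Returns as soon as the stream enters CREATING; poll DescribeDeliveryStream
    ///     // until the status is ACTIVE before sending data.
    ///     client.create_delivery_stream(input).await.unwrap();
    /// }
    /// ```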
    async fn create_delivery_stream(
        &self,
        input: CreateDeliveryStreamInput,
    ) -> Result<CreateDeliveryStreamOutput, RusotoError<CreateDeliveryStreamError>>;

    /// <p>Deletes a delivery stream and its data.</p> <p>To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>. You can delete a delivery stream only if it is in one of the following states: <code>ACTIVE</code>, <code>DELETING</code>, <code>CREATING_FAILED</code>, or <code>DELETING_FAILED</code>. You can't delete a delivery stream that is in the <code>CREATING</code> state. While the deletion request is in process, the delivery stream is in the <code>DELETING</code> state.</p> <p>While the delivery stream is in the <code>DELETING</code> state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.</p>
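    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs); the
    /// stream name is a hypothetical placeholder:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{DeleteDeliveryStreamInput, KinesisFirehose, KinesisFirehoseClient};
    ///
    /// async fn delete_stream() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = DeleteDeliveryStreamInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         // AllowForceDelete is left unset here; pass Some(true) only to
    ///         // force deletion out of DELETING_FAILED.
    ///         ..DeleteDeliveryStreamInput::default()
    ///     };
    ///     client.delete_delivery_stream(input).await.unwrap();
    /// }
    /// ```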
    async fn delete_delivery_stream(
        &self,
        input: DeleteDeliveryStreamInput,
    ) -> Result<DeleteDeliveryStreamOutput, RusotoError<DeleteDeliveryStreamError>>;

    /// <p>Describes the specified delivery stream and its status. For example, after your delivery stream is created, call <code>DescribeDeliveryStream</code> to see whether the delivery stream is <code>ACTIVE</code> and therefore ready for data to be sent to it. </p> <p>If the status of a delivery stream is <code>CREATING_FAILED</code>, this status doesn't change, and you can't invoke <a>CreateDeliveryStream</a> again on it. However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete it. If the status is <code>DELETING_FAILED</code>, you can force deletion by invoking <a>DeleteDeliveryStream</a> again but with <a>DeleteDeliveryStreamInput$AllowForceDelete</a> set to true.</p>
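    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs), assuming
    /// the output shape defined earlier in this file; the stream name is a
    /// placeholder:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{DescribeDeliveryStreamInput, KinesisFirehose, KinesisFirehoseClient};
    ///
    /// async fn print_status() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let output = client
    ///         .describe_delivery_stream(DescribeDeliveryStreamInput {
    ///             delivery_stream_name: "example-stream".to_owned(),
    ///             ..DescribeDeliveryStreamInput::default()
    ///         })
    ///         .await
    ///         .unwrap();
    ///     // ACTIVE means the stream is ready to accept records.
    ///     println!(
    ///         "{}",
    ///         output.delivery_stream_description.delivery_stream_status
    ///     );
    /// }
    /// ```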
2183 async fn describe_delivery_stream(
2184 &self,
2185 input: DescribeDeliveryStreamInput,
2186 ) -> Result<DescribeDeliveryStreamOutput, RusotoError<DescribeDeliveryStreamError>>;
2187
2188 /// <p>Lists your delivery streams in alphabetical order of their names.</p> <p>The number of delivery streams might be too large to return using a single call to <code>ListDeliveryStreams</code>. You can limit the number of delivery streams returned, using the <code>Limit</code> parameter. To determine whether there are more delivery streams to list, check the value of <code>HasMoreDeliveryStreams</code> in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the <code>ExclusiveStartDeliveryStreamName</code> parameter to the name of the last delivery stream returned in the last call.</p>
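    ///
    /// A pagination sketch (hand-written, not part of the generated AWS docs) of
    /// the loop described above, assuming the input/output shapes defined earlier
    /// in this file:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, ListDeliveryStreamsInput};
    ///
    /// async fn list_all_streams() -> Vec<String> {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let mut names = Vec::new();
    ///     let mut start_name: Option<String> = None;
    ///     loop {
    ///         let output = client
    ///             .list_delivery_streams(ListDeliveryStreamsInput {
    ///                 exclusive_start_delivery_stream_name: start_name.clone(),
    ///                 ..ListDeliveryStreamsInput::default()
    ///             })
    ///             .await
    ///             .unwrap();
    ///         names.extend(output.delivery_stream_names);
    ///         if !output.has_more_delivery_streams {
    ///             return names;
    ///         }
    ///         // Resume after the last stream name returned by the previous call.
    ///         start_name = names.last().cloned();
    ///     }
    /// }
    /// ```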
2189 async fn list_delivery_streams(
2190 &self,
2191 input: ListDeliveryStreamsInput,
2192 ) -> Result<ListDeliveryStreamsOutput, RusotoError<ListDeliveryStreamsError>>;
2193
2194 /// <p>Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account. </p>
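    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs), assuming
    /// the `Tag` shape defined earlier in this file; the stream name is a
    /// placeholder:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, ListTagsForDeliveryStreamInput};
    ///
    /// async fn print_tags() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let output = client
    ///         .list_tags_for_delivery_stream(ListTagsForDeliveryStreamInput {
    ///             delivery_stream_name: "example-stream".to_owned(),
    ///             ..ListTagsForDeliveryStreamInput::default()
    ///         })
    ///         .await
    ///         .unwrap();
    ///     for tag in output.tags {
    ///         println!("{} = {:?}", tag.key, tag.value);
    ///     }
    /// }
    /// ```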
    async fn list_tags_for_delivery_stream(
        &self,
        input: ListTagsForDeliveryStreamInput,
    ) -> Result<ListTagsForDeliveryStreamOutput, RusotoError<ListTagsForDeliveryStreamError>>;

    /// <p><p>Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use <a>PutRecordBatch</a>. Applications using these operations are referred to as producers.</p> <p>By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use <a>PutRecord</a> and <a>PutRecordBatch</a>, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose Limits</a>. </p> <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.</p> <p>Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (<code>\n</code>) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.</p> <p>The <code>PutRecord</code> operation returns a <code>RecordId</code>, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.</p> <p>If the <code>PutRecord</code> operation throws a <code>ServiceUnavailableException</code>, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. </p> <p>Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.</p> <important> <p>Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.</p> </important></p>
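    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs), assuming
    /// the `Record` blob type defined earlier in this file is backed by
    /// `bytes::Bytes`; the stream name and payload are placeholders:
    ///
    /// ```no_run
    /// use bytes::Bytes;
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, PutRecordInput, Record};
    ///
    /// async fn put_one() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = PutRecordInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         // Newline-delimited so consumers can split records at the destination.
    ///         record: Record {
    ///             data: Bytes::from("{\"event\":\"example\"}\n"),
    ///         },
    ///     };
    ///     let output = client.put_record(input).await.unwrap();
    ///     println!("RecordId: {}", output.record_id);
    /// }
    /// ```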
    async fn put_record(
        &self,
        input: PutRecordInput,
    ) -> Result<PutRecordOutput, RusotoError<PutRecordError>>;

    /// <p><p>Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use <a>PutRecord</a>. Applications using these operations are referred to as producers.</p> <p>By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use <a>PutRecord</a> and <a>PutRecordBatch</a>, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose Limits</a>.</p> <p>Each <a>PutRecordBatch</a> request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.</p> <p>You must specify the name of the delivery stream and the data record when using <a>PutRecordBatch</a>. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.</p> <p>Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (<code>\n</code>) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.</p> <p>The <a>PutRecordBatch</a> response includes a count of failed records, <code>FailedPutCount</code>, and an array of responses, <code>RequestResponses</code>. Even if the <a>PutRecordBatch</a> call succeeds, the value of <code>FailedPutCount</code> may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the <code>RequestResponses</code> array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. <code>RequestResponses</code> includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each <a>PutRecordBatch</a> request. A single record failure does not stop the processing of subsequent records. </p> <p>A successfully processed record includes a <code>RecordId</code> value, which is unique for the record. An unsuccessfully processed record includes <code>ErrorCode</code> and <code>ErrorMessage</code> values. <code>ErrorCode</code> reflects the type of error, and is one of the following values: <code>ServiceUnavailableException</code> or <code>InternalFailure</code>. <code>ErrorMessage</code> provides more detailed information about the error.</p> <p>If there is an internal server error or a timeout, the write might have completed or it might have failed. If <code>FailedPutCount</code> is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.</p> <p>If <a>PutRecordBatch</a> throws <code>ServiceUnavailableException</code>, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.</p> <p>Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.</p> <important> <p>Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.</p> </important></p>
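    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs) of the
    /// <code>FailedPutCount</code> check described above, assuming the input and
    /// output shapes defined earlier in this file:
    ///
    /// ```no_run
    /// use bytes::Bytes;
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, PutRecordBatchInput, Record};
    ///
    /// async fn put_batch() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let records: Vec<Record> = (0..10)
    ///         .map(|i| Record {
    ///             data: Bytes::from(format!("{{\"n\":{}}}\n", i)),
    ///         })
    ///         .collect();
    ///     let output = client
    ///         .put_record_batch(PutRecordBatchInput {
    ///             delivery_stream_name: "example-stream".to_owned(),
    ///             records,
    ///         })
    ///         .await
    ///         .unwrap();
    ///     // A successful call can still contain per-record failures; entries
    ///     // that carry an ErrorCode should be resent.
    ///     if output.failed_put_count > 0 {
    ///         for entry in &output.request_responses {
    ///             if let Some(code) = &entry.error_code {
    ///                 eprintln!("failed: {} ({:?})", code, entry.error_message);
    ///             }
    ///         }
    ///     }
    /// }
    /// ```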
    async fn put_record_batch(
        &self,
        input: PutRecordBatchInput,
    ) -> Result<PutRecordBatchOutput, RusotoError<PutRecordBatchError>>;

    /// <p>Enables server-side encryption (SSE) for the delivery stream. </p> <p>This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to <code>ENABLING</code>, and then to <code>ENABLED</code>. The encryption status of a delivery stream is the <code>Status</code> property in <a>DeliveryStreamEncryptionConfiguration</a>. If the operation fails, the encryption status changes to <code>ENABLING_FAILED</code>. You can continue to read and write data to your delivery stream while the encryption status is <code>ENABLING</code>, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to <code>ENABLED</code> before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements <a>PutRecordOutput$Encrypted</a> and <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p> <p>To check the encryption status of a delivery stream, use <a>DescribeDeliveryStream</a>.</p> <p>Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type <code>CUSTOMER_MANAGED_CMK</code>, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type <code>CUSTOMER_MANAGED_CMK</code>, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.</p> <p>If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get <code>ENABLING_FAILED</code>, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.</p> <p>If the encryption status of your delivery stream is <code>ENABLING_FAILED</code>, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.</p> <p>You can enable SSE for a delivery stream only if it's a delivery stream that uses <code>DirectPut</code> as its source. </p> <p>The <code>StartDeliveryStreamEncryption</code> and <code>StopDeliveryStreamEncryption</code> operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call <code>StartDeliveryStreamEncryption</code> 13 times and <code>StopDeliveryStreamEncryption</code> 12 times for the same delivery stream in a 24-hour period.</p>
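    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs), assuming
    /// the encryption input shapes defined earlier in this file:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{
    ///     DeliveryStreamEncryptionConfigurationInput, KinesisFirehose, KinesisFirehoseClient,
    ///     StartDeliveryStreamEncryptionInput,
    /// };
    ///
    /// async fn enable_sse() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = StartDeliveryStreamEncryptionInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         delivery_stream_encryption_configuration_input: Some(
    ///             DeliveryStreamEncryptionConfigurationInput {
    ///                 // No KeyARN is needed for an AWS-owned CMK.
    ///                 key_type: "AWS_OWNED_CMK".to_owned(),
    ///                 key_arn: None,
    ///             },
    ///         ),
    ///     };
    ///     client.start_delivery_stream_encryption(input).await.unwrap();
    /// }
    /// ```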
    async fn start_delivery_stream_encryption(
        &self,
        input: StartDeliveryStreamEncryptionInput,
    ) -> Result<StartDeliveryStreamEncryptionOutput, RusotoError<StartDeliveryStreamEncryptionError>>;

    /// <p>Disables server-side encryption (SSE) for the delivery stream. </p> <p>This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to <code>DISABLING</code>, and then to <code>DISABLED</code>. You can continue to read and write data to your stream while its status is <code>DISABLING</code>. It can take up to 5 seconds after the encryption status changes to <code>DISABLED</code> before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements <a>PutRecordOutput$Encrypted</a> and <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p> <p>To check the encryption state of a delivery stream, use <a>DescribeDeliveryStream</a>. </p> <p>If SSE is enabled using a customer managed CMK and then you invoke <code>StopDeliveryStreamEncryption</code>, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.</p> <p>The <code>StartDeliveryStreamEncryption</code> and <code>StopDeliveryStreamEncryption</code> operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call <code>StartDeliveryStreamEncryption</code> 13 times and <code>StopDeliveryStreamEncryption</code> 12 times for the same delivery stream in a 24-hour period.</p>
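    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs); the
    /// stream name is a placeholder:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, StopDeliveryStreamEncryptionInput};
    ///
    /// async fn disable_sse() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = StopDeliveryStreamEncryptionInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         ..StopDeliveryStreamEncryptionInput::default()
    ///     };
    ///     client.stop_delivery_stream_encryption(input).await.unwrap();
    /// }
    /// ```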
    async fn stop_delivery_stream_encryption(
        &self,
        input: StopDeliveryStreamEncryptionInput,
    ) -> Result<StopDeliveryStreamEncryptionOutput, RusotoError<StopDeliveryStreamEncryptionError>>;

    /// <p>Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see <a href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Using Cost Allocation Tags</a> in the <i>AWS Billing and Cost Management User Guide</i>. </p> <p>Each delivery stream can have up to 50 tags. </p> <p>This operation has a limit of five transactions per second per account. </p>
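    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs), assuming
    /// the `Tag` shape defined earlier in this file; the key and value are
    /// placeholders:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, Tag, TagDeliveryStreamInput};
    ///
    /// async fn add_tag() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = TagDeliveryStreamInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         tags: vec![Tag {
    ///             key: "team".to_owned(),
    ///             value: Some("data-platform".to_owned()),
    ///         }],
    ///     };
    ///     client.tag_delivery_stream(input).await.unwrap();
    /// }
    /// ```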
    async fn tag_delivery_stream(
        &self,
        input: TagDeliveryStreamInput,
    ) -> Result<TagDeliveryStreamOutput, RusotoError<TagDeliveryStreamError>>;

    /// <p>Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.</p> <p>If you specify a tag that doesn't exist, the operation ignores it.</p> <p>This operation has a limit of five transactions per second per account. </p>
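    ///
    /// A usage sketch (hand-written, not part of the generated AWS docs); the
    /// stream name and tag key are placeholders:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, UntagDeliveryStreamInput};
    ///
    /// async fn remove_tag() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     let input = UntagDeliveryStreamInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         tag_keys: vec!["team".to_owned()],
    ///     };
    ///     client.untag_delivery_stream(input).await.unwrap();
    /// }
    /// ```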
    async fn untag_delivery_stream(
        &self,
        input: UntagDeliveryStreamInput,
    ) -> Result<UntagDeliveryStreamOutput, RusotoError<UntagDeliveryStreamError>>;

    /// <p>Updates the specified destination of the specified delivery stream.</p> <p>Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.</p> <p>Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.</p> <p>If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if <a>EncryptionConfiguration</a> is not specified, then the existing <code>EncryptionConfiguration</code> is maintained on the destination.</p> <p>If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.</p> <p>Kinesis Data Firehose uses <code>CurrentDeliveryStreamVersionId</code> to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using <a>DescribeDeliveryStream</a>. Use the new version ID to set <code>CurrentDeliveryStreamVersionId</code> in the next call.</p>
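    ///
    /// A sketch (hand-written, not part of the generated AWS docs) of the
    /// version-ID handshake described above, assuming the describe/update shapes
    /// defined earlier in this file; it naively updates the first destination:
    ///
    /// ```no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{
    ///     DescribeDeliveryStreamInput, ExtendedS3DestinationUpdate, KinesisFirehose,
    ///     KinesisFirehoseClient, UpdateDestinationInput,
    /// };
    ///
    /// async fn update_s3_destination() {
    ///     let client = KinesisFirehoseClient::new(Region::UsEast1);
    ///     // Fetch the current version ID first; the service rejects stale IDs.
    ///     let desc = client
    ///         .describe_delivery_stream(DescribeDeliveryStreamInput {
    ///             delivery_stream_name: "example-stream".to_owned(),
    ///             ..DescribeDeliveryStreamInput::default()
    ///         })
    ///         .await
    ///         .unwrap()
    ///         .delivery_stream_description;
    ///     let input = UpdateDestinationInput {
    ///         delivery_stream_name: "example-stream".to_owned(),
    ///         current_delivery_stream_version_id: desc.version_id,
    ///         destination_id: desc.destinations[0].destination_id.clone(),
    ///         extended_s3_destination_update: Some(ExtendedS3DestinationUpdate::default()),
    ///         ..UpdateDestinationInput::default()
    ///     };
    ///     client.update_destination(input).await.unwrap();
    /// }
    /// ```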
    async fn update_destination(
        &self,
        input: UpdateDestinationInput,
    ) -> Result<UpdateDestinationOutput, RusotoError<UpdateDestinationError>>;
}
/// A client for the Firehose API.
#[derive(Clone)]
pub struct KinesisFirehoseClient {
    client: Client,
    region: region::Region,
}

impl KinesisFirehoseClient {
    /// Creates a client backed by the default tokio event loop.
    ///
    /// The client will use the default credentials provider and TLS client.
    pub fn new(region: region::Region) -> KinesisFirehoseClient {
        KinesisFirehoseClient {
            client: Client::shared(),
            region,
        }
    }

    pub fn new_with<P, D>(
        request_dispatcher: D,
        credentials_provider: P,
        region: region::Region,
    ) -> KinesisFirehoseClient
    where
        P: ProvideAwsCredentials + Send + Sync + 'static,
        D: DispatchSignedRequest + Send + Sync + 'static,
    {
        KinesisFirehoseClient {
            client: Client::new_with(credentials_provider, request_dispatcher),
            region,
        }
    }

    pub fn new_with_client(client: Client, region: region::Region) -> KinesisFirehoseClient {
        KinesisFirehoseClient { client, region }
    }
}

2280#[async_trait]
2281impl KinesisFirehose for KinesisFirehoseClient {
2282 /// <p>Creates a Kinesis Data Firehose delivery stream.</p> <p>By default, you can create up to 50 delivery streams per AWS Region.</p> <p>This is an asynchronous operation that immediately returns. The initial status of the delivery stream is <code>CREATING</code>. After the delivery stream is created, its status is <code>ACTIVE</code> and it now accepts data. If the delivery stream creation fails, the status transitions to <code>CREATING_FAILED</code>. Attempts to send data to a delivery stream that is not in the <code>ACTIVE</code> state cause an exception. To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>.</p> <p>If the status of a delivery stream is <code>CREATING_FAILED</code>, this status doesn't change, and you can't invoke <code>CreateDeliveryStream</code> again on it. However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete it.</p> <p>A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using <a>PutRecord</a> or <a>PutRecordBatch</a>, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the <code>DeliveryStreamType</code> parameter to <code>KinesisStreamAsSource</code>, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the <code>KinesisStreamSourceConfiguration</code> parameter.</p> <p>To create a delivery stream with server-side encryption (SSE) enabled, include <a>DeliveryStreamEncryptionConfigurationInput</a> in your request. This is optional. You can also invoke <a>StartDeliveryStreamEncryption</a> to turn on SSE for an existing delivery stream that doesn't have SSE enabled.</p> <p>A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: <code>ExtendedS3DestinationConfiguration</code>, <code>S3DestinationConfiguration</code>, <code>ElasticsearchDestinationConfiguration</code>, <code>RedshiftDestinationConfiguration</code>, or <code>SplunkDestinationConfiguration</code>.</p> <p>When you specify <code>S3DestinationConfiguration</code>, you can also provide the following optional values: BufferingHints, <code>EncryptionConfiguration</code>, and <code>CompressionFormat</code>. By default, if no <code>BufferingHints</code> value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. <code>BufferingHints</code> is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.</p> <p>A few notes about Amazon Redshift as a destination:</p> <ul> <li> <p>An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses <code>COPY</code> syntax to load data into an Amazon Redshift table. 
This is specified in the <code>RedshiftDestinationConfiguration.S3Configuration</code> parameter.</p> </li> <li> <p>The compression formats <code>SNAPPY</code> or <code>ZIP</code> cannot be specified in <code>RedshiftDestinationConfiguration.S3Configuration</code> because the Amazon Redshift <code>COPY</code> operation that reads from the S3 bucket doesn't support these compression formats.</p> </li> <li> <p>We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift <code>INSERT</code> permissions.</p> </li> </ul> <p>Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3">Grant Kinesis Data Firehose Access to an Amazon S3 Destination</a> in the <i>Amazon Kinesis Data Firehose Developer Guide</i>.</p>
2283 async fn create_delivery_stream(
2284 &self,
2285 input: CreateDeliveryStreamInput,
2286 ) -> Result<CreateDeliveryStreamOutput, RusotoError<CreateDeliveryStreamError>> {
2287 let mut request = self.new_signed_request("POST", "/");
2288 request.add_header("x-amz-target", "Firehose_20150804.CreateDeliveryStream");
2289 let encoded = serde_json::to_string(&input).unwrap();
2290 request.set_payload(Some(encoded));
2291
2292 let response = self
2293 .sign_and_dispatch(request, CreateDeliveryStreamError::from_response)
2294 .await?;
2295 let mut response = response;
2296 let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
2297 proto::json::ResponsePayload::new(&response).deserialize::<CreateDeliveryStreamOutput, _>()
2298 }
2299
2300 /// <p>Deletes a delivery stream and its data.</p> <p>To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>. You can delete a delivery stream only if it is in one of the following states: <code>ACTIVE</code>, <code>DELETING</code>, <code>CREATING_FAILED</code>, or <code>DELETING_FAILED</code>. You can't delete a delivery stream that is in the <code>CREATING</code> state. While the deletion request is in process, the delivery stream is in the <code>DELETING</code> state.</p> <p>While the delivery stream is in the <code>DELETING</code> state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.</p>
2301 async fn delete_delivery_stream(
2302 &self,
2303 input: DeleteDeliveryStreamInput,
2304 ) -> Result<DeleteDeliveryStreamOutput, RusotoError<DeleteDeliveryStreamError>> {
2305 let mut request = self.new_signed_request("POST", "/");
2306 request.add_header("x-amz-target", "Firehose_20150804.DeleteDeliveryStream");
2307 let encoded = serde_json::to_string(&input).unwrap();
2308 request.set_payload(Some(encoded));
2309
2310 let response = self
2311 .sign_and_dispatch(request, DeleteDeliveryStreamError::from_response)
2312 .await?;
2313 let mut response = response;
2314 let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
2315 proto::json::ResponsePayload::new(&response).deserialize::<DeleteDeliveryStreamOutput, _>()
2316 }
2317
2318 /// <p>Describes the specified delivery stream and its status. For example, after your delivery stream is created, call <code>DescribeDeliveryStream</code> to see whether the delivery stream is <code>ACTIVE</code> and therefore ready for data to be sent to it. </p> <p>If the status of a delivery stream is <code>CREATING_FAILED</code>, this status doesn't change, and you can't invoke <a>CreateDeliveryStream</a> again on it. However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete it. If the status is <code>DELETING_FAILED</code>, you can force deletion by invoking <a>DeleteDeliveryStream</a> again but with <a>DeleteDeliveryStreamInput$AllowForceDelete</a> set to true.</p>
    async fn describe_delivery_stream(
        &self,
        input: DescribeDeliveryStreamInput,
    ) -> Result<DescribeDeliveryStreamOutput, RusotoError<DescribeDeliveryStreamError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.DescribeDeliveryStream");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, DescribeDeliveryStreamError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<DescribeDeliveryStreamOutput, _>()
    }

    /// <p>Lists your delivery streams in alphabetical order of their names.</p> <p>The number of delivery streams might be too large to return using a single call to <code>ListDeliveryStreams</code>. You can limit the number of delivery streams returned using the <code>Limit</code> parameter. To determine whether there are more delivery streams to list, check the value of <code>HasMoreDeliveryStreams</code> in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the <code>ExclusiveStartDeliveryStreamName</code> parameter to the name of the last delivery stream returned in the last call.</p>
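    ///
    /// A minimal pagination sketch following the <code>HasMoreDeliveryStreams</code>
    /// and <code>ExclusiveStartDeliveryStreamName</code> contract described above;
    /// the region is a placeholder:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, ListDeliveryStreamsInput};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::ListDeliveryStreamsError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// let mut exclusive_start = None;
    /// loop {
    ///     let page = client
    ///         .list_delivery_streams(ListDeliveryStreamsInput {
    ///             exclusive_start_delivery_stream_name: exclusive_start.take(),
    ///             ..Default::default()
    ///         })
    ///         .await?;
    ///     // Remember the last name returned so the next call resumes after it.
    ///     exclusive_start = page.delivery_stream_names.last().cloned();
    ///     for name in page.delivery_stream_names {
    ///         println!("{}", name);
    ///     }
    ///     if !page.has_more_delivery_streams {
    ///         break;
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```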
    async fn list_delivery_streams(
        &self,
        input: ListDeliveryStreamsInput,
    ) -> Result<ListDeliveryStreamsOutput, RusotoError<ListDeliveryStreamsError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.ListDeliveryStreams");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, ListDeliveryStreamsError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<ListDeliveryStreamsOutput, _>()
    }

    /// <p>Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account. </p>
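    ///
    /// A minimal usage sketch; the stream name is a placeholder and the output field
    /// names are assumed to match the shapes generated in this crate:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, ListTagsForDeliveryStreamInput};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::ListTagsForDeliveryStreamError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// let output = client
    ///     .list_tags_for_delivery_stream(ListTagsForDeliveryStreamInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         ..Default::default()
    ///     })
    ///     .await?;
    /// for tag in output.tags {
    ///     println!("{} = {:?}", tag.key, tag.value);
    /// }
    /// # Ok(())
    /// # }
    /// ```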
    async fn list_tags_for_delivery_stream(
        &self,
        input: ListTagsForDeliveryStreamInput,
    ) -> Result<ListTagsForDeliveryStreamOutput, RusotoError<ListTagsForDeliveryStreamError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header(
            "x-amz-target",
            "Firehose_20150804.ListTagsForDeliveryStream",
        );
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, ListTagsForDeliveryStreamError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<ListTagsForDeliveryStreamOutput, _>()
    }

    /// <p><p>Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use <a>PutRecordBatch</a>. Applications using these operations are referred to as producers.</p> <p>By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use <a>PutRecord</a> and <a>PutRecordBatch</a>, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose Limits</a>. </p> <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.</p> <p>Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (<code>\n</code>) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.</p> <p>The <code>PutRecord</code> operation returns a <code>RecordId</code>, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.</p> <p>If the <code>PutRecord</code> operation throws a <code>ServiceUnavailableException</code>, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. </p> <p>Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.</p> <important> <p>Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.</p> </important></p>
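    ///
    /// A minimal usage sketch; the stream name and payload are placeholders, and the
    /// trailing newline follows the delimiter advice above. The <code>data</code>
    /// field is assumed to accept a byte buffer via <code>Into</code>, as with the
    /// <code>Record</code> shape generated in this crate:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, PutRecordInput, Record};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::PutRecordError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// let output = client
    ///     .put_record(PutRecordInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         record: Record {
    ///             // Raw bytes; the newline keeps blobs separable downstream.
    ///             data: b"{\"event\":\"example\"}\n".to_vec().into(),
    ///         },
    ///     })
    ///     .await?;
    /// println!("record id: {}", output.record_id);
    /// # Ok(())
    /// # }
    /// ```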
    async fn put_record(
        &self,
        input: PutRecordInput,
    ) -> Result<PutRecordOutput, RusotoError<PutRecordError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.PutRecord");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, PutRecordError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<PutRecordOutput, _>()
    }

    /// <p><p>Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use <a>PutRecord</a>. Applications using these operations are referred to as producers.</p> <p>By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use <a>PutRecord</a> and <a>PutRecordBatch</a>, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Kinesis Data Firehose Limits</a>.</p> <p>Each <a>PutRecordBatch</a> request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.</p> <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.</p> <p>Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (<code>\n</code>) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.</p> <p>The <a>PutRecordBatch</a> response includes a count of failed records, <code>FailedPutCount</code>, and an array of responses, <code>RequestResponses</code>. Even if the <a>PutRecordBatch</a> call succeeds, the value of <code>FailedPutCount</code> may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the <code>RequestResponses</code> array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. <code>RequestResponses</code> includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each <a>PutRecordBatch</a> request. A single record failure does not stop the processing of subsequent records. </p> <p>A successfully processed record includes a <code>RecordId</code> value, which is unique for the record. An unsuccessfully processed record includes <code>ErrorCode</code> and <code>ErrorMessage</code> values. <code>ErrorCode</code> reflects the type of error, and is one of the following values: <code>ServiceUnavailableException</code> or <code>InternalFailure</code>. <code>ErrorMessage</code> provides more detailed information about the error.</p> <p>If there is an internal server error or a timeout, the write might have completed or it might have failed. If <code>FailedPutCount</code> is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.</p> <p>If <a>PutRecordBatch</a> throws <code>ServiceUnavailableException</code>, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.</p> <p>Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.</p> <important> <p>Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.</p> </important></p>
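    ///
    /// A minimal sketch of the retry guidance above: check <code>FailedPutCount</code>
    /// and collect only the failed records for a retry. The stream name and payloads
    /// are placeholders, and the field names are assumed to match the shapes
    /// generated in this crate:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, PutRecordBatchInput, Record};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::PutRecordBatchError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// let records: Vec<Record> = (0..3)
    ///     .map(|i| Record {
    ///         data: format!("{{\"seq\":{}}}\n", i).into_bytes().into(),
    ///     })
    ///     .collect();
    /// let output = client
    ///     .put_record_batch(PutRecordBatchInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         records: records.clone(),
    ///     })
    ///     .await?;
    /// if output.failed_put_count > 0 {
    ///     // Response entries line up with the request records by index, so keep
    ///     // only the records whose entry carries an error code.
    ///     let retry: Vec<Record> = output
    ///         .request_responses
    ///         .iter()
    ///         .zip(records)
    ///         .filter(|(entry, _)| entry.error_code.is_some())
    ///         .map(|(_, record)| record)
    ///         .collect();
    ///     // Re-send `retry` here, with backoff.
    /// }
    /// # Ok(())
    /// # }
    /// ```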
    async fn put_record_batch(
        &self,
        input: PutRecordBatchInput,
    ) -> Result<PutRecordBatchOutput, RusotoError<PutRecordBatchError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.PutRecordBatch");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, PutRecordBatchError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<PutRecordBatchOutput, _>()
    }

    /// <p>Enables server-side encryption (SSE) for the delivery stream. </p> <p>This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to <code>ENABLING</code>, and then to <code>ENABLED</code>. The encryption status of a delivery stream is the <code>Status</code> property in <a>DeliveryStreamEncryptionConfiguration</a>. If the operation fails, the encryption status changes to <code>ENABLING_FAILED</code>. You can continue to read and write data to your delivery stream while the encryption status is <code>ENABLING</code>, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to <code>ENABLED</code> before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements <a>PutRecordOutput$Encrypted</a> and <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p> <p>To check the encryption status of a delivery stream, use <a>DescribeDeliveryStream</a>.</p> <p>Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type <code>CUSTOMER_MANAGED_CMK</code>, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type <code>CUSTOMER_MANAGED_CMK</code>, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.</p> <p>If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get <code>ENABLING_FAILED</code>, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.</p> <p>If the encryption status of your delivery stream is <code>ENABLING_FAILED</code>, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.</p> <p>You can enable SSE for a delivery stream only if it's a delivery stream that uses <code>DirectPut</code> as its source. </p> <p>The <code>StartDeliveryStreamEncryption</code> and <code>StopDeliveryStreamEncryption</code> operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call <code>StartDeliveryStreamEncryption</code> 13 times and <code>StopDeliveryStreamEncryption</code> 12 times for the same delivery stream in a 24-hour period.</p>
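    ///
    /// A minimal usage sketch that enables SSE with an AWS-owned CMK; the stream
    /// name is a placeholder, and the key-type string and input shapes are assumed
    /// to match the structs generated in this crate:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{
    ///     DeliveryStreamEncryptionConfigurationInput, KinesisFirehose, KinesisFirehoseClient,
    ///     StartDeliveryStreamEncryptionInput,
    /// };
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::StartDeliveryStreamEncryptionError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// client
    ///     .start_delivery_stream_encryption(StartDeliveryStreamEncryptionInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         delivery_stream_encryption_configuration_input: Some(
    ///             DeliveryStreamEncryptionConfigurationInput {
    ///                 key_type: "AWS_OWNED_CMK".to_owned(),
    ///                 ..Default::default() // no KeyARN needed for an AWS-owned CMK
    ///             },
    ///         ),
    ///     })
    ///     .await?;
    /// // The call returns immediately; poll DescribeDeliveryStream for ENABLED.
    /// # Ok(())
    /// # }
    /// ```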
    async fn start_delivery_stream_encryption(
        &self,
        input: StartDeliveryStreamEncryptionInput,
    ) -> Result<StartDeliveryStreamEncryptionOutput, RusotoError<StartDeliveryStreamEncryptionError>>
    {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header(
            "x-amz-target",
            "Firehose_20150804.StartDeliveryStreamEncryption",
        );
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, StartDeliveryStreamEncryptionError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<StartDeliveryStreamEncryptionOutput, _>()
    }

    /// <p>Disables server-side encryption (SSE) for the delivery stream. </p> <p>This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to <code>DISABLING</code>, and then to <code>DISABLED</code>. You can continue to read and write data to your stream while its status is <code>DISABLING</code>. It can take up to 5 seconds after the encryption status changes to <code>DISABLED</code> before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements <a>PutRecordOutput$Encrypted</a> and <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p> <p>To check the encryption state of a delivery stream, use <a>DescribeDeliveryStream</a>. </p> <p>If SSE is enabled using a customer managed CMK and then you invoke <code>StopDeliveryStreamEncryption</code>, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.</p> <p>The <code>StartDeliveryStreamEncryption</code> and <code>StopDeliveryStreamEncryption</code> operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call <code>StartDeliveryStreamEncryption</code> 13 times and <code>StopDeliveryStreamEncryption</code> 12 times for the same delivery stream in a 24-hour period.</p>
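    ///
    /// A minimal usage sketch; the region and stream name are placeholders:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, StopDeliveryStreamEncryptionInput};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::StopDeliveryStreamEncryptionError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// client
    ///     .stop_delivery_stream_encryption(StopDeliveryStreamEncryptionInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///     })
    ///     .await?;
    /// // The call returns immediately; poll DescribeDeliveryStream for DISABLED.
    /// # Ok(())
    /// # }
    /// ```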
    async fn stop_delivery_stream_encryption(
        &self,
        input: StopDeliveryStreamEncryptionInput,
    ) -> Result<StopDeliveryStreamEncryptionOutput, RusotoError<StopDeliveryStreamEncryptionError>>
    {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header(
            "x-amz-target",
            "Firehose_20150804.StopDeliveryStreamEncryption",
        );
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, StopDeliveryStreamEncryptionError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response)
            .deserialize::<StopDeliveryStreamEncryptionOutput, _>()
    }

    /// <p>Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see <a href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Using Cost Allocation Tags</a> in the <i>AWS Billing and Cost Management User Guide</i>. </p> <p>Each delivery stream can have up to 50 tags. </p> <p>This operation has a limit of five transactions per second per account. </p>
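    ///
    /// A minimal usage sketch; the stream name, tag key, and tag value are
    /// placeholders:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, Tag, TagDeliveryStreamInput};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::TagDeliveryStreamError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// client
    ///     .tag_delivery_stream(TagDeliveryStreamInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         tags: vec![Tag {
    ///             key: "environment".to_owned(),        // placeholder key
    ///             value: Some("staging".to_owned()),    // placeholder value
    ///         }],
    ///     })
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```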
    async fn tag_delivery_stream(
        &self,
        input: TagDeliveryStreamInput,
    ) -> Result<TagDeliveryStreamOutput, RusotoError<TagDeliveryStreamError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.TagDeliveryStream");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, TagDeliveryStreamError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<TagDeliveryStreamOutput, _>()
    }

    /// <p>Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.</p> <p>If you specify a tag that doesn't exist, the operation ignores it.</p> <p>This operation has a limit of five transactions per second per account. </p>
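    ///
    /// A minimal usage sketch; the stream name and tag key are placeholders:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{KinesisFirehose, KinesisFirehoseClient, UntagDeliveryStreamInput};
    ///
    /// # async fn example() -> Result<(), rusoto_core::RusotoError<rusoto_firehose::UntagDeliveryStreamError>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// client
    ///     .untag_delivery_stream(UntagDeliveryStreamInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         tag_keys: vec!["environment".to_owned()],     // placeholder key
    ///     })
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```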
    async fn untag_delivery_stream(
        &self,
        input: UntagDeliveryStreamInput,
    ) -> Result<UntagDeliveryStreamOutput, RusotoError<UntagDeliveryStreamError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.UntagDeliveryStream");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, UntagDeliveryStreamError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<UntagDeliveryStreamOutput, _>()
    }

    /// <p>Updates the specified destination of the specified delivery stream.</p> <p>Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.</p> <p>Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.</p> <p>If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if <a>EncryptionConfiguration</a> is not specified, then the existing <code>EncryptionConfiguration</code> is maintained on the destination.</p> <p>If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.</p> <p>Kinesis Data Firehose uses <code>CurrentDeliveryStreamVersionId</code> to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using <a>DescribeDeliveryStream</a>. Use the new version ID to set <code>CurrentDeliveryStreamVersionId</code> in the next call.</p>
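    ///
    /// A minimal sketch of the version-ID flow described above: describe the stream,
    /// then pass its current version ID and destination ID back in. The stream name
    /// is a placeholder, the description field names are assumed to match the shapes
    /// generated in this crate, and a real call would also set one of the
    /// destination-update fields that <code>..Default::default()</code> leaves unset:
    ///
    /// ```rust,no_run
    /// use rusoto_core::Region;
    /// use rusoto_firehose::{
    ///     DescribeDeliveryStreamInput, KinesisFirehose, KinesisFirehoseClient,
    ///     UpdateDestinationInput,
    /// };
    ///
    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let client = KinesisFirehoseClient::new(Region::UsEast1);
    /// // Fetch the current version ID and destination ID first; the service applies
    /// // the update only if the version ID still matches.
    /// let description = client
    ///     .describe_delivery_stream(DescribeDeliveryStreamInput {
    ///         delivery_stream_name: "my-stream".to_owned(), // placeholder
    ///         ..Default::default()
    ///     })
    ///     .await?
    ///     .delivery_stream_description;
    /// client
    ///     .update_destination(UpdateDestinationInput {
    ///         delivery_stream_name: "my-stream".to_owned(),
    ///         current_delivery_stream_version_id: description.version_id,
    ///         destination_id: description.destinations[0].destination_id.clone(),
    ///         ..Default::default() // set the update for your destination type here
    ///     })
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```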
    async fn update_destination(
        &self,
        input: UpdateDestinationInput,
    ) -> Result<UpdateDestinationOutput, RusotoError<UpdateDestinationError>> {
        let mut request = self.new_signed_request("POST", "/");
        request.add_header("x-amz-target", "Firehose_20150804.UpdateDestination");
        let encoded = serde_json::to_string(&input).unwrap();
        request.set_payload(Some(encoded));

        let mut response = self
            .sign_and_dispatch(request, UpdateDestinationError::from_response)
            .await?;
        let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
        proto::json::ResponsePayload::new(&response).deserialize::<UpdateDestinationOutput, _>()
    }
}