aws_sdk_kinesis/operation/create_stream/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_stream::_create_stream_output::CreateStreamOutputBuilder;

pub use crate::operation::create_stream::_create_stream_input::CreateStreamInputBuilder;

impl crate::operation::create_stream::builders::CreateStreamInputBuilder {
    /// Sends a request with this input using the given client.
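    ///
    /// A minimal usage sketch, not taken from this file's docs (the client and
    /// stream name are illustrative assumptions):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_kinesis::Client) -> Result<(), aws_sdk_kinesis::Error> {
    /// use aws_sdk_kinesis::operation::create_stream::builders::CreateStreamInputBuilder;
    ///
    /// // Build the input independently of any client, then send it with one.
    /// CreateStreamInputBuilder::default()
    ///     .stream_name("example-stream")
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```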
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_stream::CreateStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_stream::CreateStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_stream();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateStream`.
///
/// <p>Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or <i>producers</i>. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream.</p>
/// <p>You can create your data stream using either on-demand or provisioned capacity mode. Data streams with an on-demand mode require no capacity planning and automatically scale to handle gigabytes of write and read throughput per minute. With the on-demand mode, Kinesis Data Streams automatically manages the shards in order to provide the necessary throughput.</p>
/// <p>If you'd still like to proactively scale your on-demand data stream’s capacity, you can unlock the warm throughput feature for on-demand data streams by enabling <code>MinimumThroughputBillingCommitment</code> for your account. Once your account has <code>MinimumThroughputBillingCommitment</code> enabled, you can specify the warm throughput in MiB per second that your stream can support in writes.</p>
/// <p>For data streams with a provisioned mode, you must specify the number of shards for the data stream. Each shard can support up to five read transactions per second, up to a maximum total data read rate of 2 MiB per second. Each shard can support writes of up to 1,000 records per second, up to a maximum total data write rate of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards.</p>
/// <p>The stream name identifies the stream. The name is scoped to the Amazon Web Services account used by the application. It is also scoped by Amazon Web Services Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name.</p>
/// <p><code>CreateStream</code> is an asynchronous operation. Upon receiving a <code>CreateStream</code> request, Kinesis Data Streams immediately returns and sets the stream status to <code>CREATING</code>. After the stream is created, Kinesis Data Streams sets the stream status to <code>ACTIVE</code>. You should perform read and write operations only on an <code>ACTIVE</code> stream.</p>
/// <p>You receive a <code>LimitExceededException</code> when making a <code>CreateStream</code> request if you try to do one of the following:</p>
/// <ul>
/// <li>
/// <p>Have more than five streams in the <code>CREATING</code> state at any point in time.</p></li>
/// <li>
/// <p>Create more shards than are authorized for your account.</p></li>
/// </ul>
/// <p>For the default shard or on-demand throughput limits for an Amazon Web Services account, see <a href="https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html">Amazon Kinesis Data Streams Limits</a> in the <i>Amazon Kinesis Data Streams Developer Guide</i>. To increase this limit, <a href="https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html">contact Amazon Web Services Support</a>.</p>
/// <p>You can use <code>DescribeStreamSummary</code> to check the stream status, which is returned in <code>StreamStatus</code>.</p>
/// <p><code>CreateStream</code> has a limit of five transactions per second per account.</p>
/// <p>You can add tags to the stream when making a <code>CreateStream</code> request by setting the <code>Tags</code> parameter. If you pass the <code>Tags</code> parameter, in addition to having the <code>kinesis:CreateStream</code> permission, you must also have the <code>kinesis:AddTagsToStream</code> permission for the stream that will be created. The <code>kinesis:TagResource</code> permission won’t work to tag streams on creation. Tags take effect from the <code>CREATING</code> status of the stream, but you can't make any updates to the tags until the stream is in the <code>ACTIVE</code> state.</p>
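/// # Example
///
/// A minimal usage sketch, not taken from the service docs (the stream name,
/// tag, and capacity-mode choice are illustrative, and `StreamModeDetails::build()`
/// is assumed to validate its required `stream_mode` field):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_kinesis::Client) -> Result<(), aws_sdk_kinesis::Error> {
/// use aws_sdk_kinesis::types::{StreamMode, StreamModeDetails};
///
/// // CreateStream returns immediately; the stream then moves from CREATING
/// // to ACTIVE asynchronously (poll with DescribeStreamSummary).
/// client
///     .create_stream()
///     .stream_name("example-stream")
///     .stream_mode_details(
///         StreamModeDetails::builder()
///             .stream_mode(StreamMode::OnDemand)
///             .build()
///             .expect("stream_mode was provided"),
///     )
///     .tags("team", "analytics")
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```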
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateStreamFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_stream::builders::CreateStreamInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_stream::CreateStreamOutput,
        crate::operation::create_stream::CreateStreamError,
    > for CreateStreamFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_stream::CreateStreamOutput,
            crate::operation::create_stream::CreateStreamError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateStreamFluentBuilder {
    /// Creates a new `CreateStreamFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the `CreateStream` input as a reference.
    pub fn as_input(&self) -> &crate::operation::create_stream::builders::CreateStreamInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
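    ///
    /// A sketch of adjusting retries at client construction, assuming the usual
    /// `aws_config` loader (the attempt count is illustrative):
    ///
    /// ```no_run
    /// # async fn example() {
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// let shared_config = aws_config::load_from_env().await;
    /// let config = aws_sdk_kinesis::config::Builder::from(&shared_config)
    ///     // Allow up to 5 total attempts instead of the default.
    ///     .retry_config(RetryConfig::standard().with_max_attempts(5))
    ///     .build();
    /// let client = aws_sdk_kinesis::Client::from_conf(config);
    /// # let _ = client;
    /// # }
    /// ```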
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_stream::CreateStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_stream::CreateStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_stream::CreateStream::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_stream::CreateStream::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
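    ///
    /// A minimal sketch, assuming the `mutate_request` helper on the returned
    /// customizable operation (the header is illustrative):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_kinesis::Client) -> Result<(), aws_sdk_kinesis::Error> {
    /// client
    ///     .create_stream()
    ///     .stream_name("example-stream")
    ///     .customize()
    ///     // Mutate the HTTP request before it is sent.
    ///     .mutate_request(|req| {
    ///         req.headers_mut().insert("x-example-header", "example");
    ///     })
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```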
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_stream::CreateStreamOutput,
        crate::operation::create_stream::CreateStreamError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    /// Overrides the client configuration for this operation only.
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    /// Sets or clears the per-operation configuration override.
    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>A name to identify the stream. The stream name is scoped to the Amazon Web Services account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts can have the same name. Two streams in the same Amazon Web Services account but in two different Regions can also have the same name.</p>
    pub fn stream_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.stream_name(input.into());
        self
    }
    /// <p>A name to identify the stream. The stream name is scoped to the Amazon Web Services account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts can have the same name. Two streams in the same Amazon Web Services account but in two different Regions can also have the same name.</p>
    pub fn set_stream_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_stream_name(input);
        self
    }
    /// <p>A name to identify the stream. The stream name is scoped to the Amazon Web Services account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts can have the same name. Two streams in the same Amazon Web Services account but in two different Regions can also have the same name.</p>
    pub fn get_stream_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_stream_name()
    }
    /// <p>The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.</p>
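    ///
    /// A sketch of the capacity arithmetic from the operation docs above,
    /// with an illustrative shard count (not service-validated code):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_kinesis::Client) -> Result<(), aws_sdk_kinesis::Error> {
    /// // 4 shards => roughly 4 MiB/s aggregate write capacity and
    /// // 8 MiB/s aggregate read capacity for a provisioned-mode stream.
    /// client
    ///     .create_stream()
    ///     .stream_name("provisioned-stream")
    ///     .shard_count(4)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```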
    pub fn shard_count(mut self, input: i32) -> Self {
        self.inner = self.inner.shard_count(input);
        self
    }
    /// <p>The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.</p>
    pub fn set_shard_count(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_shard_count(input);
        self
    }
    /// <p>The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.</p>
    pub fn get_shard_count(&self) -> &::std::option::Option<i32> {
        self.inner.get_shard_count()
    }
    /// <p>Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you can choose between an <b>on-demand</b> capacity mode and a <b>provisioned</b> capacity mode for your data streams.</p>
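    ///
    /// A minimal construction sketch, assuming `build()` validates the required
    /// `stream_mode` field and returns a `Result`:
    ///
    /// ```no_run
    /// use aws_sdk_kinesis::types::{StreamMode, StreamModeDetails};
    ///
    /// let details = StreamModeDetails::builder()
    ///     .stream_mode(StreamMode::Provisioned)
    ///     .build()
    ///     .expect("stream_mode was provided");
    /// # let _ = details;
    /// ```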
    pub fn stream_mode_details(mut self, input: crate::types::StreamModeDetails) -> Self {
        self.inner = self.inner.stream_mode_details(input);
        self
    }
    /// <p>Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you can choose between an <b>on-demand</b> capacity mode and a <b>provisioned</b> capacity mode for your data streams.</p>
    pub fn set_stream_mode_details(mut self, input: ::std::option::Option<crate::types::StreamModeDetails>) -> Self {
        self.inner = self.inner.set_stream_mode_details(input);
        self
    }
    /// <p>Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you can choose between an <b>on-demand</b> capacity mode and a <b>provisioned</b> capacity mode for your data streams.</p>
    pub fn get_stream_mode_details(&self) -> &::std::option::Option<crate::types::StreamModeDetails> {
        self.inner.get_stream_mode_details()
    }
    ///
    /// Adds a key-value pair to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>A set of up to 50 key-value pairs to use to create the tags. A tag consists of a required key and an optional value.</p>
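    ///
    /// A sketch of tagging at creation time (keys and values are illustrative;
    /// see the permission notes in the operation docs above):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_kinesis::Client) -> Result<(), aws_sdk_kinesis::Error> {
    /// client
    ///     .create_stream()
    ///     .stream_name("example-stream")
    ///     // Repeated calls accumulate entries in the tag map.
    ///     .tags("team", "analytics")
    ///     .tags("env", "prod")
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```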
    pub fn tags(mut self, k: impl ::std::convert::Into<::std::string::String>, v: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.tags(k.into(), v.into());
        self
    }
    /// <p>A set of up to 50 key-value pairs to use to create the tags. A tag consists of a required key and an optional value.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>A set of up to 50 key-value pairs to use to create the tags. A tag consists of a required key and an optional value.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.inner.get_tags()
    }
    /// <p>The target warm throughput in MiB/s that the stream should be scaled to handle. This represents the throughput capacity that will be immediately available for write operations.</p>
    pub fn warm_throughput_mibps(mut self, input: i32) -> Self {
        self.inner = self.inner.warm_throughput_mibps(input);
        self
    }
    /// <p>The target warm throughput in MiB/s that the stream should be scaled to handle. This represents the throughput capacity that will be immediately available for write operations.</p>
    pub fn set_warm_throughput_mibps(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_warm_throughput_mibps(input);
        self
    }
    /// <p>The target warm throughput in MiB/s that the stream should be scaled to handle. This represents the throughput capacity that will be immediately available for write operations.</p>
    pub fn get_warm_throughput_mibps(&self) -> &::std::option::Option<i32> {
        self.inner.get_warm_throughput_mibps()
    }
    /// <p>The maximum size, in kibibytes (KiB), of a single record that you can write to and read from the stream.</p>
    pub fn max_record_size_in_kib(mut self, input: i32) -> Self {
        self.inner = self.inner.max_record_size_in_kib(input);
        self
    }
    /// <p>The maximum size, in kibibytes (KiB), of a single record that you can write to and read from the stream.</p>
    pub fn set_max_record_size_in_kib(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_max_record_size_in_kib(input);
        self
    }
    /// <p>The maximum size, in kibibytes (KiB), of a single record that you can write to and read from the stream.</p>
    pub fn get_max_record_size_in_kib(&self) -> &::std::option::Option<i32> {
        self.inner.get_max_record_size_in_kib()
    }
}