aws_sdk_machinelearning/operation/create_data_source_from_redshift/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_data_source_from_redshift::_create_data_source_from_redshift_output::CreateDataSourceFromRedshiftOutputBuilder;

pub use crate::operation::create_data_source_from_redshift::_create_data_source_from_redshift_input::CreateDataSourceFromRedshiftInputBuilder;

impl crate::operation::create_data_source_from_redshift::builders::CreateDataSourceFromRedshiftInputBuilder {
    /// Sends a request with this input using the given client.
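    ///
    /// A minimal sketch of calling `send_with` directly on the input builder. The
    /// client parameter and field values are illustrative assumptions, and required
    /// fields such as `data_spec` and `role_arn` are omitted for brevity.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_machinelearning::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_machinelearning::operation::create_data_source_from_redshift::builders::CreateDataSourceFromRedshiftInputBuilder;
    ///
    /// // Build the input separately, then send it with an existing client.
    /// let output = CreateDataSourceFromRedshiftInputBuilder::default()
    ///     .data_source_id("my-redshift-datasource") // placeholder ID
    ///     .data_source_name("Redshift observations") // placeholder name
    ///     .send_with(client)
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```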
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_data_source_from_redshift();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateDataSourceFromRedshift`.
///
/// <p>Creates a <code>DataSource</code> from a database hosted on an Amazon Redshift cluster. A <code>DataSource</code> references data that can be used to perform <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p>
/// <p><code>CreateDataSourceFromRedshift</code> is an asynchronous operation. In response to <code>CreateDataSourceFromRedshift</code>, Amazon Machine Learning (Amazon ML) immediately returns and sets the <code>DataSource</code> status to <code>PENDING</code>. After the <code>DataSource</code> is created and ready for use, Amazon ML sets the <code>Status</code> parameter to <code>COMPLETED</code>. A <code>DataSource</code> in the <code>COMPLETED</code> or <code>PENDING</code> state can be used to perform only <code>CreateMLModel</code>, <code>CreateEvaluation</code>, or <code>CreateBatchPrediction</code> operations.</p>
/// <p>If Amazon ML can't accept the input source, it sets the <code>Status</code> parameter to <code>FAILED</code> and includes an error message in the <code>Message</code> attribute of the <code>GetDataSource</code> operation response.</p>
/// <p>The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a <code>SelectSqlQuery</code> query. Amazon ML executes an <code>Unload</code> command in Amazon Redshift to transfer the result set of the <code>SelectSqlQuery</code> query to <code>S3StagingLocation</code>.</p>
/// <p>After the <code>DataSource</code> has been created, it's ready for use in evaluations and batch predictions. If you plan to use the <code>DataSource</code> to train an <code>MLModel</code>, the <code>DataSource</code> also requires a recipe. A recipe describes how each input variable will be used in training an <code>MLModel</code>. Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable, or will it be split apart into word combinations? The recipe provides answers to these questions.</p>
/// <p>You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call <code>GetDataSource</code> for an existing datasource and copy the values to a <code>CreateDataSource</code> call. Change the settings that you want to change and make sure that all required fields have the appropriate values.</p>
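///
/// A minimal sketch of the typical fluent-builder call. Every setter used below is
/// defined in this file; the client, the pre-built `data_spec`, and the field values
/// are illustrative assumptions.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_machinelearning::Client,
/// #     data_spec: aws_sdk_machinelearning::types::RedshiftDataSpec,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .create_data_source_from_redshift()
///     .data_source_id("my-redshift-datasource") // placeholder ID
///     .data_source_name("Redshift observations") // placeholder name
///     .data_spec(data_spec) // see the `data_spec` setter below
///     .role_arn("arn:aws:iam::123456789012:role/ExampleMLRole") // placeholder ARN
///     .compute_statistics(true) // must be true if the DataSource will train an MLModel
///     .send()
///     .await?;
/// println!("created data source: {:?}", output.data_source_id());
/// # Ok(())
/// # }
/// ```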
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateDataSourceFromRedshiftFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_data_source_from_redshift::builders::CreateDataSourceFromRedshiftInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftOutput,
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftError,
    > for CreateDataSourceFromRedshiftFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftOutput,
            crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateDataSourceFromRedshiftFluentBuilder {
    /// Creates a new `CreateDataSourceFromRedshiftFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateDataSourceFromRedshift input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::create_data_source_from_redshift::builders::CreateDataSourceFromRedshiftInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
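    ///
    /// A minimal sketch of raising the retry limit at client-configuration time,
    /// assuming the `aws_config` crate is used to load shared configuration; the
    /// attempt count is an illustrative value.
    ///
    /// ```no_run
    /// # async fn example() {
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// // Allow up to 5 total attempts instead of the default 3 (1 initial + 2 retries).
    /// let config = aws_config::from_env()
    ///     .retry_config(RetryConfig::standard().with_max_attempts(5))
    ///     .load()
    ///     .await;
    /// let client = aws_sdk_machinelearning::Client::new(&config);
    /// # let _ = client;
    /// # }
    /// ```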
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshift::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshift::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
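    ///
    /// A minimal sketch of the customize/send flow. The field value is a placeholder,
    /// and the customization step is left abstract because the available hooks vary
    /// by SDK version.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_machinelearning::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let op = client
    ///     .create_data_source_from_redshift()
    ///     .data_source_id("my-redshift-datasource") // placeholder ID
    ///     .customize();
    /// // ... apply per-operation customizations (e.g. interceptors) here ...
    /// let output = op.send().await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```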
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftOutput,
        crate::operation::create_data_source_from_redshift::CreateDataSourceFromRedshiftError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>.</p>
    pub fn data_source_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.data_source_id(input.into());
        self
    }
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>.</p>
    pub fn set_data_source_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_data_source_id(input);
        self
    }
    /// <p>A user-supplied ID that uniquely identifies the <code>DataSource</code>.</p>
    pub fn get_data_source_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_data_source_id()
    }
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    pub fn data_source_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.data_source_name(input.into());
        self
    }
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    pub fn set_data_source_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_data_source_name(input);
        self
    }
    /// <p>A user-supplied name or description of the <code>DataSource</code>.</p>
    pub fn get_data_source_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_data_source_name()
    }
    /// <p>The data specification of an Amazon Redshift <code>DataSource</code> (see the sketch after this list):</p>
    /// <ul>
    /// <li>
    /// <p>DatabaseInformation -</p>
    /// <ul>
    /// <li>
    /// <p><code>DatabaseName</code> - The name of the Amazon Redshift database.</p></li>
    /// <li>
    /// <p><code>ClusterIdentifier</code> - The unique ID for the Amazon Redshift cluster.</p></li>
    /// </ul></li>
    /// <li>
    /// <p>DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p></li>
    /// <li>
    /// <p>SelectSqlQuery - The query that is used to retrieve the observation data for the <code>Datasource</code>.</p></li>
    /// <li>
    /// <p>S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the <code>SelectSqlQuery</code> query is stored in this location.</p></li>
    /// <li>
    /// <p>DataSchemaUri - The Amazon S3 location of the <code>DataSchema</code>.</p></li>
    /// <li>
    /// <p>DataSchema - A JSON string representing the schema. This is not required if <code>DataSchemaUri</code> is specified.</p></li>
    /// <li>
    /// <p>DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the <code>DataSource</code>.</p>
    /// <p>Sample - <code>"{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"</code></p></li>
    /// </ul>
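    ///
    /// A minimal sketch of assembling a <code>RedshiftDataSpec</code>. The builder
    /// field names follow this crate's conventions, but the values are placeholders
    /// and the set of required fields may differ by SDK version.
    ///
    /// ```no_run
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_machinelearning::types::{RedshiftDataSpec, RedshiftDatabase, RedshiftDatabaseCredentials};
    ///
    /// let data_spec = RedshiftDataSpec::builder()
    ///     .database_information(
    ///         RedshiftDatabase::builder()
    ///             .database_name("dev") // placeholder database name
    ///             .cluster_identifier("example-cluster") // placeholder cluster ID
    ///             .build()?, // build() returns an error if required fields are missing
    ///     )
    ///     .database_credentials(
    ///         RedshiftDatabaseCredentials::builder()
    ///             .username("awsuser") // placeholder credentials
    ///             .password("example-password")
    ///             .build()?,
    ///     )
    ///     .select_sql_query("SELECT * FROM observations") // placeholder query
    ///     .s3_staging_location("s3://example-bucket/staging/") // placeholder S3 location
    ///     .build()?;
    /// # let _ = data_spec;
    /// # Ok(())
    /// # }
    /// ```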
    pub fn data_spec(mut self, input: crate::types::RedshiftDataSpec) -> Self {
        self.inner = self.inner.data_spec(input);
        self
    }
    /// <p>The data specification of an Amazon Redshift <code>DataSource</code>:</p>
    /// <ul>
    /// <li>
    /// <p>DatabaseInformation -</p>
    /// <ul>
    /// <li>
    /// <p><code>DatabaseName</code> - The name of the Amazon Redshift database.</p></li>
    /// <li>
    /// <p><code>ClusterIdentifier</code> - The unique ID for the Amazon Redshift cluster.</p></li>
    /// </ul></li>
    /// <li>
    /// <p>DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p></li>
    /// <li>
    /// <p>SelectSqlQuery - The query that is used to retrieve the observation data for the <code>Datasource</code>.</p></li>
    /// <li>
    /// <p>S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the <code>SelectSqlQuery</code> query is stored in this location.</p></li>
    /// <li>
    /// <p>DataSchemaUri - The Amazon S3 location of the <code>DataSchema</code>.</p></li>
    /// <li>
    /// <p>DataSchema - A JSON string representing the schema. This is not required if <code>DataSchemaUri</code> is specified.</p></li>
    /// <li>
    /// <p>DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the <code>DataSource</code>.</p>
    /// <p>Sample - <code>"{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"</code></p></li>
    /// </ul>
    pub fn set_data_spec(mut self, input: ::std::option::Option<crate::types::RedshiftDataSpec>) -> Self {
        self.inner = self.inner.set_data_spec(input);
        self
    }
    /// <p>The data specification of an Amazon Redshift <code>DataSource</code>:</p>
    /// <ul>
    /// <li>
    /// <p>DatabaseInformation -</p>
    /// <ul>
    /// <li>
    /// <p><code>DatabaseName</code> - The name of the Amazon Redshift database.</p></li>
    /// <li>
    /// <p><code>ClusterIdentifier</code> - The unique ID for the Amazon Redshift cluster.</p></li>
    /// </ul></li>
    /// <li>
    /// <p>DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.</p></li>
    /// <li>
    /// <p>SelectSqlQuery - The query that is used to retrieve the observation data for the <code>Datasource</code>.</p></li>
    /// <li>
    /// <p>S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the <code>SelectSqlQuery</code> query is stored in this location.</p></li>
    /// <li>
    /// <p>DataSchemaUri - The Amazon S3 location of the <code>DataSchema</code>.</p></li>
    /// <li>
    /// <p>DataSchema - A JSON string representing the schema. This is not required if <code>DataSchemaUri</code> is specified.</p></li>
    /// <li>
    /// <p>DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the <code>DataSource</code>.</p>
    /// <p>Sample - <code>"{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"</code></p></li>
    /// </ul>
    pub fn get_data_spec(&self) -> &::std::option::Option<crate::types::RedshiftDataSpec> {
        self.inner.get_data_spec()
    }
    /// <p>A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:</p>
    /// <ul>
    /// <li>
    /// <p>A security group to allow Amazon ML to execute the <code>SelectSqlQuery</code> query on an Amazon Redshift cluster</p></li>
    /// <li>
    /// <p>An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the <code>S3StagingLocation</code></p></li>
    /// </ul>
    pub fn role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.role_arn(input.into());
        self
    }
    /// <p>A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:</p>
    /// <ul>
    /// <li>
    /// <p>A security group to allow Amazon ML to execute the <code>SelectSqlQuery</code> query on an Amazon Redshift cluster</p></li>
    /// <li>
    /// <p>An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the <code>S3StagingLocation</code></p></li>
    /// </ul>
    pub fn set_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_role_arn(input);
        self
    }
    /// <p>A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:</p>
    /// <ul>
    /// <li>
    /// <p>A security group to allow Amazon ML to execute the <code>SelectSqlQuery</code> query on an Amazon Redshift cluster</p></li>
    /// <li>
    /// <p>An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the <code>S3StagingLocation</code></p></li>
    /// </ul>
    pub fn get_role_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_role_arn()
    }
    /// <p>The compute statistics for a <code>DataSource</code>. The statistics are generated from the observation data referenced by a <code>DataSource</code>. Amazon ML uses the statistics internally during <code>MLModel</code> training. This parameter must be set to <code>true</code> if the <code>DataSource</code> needs to be used for <code>MLModel</code> training.</p>
    pub fn compute_statistics(mut self, input: bool) -> Self {
        self.inner = self.inner.compute_statistics(input);
        self
    }
    /// <p>The compute statistics for a <code>DataSource</code>. The statistics are generated from the observation data referenced by a <code>DataSource</code>. Amazon ML uses the statistics internally during <code>MLModel</code> training. This parameter must be set to <code>true</code> if the <code>DataSource</code> needs to be used for <code>MLModel</code> training.</p>
    pub fn set_compute_statistics(mut self, input: ::std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_compute_statistics(input);
        self
    }
    /// <p>The compute statistics for a <code>DataSource</code>. The statistics are generated from the observation data referenced by a <code>DataSource</code>. Amazon ML uses the statistics internally during <code>MLModel</code> training. This parameter must be set to <code>true</code> if the <code>DataSource</code> needs to be used for <code>MLModel</code> training.</p>
    pub fn get_compute_statistics(&self) -> &::std::option::Option<bool> {
        self.inner.get_compute_statistics()
    }
}