aws_sdk_neptunedata/operation/start_ml_data_processing_job/
builders.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2pub use crate::operation::start_ml_data_processing_job::_start_ml_data_processing_job_output::StartMlDataProcessingJobOutputBuilder;
3
4pub use crate::operation::start_ml_data_processing_job::_start_ml_data_processing_job_input::StartMlDataProcessingJobInputBuilder;
5
impl crate::operation::start_ml_data_processing_job::builders::StartMlDataProcessingJobInputBuilder {
    /// Sends a request with this input using the given client.
    ///
    /// Convenience entry point for callers that assembled the input builder
    /// separately: the input is moved into a fresh fluent builder obtained
    /// from `client` and dispatched immediately.
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::start_ml_data_processing_job::StartMlDataProcessingJobOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_ml_data_processing_job::StartMLDataProcessingJobError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.start_ml_data_processing_job();
        // Replace the fluent builder's default (empty) input builder with this one.
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartMLDataProcessingJob`.
///
/// <p>Creates a new Neptune ML data processing job for processing the graph data exported from Neptune for training. See <a href="https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-dataprocessing.html">The <code>dataprocessing</code> command</a>.</p>
/// <p>When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the <a href="https://docs.aws.amazon.com/neptune/latest/userguide/iam-dp-actions.html#startmlmodeldataprocessingjob">neptune-db:StartMLModelDataProcessingJob</a> IAM action in that cluster.</p>
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct StartMLDataProcessingJobFluentBuilder {
    // Shared client internals (runtime plugins and resolved configuration).
    handle: ::std::sync::Arc<crate::client::Handle>,
    // Accumulates the operation input as the fluent setters below are called.
    inner: crate::operation::start_ml_data_processing_job::builders::StartMlDataProcessingJobInputBuilder,
    // Optional per-operation config that overrides the client-level config in `send`.
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_ml_data_processing_job::StartMlDataProcessingJobOutput,
        crate::operation::start_ml_data_processing_job::StartMLDataProcessingJobError,
    > for StartMLDataProcessingJobFluentBuilder
{
    // Applies the accumulated per-operation `config_override` and dispatches the
    // request; the future is boxed so `CustomizableOperation` can store it type-erased.
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_ml_data_processing_job::StartMlDataProcessingJobOutput,
            crate::operation::start_ml_data_processing_job::StartMLDataProcessingJobError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartMLDataProcessingJobFluentBuilder {
    /// Creates a new `StartMLDataProcessingJobFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartMLDataProcessingJob as a reference.
    pub fn as_input(&self) -> &crate::operation::start_ml_data_processing_job::builders::StartMlDataProcessingJobInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::start_ml_data_processing_job::StartMlDataProcessingJobOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_ml_data_processing_job::StartMLDataProcessingJobError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        // Finalize the input; builder validation errors surface as construction failures
        // rather than service errors.
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        // Layer client-level runtime plugins, client config, and any per-operation override.
        let runtime_plugins = crate::operation::start_ml_data_processing_job::StartMLDataProcessingJob::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::start_ml_data_processing_job::StartMLDataProcessingJob::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::start_ml_data_processing_job::StartMlDataProcessingJobOutput,
        crate::operation::start_ml_data_processing_job::StartMLDataProcessingJobError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    // Stores a per-operation config override; consumed by `send` when building runtime plugins.
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    // Sets or clears the per-operation config override.
    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>A unique identifier for the new job. The default is an autogenerated UUID.</p>
    pub fn id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.id(input.into());
        self
    }
    /// <p>A unique identifier for the new job. The default is an autogenerated UUID.</p>
    pub fn set_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
    /// <p>A unique identifier for the new job. The default is an autogenerated UUID.</p>
    pub fn get_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_id()
    }
    /// <p>The job ID of a completed data processing job run on an earlier version of the data.</p>
    pub fn previous_data_processing_job_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.previous_data_processing_job_id(input.into());
        self
    }
    /// <p>The job ID of a completed data processing job run on an earlier version of the data.</p>
    pub fn set_previous_data_processing_job_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_previous_data_processing_job_id(input);
        self
    }
    /// <p>The job ID of a completed data processing job run on an earlier version of the data.</p>
    pub fn get_previous_data_processing_job_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_previous_data_processing_job_id()
    }
    /// <p>The URI of the Amazon S3 location where you want SageMaker to download the data needed to run the data processing job.</p>
    pub fn input_data_s3_location(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.input_data_s3_location(input.into());
        self
    }
    /// <p>The URI of the Amazon S3 location where you want SageMaker to download the data needed to run the data processing job.</p>
    pub fn set_input_data_s3_location(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_input_data_s3_location(input);
        self
    }
    /// <p>The URI of the Amazon S3 location where you want SageMaker to download the data needed to run the data processing job.</p>
    pub fn get_input_data_s3_location(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_input_data_s3_location()
    }
    /// <p>The URI of the Amazon S3 location where you want SageMaker to save the results of a data processing job.</p>
    pub fn processed_data_s3_location(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.processed_data_s3_location(input.into());
        self
    }
    /// <p>The URI of the Amazon S3 location where you want SageMaker to save the results of a data processing job.</p>
    pub fn set_processed_data_s3_location(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_processed_data_s3_location(input);
        self
    }
    /// <p>The URI of the Amazon S3 location where you want SageMaker to save the results of a data processing job.</p>
    pub fn get_processed_data_s3_location(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_processed_data_s3_location()
    }
    /// <p>The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.</p>
    pub fn sagemaker_iam_role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.sagemaker_iam_role_arn(input.into());
        self
    }
    /// <p>The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.</p>
    pub fn set_sagemaker_iam_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_sagemaker_iam_role_arn(input);
        self
    }
    /// <p>The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.</p>
    pub fn get_sagemaker_iam_role_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_sagemaker_iam_role_arn()
    }
    /// <p>The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. This must be listed in your DB cluster parameter group or an error will occur.</p>
    pub fn neptune_iam_role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.neptune_iam_role_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. This must be listed in your DB cluster parameter group or an error will occur.</p>
    pub fn set_neptune_iam_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_neptune_iam_role_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. This must be listed in your DB cluster parameter group or an error will occur.</p>
    pub fn get_neptune_iam_role_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_neptune_iam_role_arn()
    }
    /// <p>The type of ML instance used during data processing. Its memory should be large enough to hold the processed dataset. The default is the smallest ml.r5 type whose memory is ten times larger than the size of the exported graph data on disk.</p>
    pub fn processing_instance_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.processing_instance_type(input.into());
        self
    }
    /// <p>The type of ML instance used during data processing. Its memory should be large enough to hold the processed dataset. The default is the smallest ml.r5 type whose memory is ten times larger than the size of the exported graph data on disk.</p>
    pub fn set_processing_instance_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_processing_instance_type(input);
        self
    }
    /// <p>The type of ML instance used during data processing. Its memory should be large enough to hold the processed dataset. The default is the smallest ml.r5 type whose memory is ten times larger than the size of the exported graph data on disk.</p>
    pub fn get_processing_instance_type(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_processing_instance_type()
    }
    /// <p>The disk volume size of the processing instance. Both input data and processed data are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML chooses the volume size automatically based on the data size.</p>
    pub fn processing_instance_volume_size_in_gb(mut self, input: i32) -> Self {
        self.inner = self.inner.processing_instance_volume_size_in_gb(input);
        self
    }
    /// <p>The disk volume size of the processing instance. Both input data and processed data are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML chooses the volume size automatically based on the data size.</p>
    pub fn set_processing_instance_volume_size_in_gb(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_processing_instance_volume_size_in_gb(input);
        self
    }
    /// <p>The disk volume size of the processing instance. Both input data and processed data are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML chooses the volume size automatically based on the data size.</p>
    pub fn get_processing_instance_volume_size_in_gb(&self) -> &::std::option::Option<i32> {
        self.inner.get_processing_instance_volume_size_in_gb()
    }
    /// <p>Timeout in seconds for the data processing job. The default is 86,400 (1 day).</p>
    pub fn processing_time_out_in_seconds(mut self, input: i32) -> Self {
        self.inner = self.inner.processing_time_out_in_seconds(input);
        self
    }
    /// <p>Timeout in seconds for the data processing job. The default is 86,400 (1 day).</p>
    pub fn set_processing_time_out_in_seconds(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_processing_time_out_in_seconds(input);
        self
    }
    /// <p>Timeout in seconds for the data processing job. The default is 86,400 (1 day).</p>
    pub fn get_processing_time_out_in_seconds(&self) -> &::std::option::Option<i32> {
        self.inner.get_processing_time_out_in_seconds()
    }
    /// <p>One of the two model types that Neptune ML currently supports: heterogeneous graph models (<code>heterogeneous</code>), and knowledge graph (<code>kge</code>). The default is none. If not specified, Neptune ML chooses the model type automatically based on the data.</p>
    pub fn model_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_type(input.into());
        self
    }
    /// <p>One of the two model types that Neptune ML currently supports: heterogeneous graph models (<code>heterogeneous</code>), and knowledge graph (<code>kge</code>). The default is none. If not specified, Neptune ML chooses the model type automatically based on the data.</p>
    pub fn set_model_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_type(input);
        self
    }
    /// <p>One of the two model types that Neptune ML currently supports: heterogeneous graph models (<code>heterogeneous</code>), and knowledge graph (<code>kge</code>). The default is none. If not specified, Neptune ML chooses the model type automatically based on the data.</p>
    pub fn get_model_type(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_type()
    }
    /// <p>A data specification file that describes how to load the exported graph data for training. The file is automatically generated by the Neptune export toolkit. The default is <code>training-data-configuration.json</code>.</p>
    pub fn config_file_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.config_file_name(input.into());
        self
    }
    /// <p>A data specification file that describes how to load the exported graph data for training. The file is automatically generated by the Neptune export toolkit. The default is <code>training-data-configuration.json</code>.</p>
    pub fn set_config_file_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_config_file_name(input);
        self
    }
    /// <p>A data specification file that describes how to load the exported graph data for training. The file is automatically generated by the Neptune export toolkit. The default is <code>training-data-configuration.json</code>.</p>
    pub fn get_config_file_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_config_file_name()
    }
    ///
    /// Appends an item to `subnets`.
    ///
    /// To override the contents of this collection use [`set_subnets`](Self::set_subnets).
    ///
    /// <p>The IDs of the subnets in the Neptune VPC. The default is None.</p>
    pub fn subnets(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.subnets(input.into());
        self
    }
    /// <p>The IDs of the subnets in the Neptune VPC. The default is None.</p>
    pub fn set_subnets(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.inner = self.inner.set_subnets(input);
        self
    }
    /// <p>The IDs of the subnets in the Neptune VPC. The default is None.</p>
    pub fn get_subnets(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        self.inner.get_subnets()
    }
    ///
    /// Appends an item to `securityGroupIds`.
    ///
    /// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids).
    ///
    /// <p>The VPC security group IDs. The default is None.</p>
    pub fn security_group_ids(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.security_group_ids(input.into());
        self
    }
    /// <p>The VPC security group IDs. The default is None.</p>
    pub fn set_security_group_ids(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.inner = self.inner.set_security_group_ids(input);
        self
    }
    /// <p>The VPC security group IDs. The default is None.</p>
    pub fn get_security_group_ids(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        self.inner.get_security_group_ids()
    }
    /// <p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.</p>
    pub fn volume_encryption_kms_key(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.volume_encryption_kms_key(input.into());
        self
    }
    /// <p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.</p>
    pub fn set_volume_encryption_kms_key(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_volume_encryption_kms_key(input);
        self
    }
    /// <p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.</p>
    pub fn get_volume_encryption_kms_key(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_volume_encryption_kms_key()
    }
    /// <p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.</p>
    pub fn s3_output_encryption_kms_key(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.s3_output_encryption_kms_key(input.into());
        self
    }
    /// <p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.</p>
    pub fn set_s3_output_encryption_kms_key(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_s3_output_encryption_kms_key(input);
        self
    }
    /// <p>The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.</p>
    pub fn get_s3_output_encryption_kms_key(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_s3_output_encryption_kms_key()
    }
}