aws_sdk_cleanroomsml/operation/start_trained_model_inference_job/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_trained_model_inference_job::_start_trained_model_inference_job_output::StartTrainedModelInferenceJobOutputBuilder;

pub use crate::operation::start_trained_model_inference_job::_start_trained_model_inference_job_input::StartTrainedModelInferenceJobInputBuilder;

impl crate::operation::start_trained_model_inference_job::builders::StartTrainedModelInferenceJobInputBuilder {
    /// Sends a request with this input using the given client.
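    ///
    /// A minimal sketch of the pattern (the identifier values below are illustrative
    /// placeholders, and required fields beyond those shown are omitted for brevity):
    /// ```no_run
    /// # async fn example(client: &aws_sdk_cleanroomsml::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_cleanroomsml::operation::start_trained_model_inference_job::builders::StartTrainedModelInferenceJobInputBuilder;
    ///
    /// // Build the input independently of a client, then send it with an existing client.
    /// let output = StartTrainedModelInferenceJobInputBuilder::default()
    ///     .membership_identifier("membership-id") // placeholder
    ///     .name("example-inference-job") // placeholder
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```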
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.start_trained_model_inference_job();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartTrainedModelInferenceJob`.
///
/// <p>Defines the information necessary to begin a trained model inference job.</p>
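///
/// A hedged usage sketch (ARNs and identifiers are placeholders; required settings
/// such as the resource configuration, output configuration, and data source are
/// elided for brevity):
/// ```no_run
/// # async fn example(client: &aws_sdk_cleanroomsml::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .start_trained_model_inference_job()
///     .membership_identifier("membership-id") // placeholder
///     .name("example-inference-job") // placeholder
///     .trained_model_arn("arn:aws:cleanrooms-ml:us-east-1:111122223333:trained-model/example") // placeholder
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```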
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct StartTrainedModelInferenceJobFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::start_trained_model_inference_job::builders::StartTrainedModelInferenceJobInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobOutput,
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobError,
    > for StartTrainedModelInferenceJobFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobOutput,
            crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartTrainedModelInferenceJobFluentBuilder {
    /// Creates a new `StartTrainedModelInferenceJobFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartTrainedModelInferenceJob input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::start_trained_model_inference_job::builders::StartTrainedModelInferenceJobInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
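    ///
    /// As a sketch, retry behavior might be tuned when the client is constructed
    /// (the attempt count below is illustrative, not a recommendation):
    /// ```no_run
    /// # async fn example() {
    /// let config = aws_config::defaults(aws_config::BehaviorVersion::latest())
    ///     // Allow up to five total attempts instead of the default.
    ///     .retry_config(aws_smithy_types::retry::RetryConfig::standard().with_max_attempts(5))
    ///     .load()
    ///     .await;
    /// let client = aws_sdk_cleanroomsml::Client::new(&config);
    /// # }
    /// ```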
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJob::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJob::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
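    ///
    /// One possible use, sketched with a placeholder header name and value:
    /// ```no_run
    /// # async fn example(client: &aws_sdk_cleanroomsml::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = client
    ///     .start_trained_model_inference_job()
    ///     .membership_identifier("membership-id") // placeholder
    ///     .customize()
    ///     .mutate_request(|req| {
    ///         // Attach an extra header before the request is dispatched.
    ///         req.headers_mut().insert("x-example-header", "example-value");
    ///     })
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```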
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobOutput,
        crate::operation::start_trained_model_inference_job::StartTrainedModelInferenceJobError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    /// Applies a per-operation config override to this builder, replacing any override set previously.
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    /// Sets or clears the per-operation config override for this builder.
    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The membership ID of the membership that contains the trained model inference job.</p>
    pub fn membership_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.membership_identifier(input.into());
        self
    }
    /// <p>The membership ID of the membership that contains the trained model inference job.</p>
    pub fn set_membership_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_membership_identifier(input);
        self
    }
    /// <p>The membership ID of the membership that contains the trained model inference job.</p>
    pub fn get_membership_identifier(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_membership_identifier()
    }
    /// <p>The name of the trained model inference job.</p>
    pub fn name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p>The name of the trained model inference job.</p>
    pub fn set_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>The name of the trained model inference job.</p>
    pub fn get_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_name()
    }
    /// <p>The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job.</p>
    pub fn trained_model_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.trained_model_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job.</p>
    pub fn set_trained_model_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_trained_model_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job.</p>
    pub fn get_trained_model_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_trained_model_arn()
    }
    /// <p>The version identifier of the trained model to use for inference. This specifies which version of the trained model should be used to generate predictions on the input data.</p>
    pub fn trained_model_version_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.trained_model_version_identifier(input.into());
        self
    }
    /// <p>The version identifier of the trained model to use for inference. This specifies which version of the trained model should be used to generate predictions on the input data.</p>
    pub fn set_trained_model_version_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_trained_model_version_identifier(input);
        self
    }
    /// <p>The version identifier of the trained model to use for inference. This specifies which version of the trained model should be used to generate predictions on the input data.</p>
    pub fn get_trained_model_version_identifier(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_trained_model_version_identifier()
    }
    /// <p>The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job.</p>
    pub fn configured_model_algorithm_association_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.configured_model_algorithm_association_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job.</p>
    pub fn set_configured_model_algorithm_association_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_configured_model_algorithm_association_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job.</p>
    pub fn get_configured_model_algorithm_association_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_configured_model_algorithm_association_arn()
    }
    /// <p>Defines the resource configuration for the trained model inference job.</p>
    pub fn resource_config(mut self, input: crate::types::InferenceResourceConfig) -> Self {
        self.inner = self.inner.resource_config(input);
        self
    }
    /// <p>Defines the resource configuration for the trained model inference job.</p>
    pub fn set_resource_config(mut self, input: ::std::option::Option<crate::types::InferenceResourceConfig>) -> Self {
        self.inner = self.inner.set_resource_config(input);
        self
    }
    /// <p>Defines the resource configuration for the trained model inference job.</p>
    pub fn get_resource_config(&self) -> &::std::option::Option<crate::types::InferenceResourceConfig> {
        self.inner.get_resource_config()
    }
    /// <p>Defines the output configuration information for the trained model inference job.</p>
    pub fn output_configuration(mut self, input: crate::types::InferenceOutputConfiguration) -> Self {
        self.inner = self.inner.output_configuration(input);
        self
    }
    /// <p>Defines the output configuration information for the trained model inference job.</p>
    pub fn set_output_configuration(mut self, input: ::std::option::Option<crate::types::InferenceOutputConfiguration>) -> Self {
        self.inner = self.inner.set_output_configuration(input);
        self
    }
    /// <p>Defines the output configuration information for the trained model inference job.</p>
    pub fn get_output_configuration(&self) -> &::std::option::Option<crate::types::InferenceOutputConfiguration> {
        self.inner.get_output_configuration()
    }
    /// <p>Defines the data source that is used for the trained model inference job.</p>
    pub fn data_source(mut self, input: crate::types::ModelInferenceDataSource) -> Self {
        self.inner = self.inner.data_source(input);
        self
    }
    /// <p>Defines the data source that is used for the trained model inference job.</p>
    pub fn set_data_source(mut self, input: ::std::option::Option<crate::types::ModelInferenceDataSource>) -> Self {
        self.inner = self.inner.set_data_source(input);
        self
    }
    /// <p>Defines the data source that is used for the trained model inference job.</p>
    pub fn get_data_source(&self) -> &::std::option::Option<crate::types::ModelInferenceDataSource> {
        self.inner.get_data_source()
    }
    /// <p>The description of the trained model inference job.</p>
    pub fn description(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.description(input.into());
        self
    }
    /// <p>The description of the trained model inference job.</p>
    pub fn set_description(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_description(input);
        self
    }
    /// <p>The description of the trained model inference job.</p>
    pub fn get_description(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_description()
    }
    /// <p>The execution parameters for the container.</p>
    pub fn container_execution_parameters(mut self, input: crate::types::InferenceContainerExecutionParameters) -> Self {
        self.inner = self.inner.container_execution_parameters(input);
        self
    }
    /// <p>The execution parameters for the container.</p>
    pub fn set_container_execution_parameters(mut self, input: ::std::option::Option<crate::types::InferenceContainerExecutionParameters>) -> Self {
        self.inner = self.inner.set_container_execution_parameters(input);
        self
    }
    /// <p>The execution parameters for the container.</p>
    pub fn get_container_execution_parameters(&self) -> &::std::option::Option<crate::types::InferenceContainerExecutionParameters> {
        self.inner.get_container_execution_parameters()
    }
    ///
    /// Adds a key-value pair to `environment`.
    ///
    /// To override the contents of this collection use [`set_environment`](Self::set_environment).
    ///
    /// <p>The environment variables to set in the Docker container.</p>
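    ///
    /// For example (the variable names and values below are placeholders):
    /// ```no_run
    /// # fn example(builder: aws_sdk_cleanroomsml::operation::start_trained_model_inference_job::builders::StartTrainedModelInferenceJobFluentBuilder) {
    /// let builder = builder
    ///     .environment("LOG_LEVEL", "info")
    ///     .environment("BATCH_SIZE", "64");
    /// # }
    /// ```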
    pub fn environment(mut self, k: impl ::std::convert::Into<::std::string::String>, v: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.environment(k.into(), v.into());
        self
    }
    /// <p>The environment variables to set in the Docker container.</p>
    pub fn set_environment(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_environment(input);
        self
    }
    /// <p>The environment variables to set in the Docker container.</p>
    pub fn get_environment(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.inner.get_environment()
    }
    /// <p>The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data.</p>
    pub fn kms_key_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.kms_key_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data.</p>
    pub fn set_kms_key_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_kms_key_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data.</p>
    pub fn get_kms_key_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_kms_key_arn()
    }
    ///
    /// Adds a key-value pair to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The optional metadata that you apply to the resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.</p>
    /// <p>The following basic restrictions apply to tags:</p>
    /// <ul>
    /// <li>
    /// <p>Maximum number of tags per resource - 50.</p></li>
    /// <li>
    /// <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p></li>
    /// <li>
    /// <p>Maximum key length - 128 Unicode characters in UTF-8.</p></li>
    /// <li>
    /// <p>Maximum value length - 256 Unicode characters in UTF-8.</p></li>
    /// <li>
    /// <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p></li>
    /// <li>
    /// <p>Tag keys and values are case sensitive.</p></li>
    /// <li>
    /// <p>Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys, as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.</p></li>
    /// </ul>
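    ///
    /// For example (the keys and values below are illustrative placeholders):
    /// ```no_run
    /// # fn example(builder: aws_sdk_cleanroomsml::operation::start_trained_model_inference_job::builders::StartTrainedModelInferenceJobFluentBuilder) {
    /// let builder = builder
    ///     .tags("project", "example-project")
    ///     .tags("team", "example-team");
    /// # }
    /// ```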
    pub fn tags(mut self, k: impl ::std::convert::Into<::std::string::String>, v: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.tags(k.into(), v.into());
        self
    }
    /// <p>The optional metadata that you apply to the resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.</p>
    /// <p>The following basic restrictions apply to tags:</p>
    /// <ul>
    /// <li>
    /// <p>Maximum number of tags per resource - 50.</p></li>
    /// <li>
    /// <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p></li>
    /// <li>
    /// <p>Maximum key length - 128 Unicode characters in UTF-8.</p></li>
    /// <li>
    /// <p>Maximum value length - 256 Unicode characters in UTF-8.</p></li>
    /// <li>
    /// <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p></li>
    /// <li>
    /// <p>Tag keys and values are case sensitive.</p></li>
    /// <li>
    /// <p>Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys, as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.</p></li>
    /// </ul>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>The optional metadata that you apply to the resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.</p>
    /// <p>The following basic restrictions apply to tags:</p>
    /// <ul>
    /// <li>
    /// <p>Maximum number of tags per resource - 50.</p></li>
    /// <li>
    /// <p>For each resource, each tag key must be unique, and each tag key can have only one value.</p></li>
    /// <li>
    /// <p>Maximum key length - 128 Unicode characters in UTF-8.</p></li>
    /// <li>
    /// <p>Maximum value length - 256 Unicode characters in UTF-8.</p></li>
    /// <li>
    /// <p>If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.</p></li>
    /// <li>
    /// <p>Tag keys and values are case sensitive.</p></li>
    /// <li>
    /// <p>Do not use aws:, AWS:, or any upper or lowercase combination of these as a prefix for keys, as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.</p></li>
    /// </ul>
    pub fn get_tags(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.inner.get_tags()
    }
}