aws_sdk_sagemaker/operation/create_optimization_job/builders.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_optimization_job::_create_optimization_job_output::CreateOptimizationJobOutputBuilder;

pub use crate::operation::create_optimization_job::_create_optimization_job_input::CreateOptimizationJobInputBuilder;

impl crate::operation::create_optimization_job::builders::CreateOptimizationJobInputBuilder {
    /// Sends a request with this input using the given client.
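    ///
    /// A minimal, illustrative sketch of this pattern; the job name below is a placeholder,
    /// and the other required job settings (model source, output configuration, and so on)
    /// are omitted for brevity:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::Error> {
    /// use aws_sdk_sagemaker::operation::create_optimization_job::builders::CreateOptimizationJobInputBuilder;
    ///
    /// // Build the input separately, then send it with an existing client.
    /// // Placeholder values only; the remaining required fields would be set
    /// // with the other methods on the input builder.
    /// let output = CreateOptimizationJobInputBuilder::default()
    ///     .optimization_job_name("my-optimization-job")
    ///     .send_with(client)
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```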
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_optimization_job::CreateOptimizationJobOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_optimization_job::CreateOptimizationJobError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_optimization_job();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateOptimizationJob`.
///
/// <p>Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.</p>
/// <p>For more information about how to use this action, and about the supported optimization techniques, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-optimize.html">Optimize model inference with Amazon SageMaker</a>.</p>
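///
/// A minimal, illustrative sketch of calling this operation through the client. The job name
/// and role ARN below are placeholders, and the other required job settings (model source,
/// output configuration, deployment instance type, optimization configs, stopping condition)
/// are omitted; they are set with the corresponding methods on this builder.
///
/// ```no_run
/// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::Error> {
/// let output = client
///     .create_optimization_job()
///     // Placeholder values for illustration only.
///     .optimization_job_name("my-optimization-job")
///     .role_arn("arn:aws:iam::111122223333:role/MySageMakerRole")
///     .send()
///     .await?;
/// # let _ = output;
/// # Ok(())
/// # }
/// ```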
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateOptimizationJobFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_optimization_job::builders::CreateOptimizationJobInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_optimization_job::CreateOptimizationJobOutput,
        crate::operation::create_optimization_job::CreateOptimizationJobError,
    > for CreateOptimizationJobFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_optimization_job::CreateOptimizationJobOutput,
            crate::operation::create_optimization_job::CreateOptimizationJobError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateOptimizationJobFluentBuilder {
    /// Creates a new `CreateOptimizationJobFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateOptimizationJob as a reference.
    pub fn as_input(&self) -> &crate::operation::create_optimization_job::builders::CreateOptimizationJobInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
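    ///
    /// A short, illustrative sketch of one way to inspect the returned error. Treat it as an
    /// assumption about typical usage rather than prescribed handling; `into_service_error()`
    /// converts a transport-level `SdkError` into the operation's modeled error type.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) {
    /// match client.create_optimization_job().send().await {
    ///     Ok(output) => {
    ///         // The full response for the new optimization job is available here.
    ///         println!("created: {:?}", output);
    ///     }
    ///     Err(err) => {
    ///         // Fold the `SdkError` into the service error for display or matching.
    ///         eprintln!("CreateOptimizationJob failed: {}", err.into_service_error());
    ///     }
    /// }
    /// # }
    /// ```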
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_optimization_job::CreateOptimizationJobOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_optimization_job::CreateOptimizationJobError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_optimization_job::CreateOptimizationJob::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_optimization_job::CreateOptimizationJob::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_optimization_job::CreateOptimizationJobOutput,
        crate::operation::create_optimization_job::CreateOptimizationJobError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>A custom name for the new optimization job.</p>
    pub fn optimization_job_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.optimization_job_name(input.into());
        self
    }
    /// <p>A custom name for the new optimization job.</p>
    pub fn set_optimization_job_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_optimization_job_name(input);
        self
    }
    /// <p>A custom name for the new optimization job.</p>
    pub fn get_optimization_job_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_optimization_job_name()
    }
    /// <p>The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.</p>
    /// <p>During model optimization, Amazon SageMaker AI needs your permission to:</p>
    /// <ul>
    /// <li>
    /// <p>Read input data from an S3 bucket</p></li>
    /// <li>
    /// <p>Write model artifacts to an S3 bucket</p></li>
    /// <li>
    /// <p>Write logs to Amazon CloudWatch Logs</p></li>
    /// <li>
    /// <p>Publish metrics to Amazon CloudWatch</p></li>
    /// </ul>
    /// <p>You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the <code>iam:PassRole</code> permission. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">Amazon SageMaker AI Roles.</a></p>
    pub fn role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.role_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.</p>
    /// <p>During model optimization, Amazon SageMaker AI needs your permission to:</p>
    /// <ul>
    /// <li>
    /// <p>Read input data from an S3 bucket</p></li>
    /// <li>
    /// <p>Write model artifacts to an S3 bucket</p></li>
    /// <li>
    /// <p>Write logs to Amazon CloudWatch Logs</p></li>
    /// <li>
    /// <p>Publish metrics to Amazon CloudWatch</p></li>
    /// </ul>
    /// <p>You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the <code>iam:PassRole</code> permission. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">Amazon SageMaker AI Roles.</a></p>
    pub fn set_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_role_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.</p>
    /// <p>During model optimization, Amazon SageMaker AI needs your permission to:</p>
    /// <ul>
    /// <li>
    /// <p>Read input data from an S3 bucket</p></li>
    /// <li>
    /// <p>Write model artifacts to an S3 bucket</p></li>
    /// <li>
    /// <p>Write logs to Amazon CloudWatch Logs</p></li>
    /// <li>
    /// <p>Publish metrics to Amazon CloudWatch</p></li>
    /// </ul>
    /// <p>You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the <code>iam:PassRole</code> permission. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">Amazon SageMaker AI Roles.</a></p>
    pub fn get_role_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_role_arn()
    }
    /// <p>The location of the source model to optimize with an optimization job.</p>
    pub fn model_source(mut self, input: crate::types::OptimizationJobModelSource) -> Self {
        self.inner = self.inner.model_source(input);
        self
    }
    /// <p>The location of the source model to optimize with an optimization job.</p>
    pub fn set_model_source(mut self, input: ::std::option::Option<crate::types::OptimizationJobModelSource>) -> Self {
        self.inner = self.inner.set_model_source(input);
        self
    }
    /// <p>The location of the source model to optimize with an optimization job.</p>
    pub fn get_model_source(&self) -> &::std::option::Option<crate::types::OptimizationJobModelSource> {
        self.inner.get_model_source()
    }
    /// <p>The type of instance that hosts the optimized model that you create with the optimization job.</p>
    pub fn deployment_instance_type(mut self, input: crate::types::OptimizationJobDeploymentInstanceType) -> Self {
        self.inner = self.inner.deployment_instance_type(input);
        self
    }
    /// <p>The type of instance that hosts the optimized model that you create with the optimization job.</p>
    pub fn set_deployment_instance_type(mut self, input: ::std::option::Option<crate::types::OptimizationJobDeploymentInstanceType>) -> Self {
        self.inner = self.inner.set_deployment_instance_type(input);
        self
    }
    /// <p>The type of instance that hosts the optimized model that you create with the optimization job.</p>
    pub fn get_deployment_instance_type(&self) -> &::std::option::Option<crate::types::OptimizationJobDeploymentInstanceType> {
        self.inner.get_deployment_instance_type()
    }
    ///
    /// Adds a key-value pair to `OptimizationEnvironment`.
    ///
    /// To override the contents of this collection use [`set_optimization_environment`](Self::set_optimization_environment).
    ///
    /// <p>The environment variables to set in the model container.</p>
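    ///
    /// An illustrative sketch of appending a single entry; the key and value below are
    /// placeholders, not variables defined by this operation:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_sagemaker::operation::create_optimization_job::builders::CreateOptimizationJobFluentBuilder) {
    /// // Each call adds one environment variable to the map; placeholder key/value shown.
    /// let builder = builder.optimization_environment("EXAMPLE_KEY", "example-value");
    /// # let _ = builder;
    /// # }
    /// ```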
    pub fn optimization_environment(
        mut self,
        k: impl ::std::convert::Into<::std::string::String>,
        v: impl ::std::convert::Into<::std::string::String>,
    ) -> Self {
        self.inner = self.inner.optimization_environment(k.into(), v.into());
        self
    }
    /// <p>The environment variables to set in the model container.</p>
    pub fn set_optimization_environment(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_optimization_environment(input);
        self
    }
    /// <p>The environment variables to set in the model container.</p>
    pub fn get_optimization_environment(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.inner.get_optimization_environment()
    }
    ///
    /// Appends an item to `OptimizationConfigs`.
    ///
    /// To override the contents of this collection use [`set_optimization_configs`](Self::set_optimization_configs).
    ///
    /// <p>Settings for each of the optimization techniques that the job applies.</p>
    pub fn optimization_configs(mut self, input: crate::types::OptimizationConfig) -> Self {
        self.inner = self.inner.optimization_configs(input);
        self
    }
    /// <p>Settings for each of the optimization techniques that the job applies.</p>
    pub fn set_optimization_configs(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::OptimizationConfig>>) -> Self {
        self.inner = self.inner.set_optimization_configs(input);
        self
    }
    /// <p>Settings for each of the optimization techniques that the job applies.</p>
    pub fn get_optimization_configs(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::OptimizationConfig>> {
        self.inner.get_optimization_configs()
    }
    /// <p>Details for where to store the optimized model that you create with the optimization job.</p>
    pub fn output_config(mut self, input: crate::types::OptimizationJobOutputConfig) -> Self {
        self.inner = self.inner.output_config(input);
        self
    }
    /// <p>Details for where to store the optimized model that you create with the optimization job.</p>
    pub fn set_output_config(mut self, input: ::std::option::Option<crate::types::OptimizationJobOutputConfig>) -> Self {
        self.inner = self.inner.set_output_config(input);
        self
    }
    /// <p>Details for where to store the optimized model that you create with the optimization job.</p>
    pub fn get_output_config(&self) -> &::std::option::Option<crate::types::OptimizationJobOutputConfig> {
        self.inner.get_output_config()
    }
    /// <p>Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.</p>
    /// <p>To stop a training job, SageMaker sends the algorithm the <code>SIGTERM</code> signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.</p>
    /// <p>The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with <code>CreateModel</code>.</p><note>
    /// <p>The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.</p>
    /// </note>
    pub fn stopping_condition(mut self, input: crate::types::StoppingCondition) -> Self {
        self.inner = self.inner.stopping_condition(input);
        self
    }
    /// <p>Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.</p>
    /// <p>To stop a training job, SageMaker sends the algorithm the <code>SIGTERM</code> signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.</p>
    /// <p>The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with <code>CreateModel</code>.</p><note>
    /// <p>The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.</p>
    /// </note>
    pub fn set_stopping_condition(mut self, input: ::std::option::Option<crate::types::StoppingCondition>) -> Self {
        self.inner = self.inner.set_stopping_condition(input);
        self
    }
    /// <p>Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.</p>
    /// <p>To stop a training job, SageMaker sends the algorithm the <code>SIGTERM</code> signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.</p>
    /// <p>The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with <code>CreateModel</code>.</p><note>
    /// <p>The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.</p>
    /// </note>
    pub fn get_stopping_condition(&self) -> &::std::option::Option<crate::types::StoppingCondition> {
        self.inner.get_stopping_condition()
    }
    ///
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>A list of key-value pairs associated with the optimization job. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon Web Services General Reference Guide</i>.</p>
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p>A list of key-value pairs associated with the optimization job. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon Web Services General Reference Guide</i>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>A list of key-value pairs associated with the optimization job. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon Web Services General Reference Guide</i>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        self.inner.get_tags()
    }
    /// <p>A VPC in Amazon VPC that your optimized model has access to.</p>
    pub fn vpc_config(mut self, input: crate::types::OptimizationVpcConfig) -> Self {
        self.inner = self.inner.vpc_config(input);
        self
    }
    /// <p>A VPC in Amazon VPC that your optimized model has access to.</p>
    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::OptimizationVpcConfig>) -> Self {
        self.inner = self.inner.set_vpc_config(input);
        self
    }
    /// <p>A VPC in Amazon VPC that your optimized model has access to.</p>
    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::OptimizationVpcConfig> {
        self.inner.get_vpc_config()
    }
}