aws_sdk_sagemaker/operation/create_model/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_model::_create_model_output::CreateModelOutputBuilder;

pub use crate::operation::create_model::_create_model_input::CreateModelInputBuilder;

impl crate::operation::create_model::builders::CreateModelInputBuilder {
    /// Sends a request with this input using the given client.
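    ///
    /// A minimal sketch of sending a pre-assembled input builder; the model name, role ARN,
    /// and image URI shown here are placeholders, not values defined by this crate.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::Error> {
    /// use aws_sdk_sagemaker::operation::create_model::builders::CreateModelInputBuilder;
    /// use aws_sdk_sagemaker::types::ContainerDefinition;
    ///
    /// // Assemble the input separately from the client, then send it.
    /// let builder = CreateModelInputBuilder::default()
    ///     .model_name("my-model")
    ///     .execution_role_arn("arn:aws:iam::123456789012:role/SageMakerRole")
    ///     .primary_container(
    ///         ContainerDefinition::builder()
    ///             .image("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-image:latest")
    ///             .build(),
    ///     );
    /// let output = builder.send_with(client).await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```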
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_model::CreateModelOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_model::CreateModelError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_model();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateModel`.
///
/// <p>Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions.</p>
/// <p>Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job.</p>
/// <p>To host your model, you create an endpoint configuration with the <code>CreateEndpointConfig</code> API, and then create an endpoint with the <code>CreateEndpoint</code> API. SageMaker then deploys all of the containers that you defined for the model in the hosting environment.</p>
/// <p>To run a batch transform using your model, you start a job with the <code>CreateTransformJob</code> API. SageMaker uses your model and your dataset to get inferences, which are then saved to a specified S3 location.</p>
/// <p>In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and the Docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you use the IAM role to manage the permissions that the inference code needs. For example, if the inference code accesses any other Amazon Web Services resources, you grant the necessary permissions via this role.</p>
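///
/// A minimal end-to-end sketch, assuming credentials and region are resolved through
/// `aws_config`; the model name, role ARN, image URI, and S3 location are placeholders
/// rather than values defined by this crate.
///
/// ```no_run
/// # async fn example() -> Result<(), aws_sdk_sagemaker::Error> {
/// use aws_sdk_sagemaker::types::ContainerDefinition;
///
/// // Resolve shared AWS configuration from the environment and build a client.
/// let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
/// let client = aws_sdk_sagemaker::Client::new(&config);
///
/// // Describe the primary inference container and create the model.
/// let output = client
///     .create_model()
///     .model_name("my-model")
///     .execution_role_arn("arn:aws:iam::123456789012:role/SageMakerRole")
///     .primary_container(
///         ContainerDefinition::builder()
///             .image("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-image:latest")
///             .model_data_url("s3://amzn-s3-demo-bucket/model.tar.gz")
///             .build(),
///     )
///     .send()
///     .await?;
/// println!("created model: {:?}", output.model_arn());
/// # Ok(())
/// # }
/// ```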
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateModelFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_model::builders::CreateModelInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_model::CreateModelOutput,
        crate::operation::create_model::CreateModelError,
    > for CreateModelFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_model::CreateModelOutput,
            crate::operation::create_model::CreateModelError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateModelFluentBuilder {
    /// Creates a new `CreateModelFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateModel input as a reference.
    pub fn as_input(&self) -> &crate::operation::create_model::builders::CreateModelInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
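    ///
    /// A hedged sketch of matching on the returned error; it assumes the generated
    /// `CreateModelError` exposes an `is_resource_limit_exceeded` predicate for this
    /// operation's modeled error, and the model name is a placeholder.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) {
    /// match client.create_model().model_name("my-model").send().await {
    ///     Ok(output) => println!("created: {:?}", output),
    ///     Err(err) => {
    ///         // `into_service_error` unwraps the modeled `CreateModelError` when one is present.
    ///         let service_err = err.into_service_error();
    ///         if service_err.is_resource_limit_exceeded() {
    ///             eprintln!("resource limit exceeded: {}", service_err);
    ///         } else {
    ///             eprintln!("request failed: {}", service_err);
    ///         }
    ///     }
    /// }
    /// # }
    /// ```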
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_model::CreateModelOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_model::CreateModelError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_model::CreateModel::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_model::CreateModel::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
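    ///
    /// A minimal sketch, assuming the returned `CustomizableOperation` exposes a public
    /// `config_override` method for per-request configuration; the region value is a
    /// placeholder.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::Error> {
    /// use aws_sdk_sagemaker::config::Region;
    ///
    /// let output = client
    ///     .create_model()
    ///     .model_name("my-model")
    ///     .customize()
    ///     // Override the region for this single request only.
    ///     .config_override(aws_sdk_sagemaker::config::Config::builder().region(Region::new("us-west-2")))
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```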
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_model::CreateModelOutput,
        crate::operation::create_model::CreateModelError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The name of the new model.</p>
    pub fn model_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_name(input.into());
        self
    }
    /// <p>The name of the new model.</p>
    pub fn set_model_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_name(input);
        self
    }
    /// <p>The name of the new model.</p>
    pub fn get_model_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_name()
    }
    /// <p>The location of the primary Docker image containing inference code, associated artifacts, and a custom environment map that the inference code uses when the model is deployed for predictions.</p>
    pub fn primary_container(mut self, input: crate::types::ContainerDefinition) -> Self {
        self.inner = self.inner.primary_container(input);
        self
    }
    /// <p>The location of the primary Docker image containing inference code, associated artifacts, and a custom environment map that the inference code uses when the model is deployed for predictions.</p>
    pub fn set_primary_container(mut self, input: ::std::option::Option<crate::types::ContainerDefinition>) -> Self {
        self.inner = self.inner.set_primary_container(input);
        self
    }
    /// <p>The location of the primary Docker image containing inference code, associated artifacts, and a custom environment map that the inference code uses when the model is deployed for predictions.</p>
    pub fn get_primary_container(&self) -> &::std::option::Option<crate::types::ContainerDefinition> {
        self.inner.get_primary_container()
    }
    ///
    /// Appends an item to `Containers`.
    ///
    /// To override the contents of this collection use [`set_containers`](Self::set_containers).
    ///
    /// <p>Specifies the containers in the inference pipeline.</p>
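    ///
    /// A minimal sketch of building an inference pipeline by appending containers in
    /// invocation order; the model name, role ARN, and image URIs are placeholders.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::Error> {
    /// use aws_sdk_sagemaker::types::ContainerDefinition;
    ///
    /// let output = client
    ///     .create_model()
    ///     .model_name("my-pipeline-model")
    ///     .execution_role_arn("arn:aws:iam::123456789012:role/SageMakerRole")
    ///     // Each call appends one container; they are invoked in the order appended.
    ///     .containers(ContainerDefinition::builder().image("123456789012.dkr.ecr.us-east-1.amazonaws.com/preprocess:latest").build())
    ///     .containers(ContainerDefinition::builder().image("123456789012.dkr.ecr.us-east-1.amazonaws.com/predict:latest").build())
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```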
    pub fn containers(mut self, input: crate::types::ContainerDefinition) -> Self {
        self.inner = self.inner.containers(input);
        self
    }
    /// <p>Specifies the containers in the inference pipeline.</p>
    pub fn set_containers(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ContainerDefinition>>) -> Self {
        self.inner = self.inner.set_containers(input);
        self
    }
    /// <p>Specifies the containers in the inference pipeline.</p>
    pub fn get_containers(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ContainerDefinition>> {
        self.inner.get_containers()
    }
    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
    pub fn inference_execution_config(mut self, input: crate::types::InferenceExecutionConfig) -> Self {
        self.inner = self.inner.inference_execution_config(input);
        self
    }
    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
    pub fn set_inference_execution_config(mut self, input: ::std::option::Option<crate::types::InferenceExecutionConfig>) -> Self {
        self.inner = self.inner.set_inference_execution_config(input);
        self
    }
    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
    pub fn get_inference_execution_config(&self) -> &::std::option::Option<crate::types::InferenceExecutionConfig> {
        self.inner.get_inference_execution_config()
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and the Docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
    /// </note>
    pub fn execution_role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.execution_role_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and the Docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
    /// </note>
    pub fn set_execution_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_execution_role_arn(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and the Docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
    /// </note>
    pub fn get_execution_role_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_execution_role_arn()
    }
    ///
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
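    ///
    /// A minimal sketch of appending tags; it assumes `Tag::builder()` requires both a key and
    /// a value so that `build()` returns a `Result`, and the tag keys and values are placeholders.
    ///
    /// ```no_run
    /// # fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::error::BuildError> {
    /// use aws_sdk_sagemaker::types::Tag;
    ///
    /// // Each call to `tags` appends one key-value pair to the request.
    /// let request = client
    ///     .create_model()
    ///     .model_name("my-model")
    ///     .tags(Tag::builder().key("team").value("ml-platform").build()?)
    ///     .tags(Tag::builder().key("stage").value("prod").build()?);
    /// # let _ = request;
    /// # Ok(())
    /// # }
    /// ```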
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        self.inner.get_tags()
    }
    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
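    ///
    /// A minimal sketch of attaching a VPC configuration; it assumes `VpcConfig::builder()`
    /// requires at least one security group and subnet so that `build()` returns a `Result`,
    /// and the IDs shown are placeholders.
    ///
    /// ```no_run
    /// # fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), aws_sdk_sagemaker::error::BuildError> {
    /// use aws_sdk_sagemaker::types::VpcConfig;
    ///
    /// let request = client.create_model().model_name("my-model").vpc_config(
    ///     VpcConfig::builder()
    ///         .security_group_ids("sg-0123456789abcdef0")
    ///         .subnets("subnet-0123456789abcdef0")
    ///         .build()?,
    /// );
    /// # let _ = request;
    /// # Ok(())
    /// # }
    /// ```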
    pub fn vpc_config(mut self, input: crate::types::VpcConfig) -> Self {
        self.inner = self.inner.vpc_config(input);
        self
    }
    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::VpcConfig>) -> Self {
        self.inner = self.inner.set_vpc_config(input);
        self
    }
    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::VpcConfig> {
        self.inner.get_vpc_config()
    }
    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
    pub fn enable_network_isolation(mut self, input: bool) -> Self {
        self.inner = self.inner.enable_network_isolation(input);
        self
    }
    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
    pub fn set_enable_network_isolation(mut self, input: ::std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_enable_network_isolation(input);
        self
    }
    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
    pub fn get_enable_network_isolation(&self) -> &::std::option::Option<bool> {
        self.inner.get_enable_network_isolation()
    }
}