aws_sdk_sagemaker/operation/create_model/
_create_model_input.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2#[allow(missing_docs)] // documentation missing in model
3#[non_exhaustive]
4#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
/// Input shape for the `CreateModel` operation: the model name, container definition(s),
/// execution role, and optional networking/tagging configuration to create with.
pub struct CreateModelInput {
    /// <p>The name of the new model.</p>
    pub model_name: ::std::option::Option<::std::string::String>,
    /// <p>The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.</p>
    pub primary_container: ::std::option::Option<crate::types::ContainerDefinition>,
    /// <p>Specifies the containers in the inference pipeline.</p>
    pub containers: ::std::option::Option<::std::vec::Vec<crate::types::ContainerDefinition>>,
    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
    pub inference_execution_config: ::std::option::Option<crate::types::InferenceExecutionConfig>,
    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
    /// </note>
    pub execution_role_arn: ::std::option::Option<::std::string::String>,
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
    pub tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
    pub vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
    pub enable_network_isolation: ::std::option::Option<bool>,
}
25impl CreateModelInput {
26    /// <p>The name of the new model.</p>
27    pub fn model_name(&self) -> ::std::option::Option<&str> {
28        self.model_name.as_deref()
29    }
30    /// <p>The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.</p>
31    pub fn primary_container(&self) -> ::std::option::Option<&crate::types::ContainerDefinition> {
32        self.primary_container.as_ref()
33    }
34    /// <p>Specifies the containers in the inference pipeline.</p>
35    ///
36    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.containers.is_none()`.
37    pub fn containers(&self) -> &[crate::types::ContainerDefinition] {
38        self.containers.as_deref().unwrap_or_default()
39    }
40    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
41    pub fn inference_execution_config(&self) -> ::std::option::Option<&crate::types::InferenceExecutionConfig> {
42        self.inference_execution_config.as_ref()
43    }
44    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
45    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
46    /// </note>
47    pub fn execution_role_arn(&self) -> ::std::option::Option<&str> {
48        self.execution_role_arn.as_deref()
49    }
50    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
51    ///
52    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.tags.is_none()`.
53    pub fn tags(&self) -> &[crate::types::Tag] {
54        self.tags.as_deref().unwrap_or_default()
55    }
56    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
57    pub fn vpc_config(&self) -> ::std::option::Option<&crate::types::VpcConfig> {
58        self.vpc_config.as_ref()
59    }
60    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
61    pub fn enable_network_isolation(&self) -> ::std::option::Option<bool> {
62        self.enable_network_isolation
63    }
64}
65impl CreateModelInput {
66    /// Creates a new builder-style object to manufacture [`CreateModelInput`](crate::operation::create_model::CreateModelInput).
67    pub fn builder() -> crate::operation::create_model::builders::CreateModelInputBuilder {
68        crate::operation::create_model::builders::CreateModelInputBuilder::default()
69    }
70}
71
/// A builder for [`CreateModelInput`](crate::operation::create_model::CreateModelInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct CreateModelInputBuilder {
    // Each field stages the same-named field of `CreateModelInput`;
    // `build()` moves all of them into the finished input unchanged.
    pub(crate) model_name: ::std::option::Option<::std::string::String>,
    pub(crate) primary_container: ::std::option::Option<crate::types::ContainerDefinition>,
    pub(crate) containers: ::std::option::Option<::std::vec::Vec<crate::types::ContainerDefinition>>,
    pub(crate) inference_execution_config: ::std::option::Option<crate::types::InferenceExecutionConfig>,
    pub(crate) execution_role_arn: ::std::option::Option<::std::string::String>,
    pub(crate) tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    pub(crate) vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    pub(crate) enable_network_isolation: ::std::option::Option<bool>,
}
85impl CreateModelInputBuilder {
86    /// <p>The name of the new model.</p>
87    /// This field is required.
88    pub fn model_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
89        self.model_name = ::std::option::Option::Some(input.into());
90        self
91    }
92    /// <p>The name of the new model.</p>
93    pub fn set_model_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
94        self.model_name = input;
95        self
96    }
97    /// <p>The name of the new model.</p>
98    pub fn get_model_name(&self) -> &::std::option::Option<::std::string::String> {
99        &self.model_name
100    }
101    /// <p>The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.</p>
102    pub fn primary_container(mut self, input: crate::types::ContainerDefinition) -> Self {
103        self.primary_container = ::std::option::Option::Some(input);
104        self
105    }
106    /// <p>The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.</p>
107    pub fn set_primary_container(mut self, input: ::std::option::Option<crate::types::ContainerDefinition>) -> Self {
108        self.primary_container = input;
109        self
110    }
111    /// <p>The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions.</p>
112    pub fn get_primary_container(&self) -> &::std::option::Option<crate::types::ContainerDefinition> {
113        &self.primary_container
114    }
115    /// Appends an item to `containers`.
116    ///
117    /// To override the contents of this collection use [`set_containers`](Self::set_containers).
118    ///
119    /// <p>Specifies the containers in the inference pipeline.</p>
120    pub fn containers(mut self, input: crate::types::ContainerDefinition) -> Self {
121        let mut v = self.containers.unwrap_or_default();
122        v.push(input);
123        self.containers = ::std::option::Option::Some(v);
124        self
125    }
126    /// <p>Specifies the containers in the inference pipeline.</p>
127    pub fn set_containers(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ContainerDefinition>>) -> Self {
128        self.containers = input;
129        self
130    }
131    /// <p>Specifies the containers in the inference pipeline.</p>
132    pub fn get_containers(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ContainerDefinition>> {
133        &self.containers
134    }
135    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
136    pub fn inference_execution_config(mut self, input: crate::types::InferenceExecutionConfig) -> Self {
137        self.inference_execution_config = ::std::option::Option::Some(input);
138        self
139    }
140    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
141    pub fn set_inference_execution_config(mut self, input: ::std::option::Option<crate::types::InferenceExecutionConfig>) -> Self {
142        self.inference_execution_config = input;
143        self
144    }
145    /// <p>Specifies details of how containers in a multi-container endpoint are called.</p>
146    pub fn get_inference_execution_config(&self) -> &::std::option::Option<crate::types::InferenceExecutionConfig> {
147        &self.inference_execution_config
148    }
149    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
150    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
151    /// </note>
152    pub fn execution_role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
153        self.execution_role_arn = ::std::option::Option::Some(input.into());
154        self
155    }
156    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
157    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
158    /// </note>
159    pub fn set_execution_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
160        self.execution_role_arn = input;
161        self
162    }
163    /// <p>The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html">SageMaker Roles</a>.</p><note>
164    /// <p>To be able to pass this role to SageMaker, the caller of this API must have the <code>iam:PassRole</code> permission.</p>
165    /// </note>
166    pub fn get_execution_role_arn(&self) -> &::std::option::Option<::std::string::String> {
167        &self.execution_role_arn
168    }
169    /// Appends an item to `tags`.
170    ///
171    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
172    ///
173    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
174    pub fn tags(mut self, input: crate::types::Tag) -> Self {
175        let mut v = self.tags.unwrap_or_default();
176        v.push(input);
177        self.tags = ::std::option::Option::Some(v);
178        self
179    }
180    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
181    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
182        self.tags = input;
183        self
184    }
185    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>.</p>
186    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
187        &self.tags
188    }
189    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
190    pub fn vpc_config(mut self, input: crate::types::VpcConfig) -> Self {
191        self.vpc_config = ::std::option::Option::Some(input);
192        self
193    }
194    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
195    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::VpcConfig>) -> Self {
196        self.vpc_config = input;
197        self
198    }
199    /// <p>A <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_VpcConfig.html">VpcConfig</a> object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. <code>VpcConfig</code> is used in hosting services and in batch transform. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html">Protect Endpoints by Using an Amazon Virtual Private Cloud</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html">Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud</a>.</p>
200    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::VpcConfig> {
201        &self.vpc_config
202    }
203    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
204    pub fn enable_network_isolation(mut self, input: bool) -> Self {
205        self.enable_network_isolation = ::std::option::Option::Some(input);
206        self
207    }
208    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
209    pub fn set_enable_network_isolation(mut self, input: ::std::option::Option<bool>) -> Self {
210        self.enable_network_isolation = input;
211        self
212    }
213    /// <p>Isolates the model container. No inbound or outbound network calls can be made to or from the model container.</p>
214    pub fn get_enable_network_isolation(&self) -> &::std::option::Option<bool> {
215        &self.enable_network_isolation
216    }
217    /// Consumes the builder and constructs a [`CreateModelInput`](crate::operation::create_model::CreateModelInput).
218    pub fn build(self) -> ::std::result::Result<crate::operation::create_model::CreateModelInput, ::aws_smithy_types::error::operation::BuildError> {
219        ::std::result::Result::Ok(crate::operation::create_model::CreateModelInput {
220            model_name: self.model_name,
221            primary_container: self.primary_container,
222            containers: self.containers,
223            inference_execution_config: self.inference_execution_config,
224            execution_role_arn: self.execution_role_arn,
225            tags: self.tags,
226            vpc_config: self.vpc_config,
227            enable_network_isolation: self.enable_network_isolation,
228        })
229    }
230}