aws_sdk_sagemaker/operation/create_cluster/_create_cluster_input.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct CreateClusterInput {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub cluster_name: ::std::option::Option<::std::string::String>,
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>,
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub restricted_instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can use IPv6 only when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub orchestrator: ::std::option::Option<crate::types::ClusterOrchestrator>,
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod automatically reboots or replaces faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators must manually manage any faulty cluster instances.</p>
    pub node_recovery: ::std::option::Option<crate::types::ClusterNodeRecovery>,
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry of failed operations, 3) enhanced visibility into cluster events through detailed event streams, and 4) partial provisioning. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is supported only for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub node_provisioning_mode: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>,
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. It is required only when autoscaling is enabled and HyperPod performs autoscaling operations.</p>
    pub cluster_role: ::std::option::Option<::std::string::String>,
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub auto_scaling: ::std::option::Option<crate::types::ClusterAutoScalingConfig>,
}
impl CreateClusterInput {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn cluster_name(&self) -> ::std::option::Option<&str> {
        self.cluster_name.as_deref()
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.instance_groups.is_none()`.
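    ///
    /// A minimal sketch of that check (`input` here is a hypothetical, already-constructed `CreateClusterInput`):
    ///
    /// ```no_run
    /// # fn check(input: &aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput) {
    /// if input.instance_groups.is_none() {
    ///     // The field was never set; the accessor below still returns an empty slice.
    /// }
    /// let groups = input.instance_groups(); // &[ClusterInstanceGroupSpecification]
    /// # let _ = groups;
    /// # }
    /// ```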
    pub fn instance_groups(&self) -> &[crate::types::ClusterInstanceGroupSpecification] {
        self.instance_groups.as_deref().unwrap_or_default()
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.restricted_instance_groups.is_none()`.
    pub fn restricted_instance_groups(&self) -> &[crate::types::ClusterRestrictedInstanceGroupSpecification] {
        self.restricted_instance_groups.as_deref().unwrap_or_default()
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can use IPv6 only when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn vpc_config(&self) -> ::std::option::Option<&crate::types::VpcConfig> {
        self.vpc_config.as_ref()
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.tags.is_none()`.
    pub fn tags(&self) -> &[crate::types::Tag] {
        self.tags.as_deref().unwrap_or_default()
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn orchestrator(&self) -> ::std::option::Option<&crate::types::ClusterOrchestrator> {
        self.orchestrator.as_ref()
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod automatically reboots or replaces faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators must manually manage any faulty cluster instances.</p>
    pub fn node_recovery(&self) -> ::std::option::Option<&crate::types::ClusterNodeRecovery> {
        self.node_recovery.as_ref()
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry of failed operations, 3) enhanced visibility into cluster events through detailed event streams, and 4) partial provisioning. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is supported only for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn node_provisioning_mode(&self) -> ::std::option::Option<&crate::types::ClusterNodeProvisioningMode> {
        self.node_provisioning_mode.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. It is required only when autoscaling is enabled and HyperPod performs autoscaling operations.</p>
    pub fn cluster_role(&self) -> ::std::option::Option<&str> {
        self.cluster_role.as_deref()
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn auto_scaling(&self) -> ::std::option::Option<&crate::types::ClusterAutoScalingConfig> {
        self.auto_scaling.as_ref()
    }
}
impl CreateClusterInput {
    /// Creates a new builder-style object to manufacture [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
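    ///
    /// A minimal sketch of builder usage (the cluster name below is a placeholder, not a value from the model):
    ///
    /// ```no_run
    /// # fn example() -> Result<(), aws_smithy_types::error::operation::BuildError> {
    /// let input = aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput::builder()
    ///     .cluster_name("example-hyperpod-cluster") // placeholder name
    ///     .build()?;
    /// assert_eq!(input.cluster_name(), Some("example-hyperpod-cluster"));
    /// # Ok(())
    /// # }
    /// ```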
    pub fn builder() -> crate::operation::create_cluster::builders::CreateClusterInputBuilder {
        crate::operation::create_cluster::builders::CreateClusterInputBuilder::default()
    }
}

/// A builder for [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct CreateClusterInputBuilder {
    pub(crate) cluster_name: ::std::option::Option<::std::string::String>,
    pub(crate) instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>,
    pub(crate) restricted_instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    pub(crate) vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    pub(crate) tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    pub(crate) orchestrator: ::std::option::Option<crate::types::ClusterOrchestrator>,
    pub(crate) node_recovery: ::std::option::Option<crate::types::ClusterNodeRecovery>,
    pub(crate) node_provisioning_mode: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>,
    pub(crate) cluster_role: ::std::option::Option<::std::string::String>,
    pub(crate) auto_scaling: ::std::option::Option<crate::types::ClusterAutoScalingConfig>,
}
impl CreateClusterInputBuilder {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    /// This field is required.
    pub fn cluster_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.cluster_name = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn set_cluster_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.cluster_name = input;
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn get_cluster_name(&self) -> &::std::option::Option<::std::string::String> {
        &self.cluster_name
    }
    /// Appends an item to `instance_groups`.
    ///
    /// To override the contents of this collection use [`set_instance_groups`](Self::set_instance_groups).
    ///
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
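    ///
    /// A sketch of the append semantics: each call adds one element, while [`set_instance_groups`](Self::set_instance_groups) replaces the whole collection (the specifications here are placeholders supplied by the caller):
    ///
    /// ```no_run
    /// # fn example(
    /// #     a: aws_sdk_sagemaker::types::ClusterInstanceGroupSpecification,
    /// #     b: aws_sdk_sagemaker::types::ClusterInstanceGroupSpecification,
    /// # ) {
    /// let builder = aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput::builder()
    ///     .instance_groups(a)
    ///     .instance_groups(b); // appends; the collection now holds two groups
    /// assert_eq!(builder.get_instance_groups().as_ref().map(|v| v.len()), Some(2));
    /// # }
    /// ```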
    pub fn instance_groups(mut self, input: crate::types::ClusterInstanceGroupSpecification) -> Self {
        let mut v = self.instance_groups.unwrap_or_default();
        v.push(input);
        self.instance_groups = ::std::option::Option::Some(v);
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_instance_groups(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>) -> Self {
        self.instance_groups = input;
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_instance_groups(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>> {
        &self.instance_groups
    }
    /// Appends an item to `restricted_instance_groups`.
    ///
    /// To override the contents of this collection use [`set_restricted_instance_groups`](Self::set_restricted_instance_groups).
    ///
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn restricted_instance_groups(mut self, input: crate::types::ClusterRestrictedInstanceGroupSpecification) -> Self {
        let mut v = self.restricted_instance_groups.unwrap_or_default();
        v.push(input);
        self.restricted_instance_groups = ::std::option::Option::Some(v);
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_restricted_instance_groups(
        mut self,
        input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    ) -> Self {
        self.restricted_instance_groups = input;
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_restricted_instance_groups(
        &self,
    ) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>> {
        &self.restricted_instance_groups
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can use IPv6 only when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn vpc_config(mut self, input: crate::types::VpcConfig) -> Self {
        self.vpc_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can use IPv6 only when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::VpcConfig>) -> Self {
        self.vpc_config = input;
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can use IPv6 only when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::VpcConfig> {
        &self.vpc_config
    }
    /// Appends an item to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
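    ///
    /// A sketch of appending a single tag (the key and value are placeholders, and the `Tag` builder calls assume the generated `key`/`value` setters):
    ///
    /// ```no_run
    /// # fn example() -> Result<(), aws_smithy_types::error::operation::BuildError> {
    /// let tag = aws_sdk_sagemaker::types::Tag::builder()
    ///     .key("team")       // placeholder tag key
    ///     .value("research") // placeholder tag value
    ///     .build()?;
    /// let builder = aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput::builder().tags(tag);
    /// # let _ = builder;
    /// # Ok(())
    /// # }
    /// ```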
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        let mut v = self.tags.unwrap_or_default();
        v.push(input);
        self.tags = ::std::option::Option::Some(v);
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.tags = input;
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        &self.tags
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn orchestrator(mut self, input: crate::types::ClusterOrchestrator) -> Self {
        self.orchestrator = ::std::option::Option::Some(input);
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn set_orchestrator(mut self, input: ::std::option::Option<crate::types::ClusterOrchestrator>) -> Self {
        self.orchestrator = input;
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn get_orchestrator(&self) -> &::std::option::Option<crate::types::ClusterOrchestrator> {
        &self.orchestrator
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod automatically reboots or replaces faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators must manually manage any faulty cluster instances.</p>
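    ///
    /// For example (assuming the generated `ClusterNodeRecovery::Automatic` variant):
    ///
    /// ```no_run
    /// let builder = aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput::builder()
    ///     .node_recovery(aws_sdk_sagemaker::types::ClusterNodeRecovery::Automatic);
    /// # let _ = builder;
    /// ```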
    pub fn node_recovery(mut self, input: crate::types::ClusterNodeRecovery) -> Self {
        self.node_recovery = ::std::option::Option::Some(input);
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod automatically reboots or replaces faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators must manually manage any faulty cluster instances.</p>
    pub fn set_node_recovery(mut self, input: ::std::option::Option<crate::types::ClusterNodeRecovery>) -> Self {
        self.node_recovery = input;
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod automatically reboots or replaces faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators must manually manage any faulty cluster instances.</p>
    pub fn get_node_recovery(&self) -> &::std::option::Option<crate::types::ClusterNodeRecovery> {
        &self.node_recovery
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry of failed operations, 3) enhanced visibility into cluster events through detailed event streams, and 4) partial provisioning. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is supported only for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn node_provisioning_mode(mut self, input: crate::types::ClusterNodeProvisioningMode) -> Self {
        self.node_provisioning_mode = ::std::option::Option::Some(input);
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry of failed operations, 3) enhanced visibility into cluster events through detailed event streams, and 4) partial provisioning. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is supported only for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn set_node_provisioning_mode(mut self, input: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>) -> Self {
        self.node_provisioning_mode = input;
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry of failed operations, 3) enhanced visibility into cluster events through detailed event streams, and 4) partial provisioning. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is supported only for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn get_node_provisioning_mode(&self) -> &::std::option::Option<crate::types::ClusterNodeProvisioningMode> {
        &self.node_provisioning_mode
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. It is required only when autoscaling is enabled and HyperPod performs autoscaling operations.</p>
    pub fn cluster_role(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.cluster_role = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. It is required only when autoscaling is enabled and HyperPod performs autoscaling operations.</p>
    pub fn set_cluster_role(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.cluster_role = input;
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. It is required only when autoscaling is enabled and HyperPod performs autoscaling operations.</p>
    pub fn get_cluster_role(&self) -> &::std::option::Option<::std::string::String> {
        &self.cluster_role
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn auto_scaling(mut self, input: crate::types::ClusterAutoScalingConfig) -> Self {
        self.auto_scaling = ::std::option::Option::Some(input);
        self
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn set_auto_scaling(mut self, input: ::std::option::Option<crate::types::ClusterAutoScalingConfig>) -> Self {
        self.auto_scaling = input;
        self
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn get_auto_scaling(&self) -> &::std::option::Option<crate::types::ClusterAutoScalingConfig> {
        &self.auto_scaling
    }
    /// Consumes the builder and constructs a [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
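    ///
    /// As generated here, `build` wraps the current field values without additional validation, so a missing `cluster_name` still yields `Ok` (a sketch of that behavior):
    ///
    /// ```no_run
    /// # fn example() -> Result<(), aws_smithy_types::error::operation::BuildError> {
    /// let input = aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput::builder().build()?;
    /// assert!(input.cluster_name().is_none());
    /// # Ok(())
    /// # }
    /// ```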
    pub fn build(
        self,
    ) -> ::std::result::Result<crate::operation::create_cluster::CreateClusterInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::create_cluster::CreateClusterInput {
            cluster_name: self.cluster_name,
            instance_groups: self.instance_groups,
            restricted_instance_groups: self.restricted_instance_groups,
            vpc_config: self.vpc_config,
            tags: self.tags,
            orchestrator: self.orchestrator,
            node_recovery: self.node_recovery,
            node_provisioning_mode: self.node_provisioning_mode,
            cluster_role: self.cluster_role,
            auto_scaling: self.auto_scaling,
        })
    }
}