aws_sdk_sagemaker/operation/create_cluster/_create_cluster_input.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct CreateClusterInput {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub cluster_name: ::std::option::Option<::std::string::String>,
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>,
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub restricted_instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub orchestrator: ::std::option::Option<crate::types::ClusterOrchestrator>,
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub node_recovery: ::std::option::Option<crate::types::ClusterNodeRecovery>,
    /// <p>The configuration for managed tier checkpointing on the HyperPod cluster. When enabled, this feature uses a multi-tier storage approach for storing model checkpoints, providing faster checkpoint operations and improved fault tolerance across cluster nodes.</p>
    pub tiered_storage_config: ::std::option::Option<crate::types::ClusterTieredStorageConfig>,
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub node_provisioning_mode: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>,
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub cluster_role: ::std::option::Option<::std::string::String>,
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub auto_scaling: ::std::option::Option<crate::types::ClusterAutoScalingConfig>,
}
impl CreateClusterInput {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn cluster_name(&self) -> ::std::option::Option<&str> {
        self.cluster_name.as_deref()
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.instance_groups.is_none()`.
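    ///
    /// A minimal sketch of that distinction; the `.expect` is safe because this
    /// particular `build` performs no required-field checks:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    ///
    /// let input = CreateClusterInput::builder().build().expect("never errors for this input");
    /// assert!(input.instance_groups().is_empty()); // accessor substitutes an empty slice
    /// assert!(input.instance_groups.is_none()); // but the field itself was never set
    /// ```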
    pub fn instance_groups(&self) -> &[crate::types::ClusterInstanceGroupSpecification] {
        self.instance_groups.as_deref().unwrap_or_default()
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.restricted_instance_groups.is_none()`.
    pub fn restricted_instance_groups(&self) -> &[crate::types::ClusterRestrictedInstanceGroupSpecification] {
        self.restricted_instance_groups.as_deref().unwrap_or_default()
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn vpc_config(&self) -> ::std::option::Option<&crate::types::VpcConfig> {
        self.vpc_config.as_ref()
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.tags.is_none()`.
    pub fn tags(&self) -> &[crate::types::Tag] {
        self.tags.as_deref().unwrap_or_default()
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn orchestrator(&self) -> ::std::option::Option<&crate::types::ClusterOrchestrator> {
        self.orchestrator.as_ref()
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn node_recovery(&self) -> ::std::option::Option<&crate::types::ClusterNodeRecovery> {
        self.node_recovery.as_ref()
    }
    /// <p>The configuration for managed tier checkpointing on the HyperPod cluster. When enabled, this feature uses a multi-tier storage approach for storing model checkpoints, providing faster checkpoint operations and improved fault tolerance across cluster nodes.</p>
    pub fn tiered_storage_config(&self) -> ::std::option::Option<&crate::types::ClusterTieredStorageConfig> {
        self.tiered_storage_config.as_ref()
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn node_provisioning_mode(&self) -> ::std::option::Option<&crate::types::ClusterNodeProvisioningMode> {
        self.node_provisioning_mode.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn cluster_role(&self) -> ::std::option::Option<&str> {
        self.cluster_role.as_deref()
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn auto_scaling(&self) -> ::std::option::Option<&crate::types::ClusterAutoScalingConfig> {
        self.auto_scaling.as_ref()
    }
}
impl CreateClusterInput {
    /// Creates a new builder-style object to manufacture [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
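    ///
    /// A minimal sketch of assembling an input with the builder; the cluster
    /// name below is an illustrative placeholder:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    /// use aws_sdk_sagemaker::types::ClusterNodeRecovery;
    ///
    /// let input = CreateClusterInput::builder()
    ///     .cluster_name("my-hyperpod-cluster")
    ///     .node_recovery(ClusterNodeRecovery::Automatic)
    ///     .build()
    ///     .expect("this build performs no required-field checks");
    /// assert_eq!(input.cluster_name(), Some("my-hyperpod-cluster"));
    /// ```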
    pub fn builder() -> crate::operation::create_cluster::builders::CreateClusterInputBuilder {
        crate::operation::create_cluster::builders::CreateClusterInputBuilder::default()
    }
}

/// A builder for [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct CreateClusterInputBuilder {
    pub(crate) cluster_name: ::std::option::Option<::std::string::String>,
    pub(crate) instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>,
    pub(crate) restricted_instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    pub(crate) vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    pub(crate) tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    pub(crate) orchestrator: ::std::option::Option<crate::types::ClusterOrchestrator>,
    pub(crate) node_recovery: ::std::option::Option<crate::types::ClusterNodeRecovery>,
    pub(crate) tiered_storage_config: ::std::option::Option<crate::types::ClusterTieredStorageConfig>,
    pub(crate) node_provisioning_mode: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>,
    pub(crate) cluster_role: ::std::option::Option<::std::string::String>,
    pub(crate) auto_scaling: ::std::option::Option<crate::types::ClusterAutoScalingConfig>,
}
impl CreateClusterInputBuilder {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    /// This field is required.
    pub fn cluster_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.cluster_name = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn set_cluster_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.cluster_name = input;
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn get_cluster_name(&self) -> &::std::option::Option<::std::string::String> {
        &self.cluster_name
    }
    /// Appends an item to `instance_groups`.
    ///
    /// To override the contents of this collection use [`set_instance_groups`](Self::set_instance_groups).
    ///
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
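    ///
    /// A minimal sketch of the append semantics; it takes pre-built
    /// specifications as parameters since constructing one involves several
    /// nested types:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::builders::CreateClusterInputBuilder;
    /// use aws_sdk_sagemaker::types::ClusterInstanceGroupSpecification;
    ///
    /// fn with_groups(
    ///     builder: CreateClusterInputBuilder,
    ///     groups: Vec<ClusterInstanceGroupSpecification>,
    /// ) -> CreateClusterInputBuilder {
    ///     // Repeated calls accumulate items; `set_instance_groups` would replace the list.
    ///     groups.into_iter().fold(builder, |b, g| b.instance_groups(g))
    /// }
    /// ```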
    pub fn instance_groups(mut self, input: crate::types::ClusterInstanceGroupSpecification) -> Self {
        let mut v = self.instance_groups.unwrap_or_default();
        v.push(input);
        self.instance_groups = ::std::option::Option::Some(v);
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_instance_groups(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>) -> Self {
        self.instance_groups = input;
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_instance_groups(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>> {
        &self.instance_groups
    }
    /// Appends an item to `restricted_instance_groups`.
    ///
    /// To override the contents of this collection use [`set_restricted_instance_groups`](Self::set_restricted_instance_groups).
    ///
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn restricted_instance_groups(mut self, input: crate::types::ClusterRestrictedInstanceGroupSpecification) -> Self {
        let mut v = self.restricted_instance_groups.unwrap_or_default();
        v.push(input);
        self.restricted_instance_groups = ::std::option::Option::Some(v);
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_restricted_instance_groups(
        mut self,
        input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    ) -> Self {
        self.restricted_instance_groups = input;
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_restricted_instance_groups(
        &self,
    ) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>> {
        &self.restricted_instance_groups
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
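    ///
    /// A minimal sketch of supplying a VPC configuration; the security group
    /// and subnet IDs are illustrative placeholders, and `VpcConfig::build` is
    /// assumed to validate its required members:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    /// use aws_sdk_sagemaker::types::VpcConfig;
    ///
    /// let vpc = VpcConfig::builder()
    ///     .security_group_ids("sg-0123456789abcdef0") // placeholder ID
    ///     .subnets("subnet-0123456789abcdef0") // placeholder ID
    ///     .build()
    ///     .expect("security group IDs and subnets are set");
    /// let builder = CreateClusterInput::builder().vpc_config(vpc);
    /// ```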
    pub fn vpc_config(mut self, input: crate::types::VpcConfig) -> Self {
        self.vpc_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::VpcConfig>) -> Self {
        self.vpc_config = input;
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::VpcConfig> {
        &self.vpc_config
    }
    /// Appends an item to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
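    ///
    /// A minimal sketch of appending a tag; the key and value are illustrative
    /// placeholders, and `Tag::build` is assumed to require both:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    /// use aws_sdk_sagemaker::types::Tag;
    ///
    /// let tag = Tag::builder()
    ///     .key("team") // placeholder key
    ///     .value("ml-infra") // placeholder value
    ///     .build()
    ///     .expect("key and value are set");
    /// let builder = CreateClusterInput::builder().tags(tag);
    /// ```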
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        let mut v = self.tags.unwrap_or_default();
        v.push(input);
        self.tags = ::std::option::Option::Some(v);
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.tags = input;
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        &self.tags
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn orchestrator(mut self, input: crate::types::ClusterOrchestrator) -> Self {
        self.orchestrator = ::std::option::Option::Some(input);
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn set_orchestrator(mut self, input: ::std::option::Option<crate::types::ClusterOrchestrator>) -> Self {
        self.orchestrator = input;
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn get_orchestrator(&self) -> &::std::option::Option<crate::types::ClusterOrchestrator> {
        &self.orchestrator
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
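    ///
    /// A minimal sketch of opting in to automatic recovery:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    /// use aws_sdk_sagemaker::types::ClusterNodeRecovery;
    ///
    /// let builder = CreateClusterInput::builder().node_recovery(ClusterNodeRecovery::Automatic);
    /// ```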
    pub fn node_recovery(mut self, input: crate::types::ClusterNodeRecovery) -> Self {
        self.node_recovery = ::std::option::Option::Some(input);
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn set_node_recovery(mut self, input: ::std::option::Option<crate::types::ClusterNodeRecovery>) -> Self {
        self.node_recovery = input;
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn get_node_recovery(&self) -> &::std::option::Option<crate::types::ClusterNodeRecovery> {
        &self.node_recovery
    }
    /// <p>The configuration for managed tier checkpointing on the HyperPod cluster. When enabled, this feature uses a multi-tier storage approach for storing model checkpoints, providing faster checkpoint operations and improved fault tolerance across cluster nodes.</p>
    pub fn tiered_storage_config(mut self, input: crate::types::ClusterTieredStorageConfig) -> Self {
        self.tiered_storage_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>The configuration for managed tier checkpointing on the HyperPod cluster. When enabled, this feature uses a multi-tier storage approach for storing model checkpoints, providing faster checkpoint operations and improved fault tolerance across cluster nodes.</p>
    pub fn set_tiered_storage_config(mut self, input: ::std::option::Option<crate::types::ClusterTieredStorageConfig>) -> Self {
        self.tiered_storage_config = input;
        self
    }
    /// <p>The configuration for managed tier checkpointing on the HyperPod cluster. When enabled, this feature uses a multi-tier storage approach for storing model checkpoints, providing faster checkpoint operations and improved fault tolerance across cluster nodes.</p>
    pub fn get_tiered_storage_config(&self) -> &::std::option::Option<crate::types::ClusterTieredStorageConfig> {
        &self.tiered_storage_config
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn node_provisioning_mode(mut self, input: crate::types::ClusterNodeProvisioningMode) -> Self {
        self.node_provisioning_mode = ::std::option::Option::Some(input);
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn set_node_provisioning_mode(mut self, input: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>) -> Self {
        self.node_provisioning_mode = input;
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn get_node_provisioning_mode(&self) -> &::std::option::Option<crate::types::ClusterNodeProvisioningMode> {
        &self.node_provisioning_mode
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn cluster_role(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.cluster_role = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn set_cluster_role(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.cluster_role = input;
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn get_cluster_role(&self) -> &::std::option::Option<::std::string::String> {
        &self.cluster_role
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn auto_scaling(mut self, input: crate::types::ClusterAutoScalingConfig) -> Self {
        self.auto_scaling = ::std::option::Option::Some(input);
        self
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn set_auto_scaling(mut self, input: ::std::option::Option<crate::types::ClusterAutoScalingConfig>) -> Self {
        self.auto_scaling = input;
        self
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn get_auto_scaling(&self) -> &::std::option::Option<crate::types::ClusterAutoScalingConfig> {
        &self.auto_scaling
    }
    /// Consumes the builder and constructs a [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
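    ///
    /// Every field of this input is optional, so the `Result` exists for
    /// uniformity with other operation inputs; a minimal sketch of handling it
    /// (the cluster name is an illustrative placeholder):
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    ///
    /// match CreateClusterInput::builder().cluster_name("my-hyperpod-cluster").build() {
    ///     Ok(input) => assert_eq!(input.cluster_name(), Some("my-hyperpod-cluster")),
    ///     Err(err) => eprintln!("unexpected build error: {err}"),
    /// }
    /// ```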
    pub fn build(
        self,
    ) -> ::std::result::Result<crate::operation::create_cluster::CreateClusterInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::create_cluster::CreateClusterInput {
            cluster_name: self.cluster_name,
            instance_groups: self.instance_groups,
            restricted_instance_groups: self.restricted_instance_groups,
            vpc_config: self.vpc_config,
            tags: self.tags,
            orchestrator: self.orchestrator,
            node_recovery: self.node_recovery,
            tiered_storage_config: self.tiered_storage_config,
            node_provisioning_mode: self.node_provisioning_mode,
            cluster_role: self.cluster_role,
            auto_scaling: self.auto_scaling,
        })
    }
}