aws_sdk_sagemaker/operation/create_cluster/_create_cluster_input.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct CreateClusterInput {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub cluster_name: ::std::option::Option<::std::string::String>,
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>,
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub restricted_instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which means an Amazon Elastic Kubernetes Service cluster is used as the orchestrator.</p>
    pub orchestrator: ::std::option::Option<crate::types::ClusterOrchestrator>,
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub node_recovery: ::std::option::Option<crate::types::ClusterNodeRecovery>,
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub node_provisioning_mode: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>,
}
impl CreateClusterInput {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn cluster_name(&self) -> ::std::option::Option<&str> {
        self.cluster_name.as_deref()
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.instance_groups.is_none()`.
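    ///
    /// A minimal sketch (using only the APIs defined in this file) of how the empty-slice
    /// default relates to the `is_none()` check:
    ///
    /// ```
    /// let input = aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput::builder()
    ///     .cluster_name("my-hyperpod-cluster")
    ///     .build()
    ///     .expect("a cluster name alone is enough to build the input");
    /// // The accessor substitutes an empty slice when the field was never set ...
    /// assert!(input.instance_groups().is_empty());
    /// // ... while the underlying `Option` still records that nothing was sent.
    /// assert!(input.instance_groups.is_none());
    /// ```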
    pub fn instance_groups(&self) -> &[crate::types::ClusterInstanceGroupSpecification] {
        self.instance_groups.as_deref().unwrap_or_default()
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.restricted_instance_groups.is_none()`.
    pub fn restricted_instance_groups(&self) -> &[crate::types::ClusterRestrictedInstanceGroupSpecification] {
        self.restricted_instance_groups.as_deref().unwrap_or_default()
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn vpc_config(&self) -> ::std::option::Option<&crate::types::VpcConfig> {
        self.vpc_config.as_ref()
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.tags.is_none()`.
    pub fn tags(&self) -> &[crate::types::Tag] {
        self.tags.as_deref().unwrap_or_default()
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which means an Amazon Elastic Kubernetes Service cluster is used as the orchestrator.</p>
    pub fn orchestrator(&self) -> ::std::option::Option<&crate::types::ClusterOrchestrator> {
        self.orchestrator.as_ref()
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn node_recovery(&self) -> ::std::option::Option<&crate::types::ClusterNodeRecovery> {
        self.node_recovery.as_ref()
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn node_provisioning_mode(&self) -> ::std::option::Option<&crate::types::ClusterNodeProvisioningMode> {
        self.node_provisioning_mode.as_ref()
    }
}
impl CreateClusterInput {
    /// Creates a new builder-style object to manufacture [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
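    ///
    /// A hedged sketch of assembling an input with the setters defined on the builder below.
    /// `ClusterInstanceGroupSpecification::builder()` and `ClusterNodeRecovery::Automatic` come
    /// from `crate::types`, not from this file, so the instance-group fields shown are
    /// illustrative assumptions rather than a complete, valid specification:
    ///
    /// ```no_run
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    /// use aws_sdk_sagemaker::types::{ClusterInstanceGroupSpecification, ClusterNodeRecovery};
    ///
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// // Hypothetical instance group; real specifications need more fields than shown here.
    /// let worker_group = ClusterInstanceGroupSpecification::builder()
    ///     .instance_group_name("worker-group-1")
    ///     .build()?;
    ///
    /// let input = CreateClusterInput::builder()
    ///     .cluster_name("my-hyperpod-cluster") // required
    ///     .instance_groups(worker_group) // appends a single group
    ///     .node_recovery(ClusterNodeRecovery::Automatic)
    ///     .build()?;
    /// # let _ = input;
    /// # Ok(())
    /// # }
    /// ```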
    pub fn builder() -> crate::operation::create_cluster::builders::CreateClusterInputBuilder {
        crate::operation::create_cluster::builders::CreateClusterInputBuilder::default()
    }
}

/// A builder for [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct CreateClusterInputBuilder {
    pub(crate) cluster_name: ::std::option::Option<::std::string::String>,
    pub(crate) instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>,
    pub(crate) restricted_instance_groups: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    pub(crate) vpc_config: ::std::option::Option<crate::types::VpcConfig>,
    pub(crate) tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
    pub(crate) orchestrator: ::std::option::Option<crate::types::ClusterOrchestrator>,
    pub(crate) node_recovery: ::std::option::Option<crate::types::ClusterNodeRecovery>,
    pub(crate) node_provisioning_mode: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>,
}
impl CreateClusterInputBuilder {
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    /// This field is required.
    pub fn cluster_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.cluster_name = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn set_cluster_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.cluster_name = input;
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn get_cluster_name(&self) -> &::std::option::Option<::std::string::String> {
        &self.cluster_name
    }
    /// Appends an item to `instance_groups`.
    ///
    /// To override the contents of this collection use [`set_instance_groups`](Self::set_instance_groups).
    ///
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
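    ///
    /// A small sketch of the append-versus-override behavior, assuming `group_a` and `group_b`
    /// are `ClusterInstanceGroupSpecification` values built elsewhere:
    ///
    /// ```ignore
    /// // Calling the appender repeatedly accumulates items ...
    /// let builder = CreateClusterInputBuilder::default()
    ///     .instance_groups(group_a.clone())
    ///     .instance_groups(group_b);
    /// // ... whereas `set_instance_groups` replaces the whole collection at once.
    /// let builder = builder.set_instance_groups(Some(vec![group_a]));
    /// ```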
    pub fn instance_groups(mut self, input: crate::types::ClusterInstanceGroupSpecification) -> Self {
        let mut v = self.instance_groups.unwrap_or_default();
        v.push(input);
        self.instance_groups = ::std::option::Option::Some(v);
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_instance_groups(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>) -> Self {
        self.instance_groups = input;
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_instance_groups(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>> {
        &self.instance_groups
    }
    /// Appends an item to `restricted_instance_groups`.
    ///
    /// To override the contents of this collection use [`set_restricted_instance_groups`](Self::set_restricted_instance_groups).
    ///
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn restricted_instance_groups(mut self, input: crate::types::ClusterRestrictedInstanceGroupSpecification) -> Self {
        let mut v = self.restricted_instance_groups.unwrap_or_default();
        v.push(input);
        self.restricted_instance_groups = ::std::option::Option::Some(v);
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_restricted_instance_groups(
        mut self,
        input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    ) -> Self {
        self.restricted_instance_groups = input;
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_restricted_instance_groups(
        &self,
    ) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>> {
        &self.restricted_instance_groups
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn vpc_config(mut self, input: crate::types::VpcConfig) -> Self {
        self.vpc_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::VpcConfig>) -> Self {
        self.vpc_config = input;
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::VpcConfig> {
        &self.vpc_config
    }
    /// Appends an item to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        let mut v = self.tags.unwrap_or_default();
        v.push(input);
        self.tags = ::std::option::Option::Some(v);
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.tags = input;
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        &self.tags
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which means an Amazon Elastic Kubernetes Service cluster is used as the orchestrator.</p>
    pub fn orchestrator(mut self, input: crate::types::ClusterOrchestrator) -> Self {
        self.orchestrator = ::std::option::Option::Some(input);
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which means an Amazon Elastic Kubernetes Service cluster is used as the orchestrator.</p>
    pub fn set_orchestrator(mut self, input: ::std::option::Option<crate::types::ClusterOrchestrator>) -> Self {
        self.orchestrator = input;
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which means an Amazon Elastic Kubernetes Service cluster is used as the orchestrator.</p>
    pub fn get_orchestrator(&self) -> &::std::option::Option<crate::types::ClusterOrchestrator> {
        &self.orchestrator
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn node_recovery(mut self, input: crate::types::ClusterNodeRecovery) -> Self {
        self.node_recovery = ::std::option::Option::Some(input);
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn set_node_recovery(mut self, input: ::std::option::Option<crate::types::ClusterNodeRecovery>) -> Self {
        self.node_recovery = input;
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn get_node_recovery(&self) -> &::std::option::Option<crate::types::ClusterNodeRecovery> {
        &self.node_recovery
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn node_provisioning_mode(mut self, input: crate::types::ClusterNodeProvisioningMode) -> Self {
        self.node_provisioning_mode = ::std::option::Option::Some(input);
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn set_node_provisioning_mode(mut self, input: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>) -> Self {
        self.node_provisioning_mode = input;
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, and 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS-orchestrated clusters.</p></li>
    /// </ul>
    pub fn get_node_provisioning_mode(&self) -> &::std::option::Option<crate::types::ClusterNodeProvisioningMode> {
        &self.node_provisioning_mode
    }
    /// Consumes the builder and constructs a [`CreateClusterInput`](crate::operation::create_cluster::CreateClusterInput).
    pub fn build(
        self,
    ) -> ::std::result::Result<crate::operation::create_cluster::CreateClusterInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::create_cluster::CreateClusterInput {
            cluster_name: self.cluster_name,
            instance_groups: self.instance_groups,
            restricted_instance_groups: self.restricted_instance_groups,
            vpc_config: self.vpc_config,
            tags: self.tags,
            orchestrator: self.orchestrator,
            node_recovery: self.node_recovery,
            node_provisioning_mode: self.node_provisioning_mode,
        })
    }
}