aws_sdk_sagemaker/operation/create_cluster/builders.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_cluster::_create_cluster_output::CreateClusterOutputBuilder;

pub use crate::operation::create_cluster::_create_cluster_input::CreateClusterInputBuilder;

impl crate::operation::create_cluster::builders::CreateClusterInputBuilder {
    /// Sends a request with this input using the given client.
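    ///
    /// A minimal usage sketch, assuming an already-constructed `Client`; the cluster
    /// name is a hypothetical value:
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::operation::create_cluster::CreateClusterInput;
    ///
    /// // Build the input separately, then send it with an existing client.
    /// let builder = CreateClusterInput::builder().cluster_name("my-hyperpod-cluster");
    /// let output = builder.send_with(client).await?;
    /// # Ok(()) }
    /// ```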
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_cluster::CreateClusterOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_cluster::CreateClusterError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_cluster();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateCluster`.
///
/// <p>Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing persistent clusters for developing large machine learning models, such as large language models (LLMs) and diffusion models. To learn more, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod.html">Amazon SageMaker HyperPod</a> in the <i>Amazon SageMaker Developer Guide</i>.</p>
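///
/// A hedged sketch of the fluent path; all values shown are hypothetical, and a
/// real request also needs at least one instance group (see `instance_groups`):
/// ```no_run
/// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_sagemaker::types::ClusterNodeRecovery;
///
/// // Each setter returns `Self`, so calls chain until `send()` dispatches the request.
/// let output = client
///     .create_cluster()
///     .cluster_name("my-hyperpod-cluster")
///     .node_recovery(ClusterNodeRecovery::Automatic)
///     .send()
///     .await?;
/// # Ok(()) }
/// ```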
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateClusterFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_cluster::builders::CreateClusterInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_cluster::CreateClusterOutput,
        crate::operation::create_cluster::CreateClusterError,
    > for CreateClusterFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_cluster::CreateClusterOutput,
            crate::operation::create_cluster::CreateClusterError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateClusterFluentBuilder {
    /// Creates a new `CreateClusterFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateCluster as a reference.
    pub fn as_input(&self) -> &crate::operation::create_cluster::builders::CreateClusterInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
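    ///
    /// A hedged sketch of inspecting a failure; `into_service_error` converts the
    /// transport-level `SdkError` into the operation's modeled error type:
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) {
    /// match client.create_cluster().cluster_name("my-hyperpod-cluster").send().await {
    ///     Ok(output) => println!("created: {:?}", output.cluster_arn()),
    ///     // `into_service_error` discards connector-level errors; match on the
    ///     // returned error type for modeled failures.
    ///     Err(err) => eprintln!("CreateCluster failed: {}", err.into_service_error()),
    /// }
    /// # }
    /// ```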
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_cluster::CreateClusterOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_cluster::CreateClusterError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_cluster::CreateCluster::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_cluster::CreateCluster::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
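    ///
    /// A sketch of one customization point, assuming the `mutate_request` hook on
    /// `CustomizableOperation`; the header name and value are hypothetical:
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = client
    ///     .create_cluster()
    ///     .cluster_name("my-hyperpod-cluster")
    ///     .customize()
    ///     // Edit the HTTP request just before it is dispatched.
    ///     .mutate_request(|req| {
    ///         req.headers_mut().insert("x-example-header", "example");
    ///     })
    ///     .send()
    ///     .await?;
    /// # Ok(()) }
    /// ```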
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_cluster::CreateClusterOutput,
        crate::operation::create_cluster::CreateClusterError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn cluster_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.cluster_name(input.into());
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn set_cluster_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_cluster_name(input);
        self
    }
    /// <p>The name for the new SageMaker HyperPod cluster.</p>
    pub fn get_cluster_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_cluster_name()
    }
    ///
    /// Appends an item to `InstanceGroups`.
    ///
    /// To override the contents of this collection use [`set_instance_groups`](Self::set_instance_groups).
    ///
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
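    ///
    /// A hedged sketch of appending one group; field values are hypothetical and
    /// the instance-type variant is an assumption taken from the API reference:
    /// ```no_run
    /// # fn example(builder: aws_sdk_sagemaker::operation::create_cluster::builders::CreateClusterFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::{ClusterInstanceGroupSpecification, ClusterInstanceType, ClusterLifeCycleConfig};
    ///
    /// let group = ClusterInstanceGroupSpecification::builder()
    ///     .instance_group_name("worker-group") // hypothetical name
    ///     .instance_count(2)
    ///     .instance_type(ClusterInstanceType::MlT3Medium) // assumed variant
    ///     .execution_role("arn:aws:iam::111122223333:role/example") // hypothetical ARN
    ///     .life_cycle_config(
    ///         ClusterLifeCycleConfig::builder()
    ///             .source_s3_uri("s3://example-bucket/lifecycle/") // hypothetical URI
    ///             .on_create("on_create.sh")
    ///             .build()?,
    ///     )
    ///     .build()?;
    /// // Each call appends one item; call repeatedly for multiple groups.
    /// let builder = builder.instance_groups(group);
    /// # Ok(()) }
    /// ```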
    pub fn instance_groups(mut self, input: crate::types::ClusterInstanceGroupSpecification) -> Self {
        self.inner = self.inner.instance_groups(input);
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_instance_groups(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>>) -> Self {
        self.inner = self.inner.set_instance_groups(input);
        self
    }
    /// <p>The instance groups to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_instance_groups(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterInstanceGroupSpecification>> {
        self.inner.get_instance_groups()
    }
    ///
    /// Appends an item to `RestrictedInstanceGroups`.
    ///
    /// To override the contents of this collection use [`set_restricted_instance_groups`](Self::set_restricted_instance_groups).
    ///
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn restricted_instance_groups(mut self, input: crate::types::ClusterRestrictedInstanceGroupSpecification) -> Self {
        self.inner = self.inner.restricted_instance_groups(input);
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn set_restricted_instance_groups(
        mut self,
        input: ::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>>,
    ) -> Self {
        self.inner = self.inner.set_restricted_instance_groups(input);
        self
    }
    /// <p>The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.</p>
    pub fn get_restricted_instance_groups(
        &self,
    ) -> &::std::option::Option<::std::vec::Vec<crate::types::ClusterRestrictedInstanceGroupSpecification>> {
        self.inner.get_restricted_instance_groups()
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
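    ///
    /// A minimal sketch of the VPC configuration; the security group and subnet
    /// IDs are hypothetical:
    /// ```no_run
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::VpcConfig;
    ///
    /// // `security_group_ids` and `subnets` are appenders: one item per call.
    /// let vpc = VpcConfig::builder()
    ///     .security_group_ids("sg-0123456789abcdef0")
    ///     .subnets("subnet-0123456789abcdef0")
    ///     .build()?;
    /// # Ok(()) }
    /// ```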
    pub fn vpc_config(mut self, input: crate::types::VpcConfig) -> Self {
        self.inner = self.inner.vpc_config(input);
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn set_vpc_config(mut self, input: ::std::option::Option<crate::types::VpcConfig>) -> Self {
        self.inner = self.inner.set_vpc_config(input);
        self
    }
    /// <p>Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html">Give SageMaker access to resources in your Amazon VPC</a>.</p><note>
    /// <p>When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform:</p>
    /// <ul>
    /// <li>
    /// <p>Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications.</p></li>
    /// <li>
    /// <p>In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see <a href="https://docs.aws.amazon.com/eks/latest/userguide/deploy-ipv6-cluster.html#_deploy_an_ipv6_cluster_with_eksctl">Amazon EKS IPv6 Cluster Deployment</a>.</p></li>
    /// </ul>
    /// <p>Additional resources for IPv6 configuration:</p>
    /// <ul>
    /// <li>
    /// <p>For information about adding IPv6 support to your VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/vpc-migrate-ipv6.html">IPv6 Support for VPC</a>.</p></li>
    /// <li>
    /// <p>For information about creating a new IPv6-compatible VPC, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/create-vpc.html">Amazon VPC Creation Guide</a>.</p></li>
    /// <li>
    /// <p>To configure SageMaker HyperPod with a custom Amazon VPC, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-optional-vpc">Custom Amazon VPC Setup for SageMaker HyperPod</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn get_vpc_config(&self) -> &::std::option::Option<crate::types::VpcConfig> {
        self.inner.get_vpc_config()
    }
    ///
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
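    ///
    /// A brief sketch of appending one tag; the key and value are hypothetical:
    /// ```no_run
    /// # fn example(builder: aws_sdk_sagemaker::operation::create_cluster::builders::CreateClusterFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::Tag;
    ///
    /// let builder = builder.tags(Tag::builder().key("team").value("ml-research").build()?);
    /// # Ok(()) }
    /// ```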
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see the <a href="https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html">Tagging Amazon Web Services Resources User Guide</a>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        self.inner.get_tags()
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
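    ///
    /// A hedged sketch of pointing the cluster at an existing EKS cluster; the ARN
    /// is hypothetical and the nested `eks` config name is an assumption taken
    /// from the API reference:
    /// ```no_run
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::{ClusterOrchestrator, ClusterOrchestratorEksConfig};
    ///
    /// let orchestrator = ClusterOrchestrator::builder()
    ///     .eks(
    ///         ClusterOrchestratorEksConfig::builder()
    ///             .cluster_arn("arn:aws:eks:us-west-2:111122223333:cluster/example")
    ///             .build()?,
    ///     )
    ///     .build()?;
    /// # Ok(()) }
    /// ```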
    pub fn orchestrator(mut self, input: crate::types::ClusterOrchestrator) -> Self {
        self.inner = self.inner.orchestrator(input);
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn set_orchestrator(mut self, input: ::std::option::Option<crate::types::ClusterOrchestrator>) -> Self {
        self.inner = self.inner.set_orchestrator(input);
        self
    }
    /// <p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>"eks"</code>, which uses an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>
    pub fn get_orchestrator(&self) -> &::std::option::Option<crate::types::ClusterOrchestrator> {
        self.inner.get_orchestrator()
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
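    ///
    /// A one-line sketch; the enum variants mirror the documented values:
    /// ```no_run
    /// # fn example(builder: aws_sdk_sagemaker::operation::create_cluster::builders::CreateClusterFluentBuilder) {
    /// use aws_sdk_sagemaker::types::ClusterNodeRecovery;
    ///
    /// // Opt in to automatic reboot/replacement of faulty nodes.
    /// let builder = builder.node_recovery(ClusterNodeRecovery::Automatic);
    /// # }
    /// ```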
    pub fn node_recovery(mut self, input: crate::types::ClusterNodeRecovery) -> Self {
        self.inner = self.inner.node_recovery(input);
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn set_node_recovery(mut self, input: ::std::option::Option<crate::types::ClusterNodeRecovery>) -> Self {
        self.inner = self.inner.set_node_recovery(input);
        self
    }
    /// <p>The node recovery mode for the SageMaker HyperPod cluster. When set to <code>Automatic</code>, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to <code>None</code>, cluster administrators will need to manually manage any faulty cluster instances.</p>
    pub fn get_node_recovery(&self) -> &::std::option::Option<crate::types::ClusterNodeRecovery> {
        self.inner.get_node_recovery()
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS orchestrated clusters.</p></li>
    /// </ul>
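    ///
    /// A one-line sketch, assuming a `Continuous` variant on the mode enum:
    /// ```no_run
    /// # fn example(builder: aws_sdk_sagemaker::operation::create_cluster::builders::CreateClusterFluentBuilder) {
    /// use aws_sdk_sagemaker::types::ClusterNodeProvisioningMode;
    ///
    /// let builder = builder.node_provisioning_mode(ClusterNodeProvisioningMode::Continuous);
    /// # }
    /// ```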
    pub fn node_provisioning_mode(mut self, input: crate::types::ClusterNodeProvisioningMode) -> Self {
        self.inner = self.inner.node_provisioning_mode(input);
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS orchestrated clusters.</p></li>
    /// </ul>
    pub fn set_node_provisioning_mode(mut self, input: ::std::option::Option<crate::types::ClusterNodeProvisioningMode>) -> Self {
        self.inner = self.inner.set_node_provisioning_mode(input);
        self
    }
    /// <p>The mode for provisioning nodes in the cluster. You can specify the following modes:</p>
    /// <ul>
    /// <li>
    /// <p><b>Continuous</b>: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, 4) partial provisioning capabilities. Your clusters and instance groups remain <code>InService</code> while scaling. This mode is only supported for EKS orchestrated clusters.</p></li>
    /// </ul>
    pub fn get_node_provisioning_mode(&self) -> &::std::option::Option<crate::types::ClusterNodeProvisioningMode> {
        self.inner.get_node_provisioning_mode()
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn cluster_role(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.cluster_role(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn set_cluster_role(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_cluster_role(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the IAM role that HyperPod assumes to perform cluster autoscaling operations. This role must have permissions for <code>sagemaker:BatchAddClusterNodes</code> and <code>sagemaker:BatchDeleteClusterNodes</code>. This is only required when autoscaling is enabled and when HyperPod is performing autoscaling operations.</p>
    pub fn get_cluster_role(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_cluster_role()
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
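    ///
    /// A loosely hedged sketch; the config type's `mode` field and the
    /// `ClusterAutoScalingMode::Enable` variant are assumptions taken from the
    /// API reference, not confirmed by this file:
    /// ```no_run
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::{ClusterAutoScalingConfig, ClusterAutoScalingMode};
    ///
    /// let auto_scaling = ClusterAutoScalingConfig::builder()
    ///     .mode(ClusterAutoScalingMode::Enable) // assumed variant
    ///     .build()?;
    /// # Ok(()) }
    /// ```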
    pub fn auto_scaling(mut self, input: crate::types::ClusterAutoScalingConfig) -> Self {
        self.inner = self.inner.auto_scaling(input);
        self
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn set_auto_scaling(mut self, input: ::std::option::Option<crate::types::ClusterAutoScalingConfig>) -> Self {
        self.inner = self.inner.set_auto_scaling(input);
        self
    }
    /// <p>The autoscaling configuration for the cluster. Enables automatic scaling of cluster nodes based on workload demand using a Karpenter-based system.</p>
    pub fn get_auto_scaling(&self) -> &::std::option::Option<crate::types::ClusterAutoScalingConfig> {
        self.inner.get_auto_scaling()
    }
}
334}