aws_sdk_sagemaker/operation/create_inference_component/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_inference_component::_create_inference_component_output::CreateInferenceComponentOutputBuilder;

pub use crate::operation::create_inference_component::_create_inference_component_input::CreateInferenceComponentInputBuilder;

impl crate::operation::create_inference_component::builders::CreateInferenceComponentInputBuilder {
    /// Sends a request with this input using the given client.
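    ///
    /// A minimal usage sketch, assuming an already-configured client; the names
    /// below are placeholder values, and other required members (such as the
    /// specification) are omitted for brevity:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::operation::create_inference_component::CreateInferenceComponentInput;
    ///
    /// let output = CreateInferenceComponentInput::builder()
    ///     .inference_component_name("my-inference-component") // placeholder name
    ///     .endpoint_name("my-endpoint") // placeholder name
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```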
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_inference_component::CreateInferenceComponentOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_inference_component::CreateInferenceComponentError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_inference_component();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateInferenceComponent`.
///
/// <p>Creates an inference component, which is a SageMaker AI hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action.</p>
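///
/// A minimal usage sketch via the fluent client (all names below are
/// placeholder values, and the required specification is omitted for brevity):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .create_inference_component()
///     .inference_component_name("my-inference-component") // placeholder
///     .endpoint_name("my-endpoint") // placeholder
///     .variant_name("AllTraffic") // placeholder variant name
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```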
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateInferenceComponentFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_inference_component::builders::CreateInferenceComponentInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_inference_component::CreateInferenceComponentOutput,
        crate::operation::create_inference_component::CreateInferenceComponentError,
    > for CreateInferenceComponentFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_inference_component::CreateInferenceComponentOutput,
            crate::operation::create_inference_component::CreateInferenceComponentError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateInferenceComponentFluentBuilder {
    /// Creates a new `CreateInferenceComponentFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateInferenceComponent as a reference.
    pub fn as_input(&self) -> &crate::operation::create_inference_component::builders::CreateInferenceComponentInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
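    ///
    /// A sketch of matching on the returned error (`into_service_error` is the
    /// standard `SdkError` helper for recovering the service-level error):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) {
    /// match client.create_inference_component().send().await {
    ///     Ok(output) => println!("created: {output:?}"),
    ///     Err(err) => {
    ///         // Unwrap the service-level error from the transport wrapper.
    ///         let service_err = err.into_service_error();
    ///         eprintln!("CreateInferenceComponent failed: {service_err}");
    ///     }
    /// }
    /// # }
    /// ```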
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_inference_component::CreateInferenceComponentOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_inference_component::CreateInferenceComponentError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_inference_component::CreateInferenceComponent::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_inference_component::CreateInferenceComponent::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
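    ///
    /// A sketch of customizing the request before it is sent, assuming the
    /// `mutate_request` helper exposed by the SDK's customizable operations:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = client
    ///     .create_inference_component()
    ///     .inference_component_name("my-inference-component") // placeholder
    ///     .customize()
    ///     .mutate_request(|req| {
    ///         // Attach an extra header to the outgoing HTTP request.
    ///         req.headers_mut().insert("x-example-header", "example-value");
    ///     })
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```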
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_inference_component::CreateInferenceComponentOutput,
        crate::operation::create_inference_component::CreateInferenceComponentError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    /// Applies a configuration override to this operation.
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    /// Sets or clears the configuration override for this operation.
    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>A unique name to assign to the inference component.</p>
    pub fn inference_component_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.inference_component_name(input.into());
        self
    }
    /// <p>A unique name to assign to the inference component.</p>
    pub fn set_inference_component_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_inference_component_name(input);
        self
    }
    /// <p>A unique name to assign to the inference component.</p>
    pub fn get_inference_component_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_inference_component_name()
    }
    /// <p>The name of an existing endpoint where you host the inference component.</p>
    pub fn endpoint_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.endpoint_name(input.into());
        self
    }
    /// <p>The name of an existing endpoint where you host the inference component.</p>
    pub fn set_endpoint_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_endpoint_name(input);
        self
    }
    /// <p>The name of an existing endpoint where you host the inference component.</p>
    pub fn get_endpoint_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_endpoint_name()
    }
    /// <p>The name of an existing production variant where you host the inference component.</p>
    pub fn variant_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.variant_name(input.into());
        self
    }
    /// <p>The name of an existing production variant where you host the inference component.</p>
    pub fn set_variant_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_variant_name(input);
        self
    }
    /// <p>The name of an existing production variant where you host the inference component.</p>
    pub fn get_variant_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_variant_name()
    }
    /// <p>Details about the resources to deploy with this inference component, including the model, container, and compute resources.</p>
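    ///
    /// A sketch of building a specification with placeholder values; the inner
    /// `build()` is written as fallible here on the assumption that the compute
    /// resource requirements have required members:
    ///
    /// ```no_run
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::{
    ///     InferenceComponentComputeResourceRequirements, InferenceComponentSpecification,
    /// };
    ///
    /// let spec = InferenceComponentSpecification::builder()
    ///     .model_name("my-model") // placeholder: an existing SageMaker AI model
    ///     .compute_resource_requirements(
    ///         InferenceComponentComputeResourceRequirements::builder()
    ///             .number_of_cpu_cores_required(2.0)
    ///             .min_memory_required_in_mb(1024)
    ///             .build()?,
    ///     )
    ///     .build();
    /// # Ok(())
    /// # }
    /// ```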
    pub fn specification(mut self, input: crate::types::InferenceComponentSpecification) -> Self {
        self.inner = self.inner.specification(input);
        self
    }
    /// <p>Details about the resources to deploy with this inference component, including the model, container, and compute resources.</p>
    pub fn set_specification(mut self, input: ::std::option::Option<crate::types::InferenceComponentSpecification>) -> Self {
        self.inner = self.inner.set_specification(input);
        self
    }
    /// <p>Details about the resources to deploy with this inference component, including the model, container, and compute resources.</p>
    pub fn get_specification(&self) -> &::std::option::Option<crate::types::InferenceComponentSpecification> {
        self.inner.get_specification()
    }
    /// <p>Runtime settings for a model that is deployed with an inference component.</p>
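    ///
    /// A sketch of a runtime config with a placeholder copy count; `build()` is
    /// written as fallible here on the assumption that `copy_count` is required:
    ///
    /// ```no_run
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::InferenceComponentRuntimeConfig;
    ///
    /// let runtime_config = InferenceComponentRuntimeConfig::builder()
    ///     .copy_count(1) // number of copies of the model to host
    ///     .build()?;
    /// # Ok(())
    /// # }
    /// ```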
    pub fn runtime_config(mut self, input: crate::types::InferenceComponentRuntimeConfig) -> Self {
        self.inner = self.inner.runtime_config(input);
        self
    }
    /// <p>Runtime settings for a model that is deployed with an inference component.</p>
    pub fn set_runtime_config(mut self, input: ::std::option::Option<crate::types::InferenceComponentRuntimeConfig>) -> Self {
        self.inner = self.inner.set_runtime_config(input);
        self
    }
    /// <p>Runtime settings for a model that is deployed with an inference component.</p>
    pub fn get_runtime_config(&self) -> &::std::option::Option<crate::types::InferenceComponentRuntimeConfig> {
        self.inner.get_runtime_config()
    }
    ///
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>A list of key-value pairs associated with the model. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon Web Services General Reference</i>.</p>
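    ///
    /// A sketch of appending two tags; `Tag::build` is treated as fallible here
    /// on the assumption that both `key` and `value` are required members:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::Tag;
    ///
    /// let output = client
    ///     .create_inference_component()
    ///     .tags(Tag::builder().key("team").value("ml-platform").build()?) // appends one Tag
    ///     .tags(Tag::builder().key("env").value("prod").build()?) // repeated calls append
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```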
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p>A list of key-value pairs associated with the model. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>A list of key-value pairs associated with the model. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        self.inner.get_tags()
    }
}