aws_sdk_sagemakerruntime/operation/invoke_endpoint_with_response_stream/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::invoke_endpoint_with_response_stream::_invoke_endpoint_with_response_stream_output::InvokeEndpointWithResponseStreamOutputBuilder;

pub use crate::operation::invoke_endpoint_with_response_stream::_invoke_endpoint_with_response_stream_input::InvokeEndpointWithResponseStreamInputBuilder;

impl crate::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder {
    /// Sends a request with this input using the given client.
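    ///
    /// A minimal sketch of building the input separately and sending it with an existing
    /// client; the endpoint name, content type, and payload below are placeholder values:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    /// use aws_sdk_sagemakerruntime::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamInput;
    /// use aws_sdk_sagemakerruntime::primitives::Blob;
    ///
    /// let _output = InvokeEndpointWithResponseStreamInput::builder()
    ///     .endpoint_name("my-endpoint")
    ///     .content_type("application/json")
    ///     .body(Blob::new(b"{\"inputs\": \"hello\"}".to_vec()))
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```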
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.invoke_endpoint_with_response_stream();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `InvokeEndpointWithResponseStream`.
///
/// <p>Invokes a model at the specified endpoint to return the inference response as a stream. The inference stream provides the response payload incrementally as a series of parts. Before you can get an inference stream, you must have access to a model that's deployed using Amazon SageMaker hosting services, and the container for that model must support inference streaming.</p>
/// <p>For more information that can help you use this API, see the following sections in the <i>Amazon SageMaker Developer Guide</i>:</p>
/// <ul>
/// <li>
/// <p>For information about how to add streaming support to a model, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-code-how-containe-serves-requests">How Containers Serve Requests</a>.</p></li>
/// <li>
/// <p>For information about how to process the streaming response, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-test-endpoints.html">Invoke real-time endpoints</a>.</p></li>
/// </ul>
/// <p>Before you can use this operation, your IAM permissions must allow the <code>sagemaker:InvokeEndpoint</code> action. For more information about Amazon SageMaker actions for IAM policies, see <a href="https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsagemaker.html">Actions, resources, and condition keys for Amazon SageMaker</a> in the <i>IAM Service Authorization Reference</i>.</p>
/// <p>Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.</p>
/// <p>Calls to <code>InvokeEndpointWithResponseStream</code> are authenticated by using Amazon Web Services Signature Version 4. For information, see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">Authenticating Requests (Amazon Web Services Signature Version 4)</a> in the <i>Amazon S3 API Reference</i>.</p>
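///
/// A minimal sketch of invoking the operation and draining the response stream; the endpoint
/// name, content type, and request payload are placeholder values:
///
/// ```no_run
/// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
/// use aws_sdk_sagemakerruntime::primitives::Blob;
/// use aws_sdk_sagemakerruntime::types::ResponseStream;
///
/// let mut output = client
///     .invoke_endpoint_with_response_stream()
///     .endpoint_name("my-endpoint")
///     .content_type("application/json")
///     .body(Blob::new(b"{\"inputs\": \"hello\"}".to_vec()))
///     .send()
///     .await?;
/// // The response payload arrives incrementally as `PayloadPart` events.
/// while let Some(event) = output.body.recv().await? {
///     if let ResponseStream::PayloadPart(part) = event {
///         if let Some(bytes) = part.bytes() {
///             println!("received {} bytes", bytes.as_ref().len());
///         }
///     }
/// }
/// # Ok(())
/// # }
/// ```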
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct InvokeEndpointWithResponseStreamFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
    > for InvokeEndpointWithResponseStreamFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl InvokeEndpointWithResponseStreamFluentBuilder {
    /// Creates a new `InvokeEndpointWithResponseStreamFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the InvokeEndpointWithResponseStream input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
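    ///
    /// A sketch of matching the returned `SdkError` against the modeled operation error
    /// (the endpoint name and payload are placeholder values):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) {
    /// use aws_sdk_sagemakerruntime::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError;
    /// use aws_sdk_sagemakerruntime::primitives::Blob;
    ///
    /// let result = client
    ///     .invoke_endpoint_with_response_stream()
    ///     .endpoint_name("my-endpoint")
    ///     .body(Blob::new(b"{}".to_vec()))
    ///     .send()
    ///     .await;
    /// if let Err(err) = result {
    ///     match err.into_service_error() {
    ///         InvokeEndpointWithResponseStreamError::ValidationError(e) => eprintln!("validation error: {e}"),
    ///         other => eprintln!("failed to invoke endpoint: {other}"),
    ///     }
    /// }
    /// # }
    /// ```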
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStream::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStream::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
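    ///
    /// For example, a per-request configuration override could be applied before sending
    /// (a sketch; the region value is a placeholder):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    /// use aws_sdk_sagemakerruntime::config::Region;
    /// use aws_sdk_sagemakerruntime::primitives::Blob;
    ///
    /// let _output = client
    ///     .invoke_endpoint_with_response_stream()
    ///     .endpoint_name("my-endpoint")
    ///     .body(Blob::new(b"{}".to_vec()))
    ///     .customize()
    ///     .config_override(aws_sdk_sagemakerruntime::config::Config::builder().region(Region::new("us-west-2")))
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```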
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The name of the endpoint that you specified when you created the endpoint using the <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.</p>
    pub fn endpoint_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.endpoint_name(input.into());
        self
    }
    /// <p>The name of the endpoint that you specified when you created the endpoint using the <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.</p>
    pub fn set_endpoint_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_endpoint_name(input);
        self
    }
    /// <p>The name of the endpoint that you specified when you created the endpoint using the <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.</p>
    pub fn get_endpoint_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_endpoint_name()
    }
    /// <p>Provides input data, in the format specified in the <code>ContentType</code> request header. Amazon SageMaker passes all of the data in the body to the model.</p>
    /// <p>For information about the format of the request body, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html">Common Data Formats-Inference</a>.</p>
    pub fn body(mut self, input: ::aws_smithy_types::Blob) -> Self {
        self.inner = self.inner.body(input);
        self
    }
    /// <p>Provides input data, in the format specified in the <code>ContentType</code> request header. Amazon SageMaker passes all of the data in the body to the model.</p>
    /// <p>For information about the format of the request body, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html">Common Data Formats-Inference</a>.</p>
    pub fn set_body(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self {
        self.inner = self.inner.set_body(input);
        self
    }
    /// <p>Provides input data, in the format specified in the <code>ContentType</code> request header. Amazon SageMaker passes all of the data in the body to the model.</p>
    /// <p>For information about the format of the request body, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html">Common Data Formats-Inference</a>.</p>
    pub fn get_body(&self) -> &::std::option::Option<::aws_smithy_types::Blob> {
        self.inner.get_body()
    }
    /// <p>The MIME type of the input data in the request body.</p>
    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.content_type(input.into());
        self
    }
    /// <p>The MIME type of the input data in the request body.</p>
    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_content_type(input);
        self
    }
    /// <p>The MIME type of the input data in the request body.</p>
    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_content_type()
    }
    /// <p>The desired MIME type of the inference response from the model container.</p>
    pub fn accept(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.accept(input.into());
        self
    }
    /// <p>The desired MIME type of the inference response from the model container.</p>
    pub fn set_accept(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_accept(input);
        self
    }
    /// <p>The desired MIME type of the inference response from the model container.</p>
    pub fn get_accept(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_accept()
    }
    /// <p>Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in <a href="https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6">Section 3.2.6. Field Value Components</a> of the Hypertext Transfer Protocol (HTTP/1.1).</p>
    /// <p>The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with <code>Trace ID:</code> in your post-processing function.</p>
    /// <p>This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.</p>
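    ///
    /// An illustrative sketch of the round trip (the attribute string is an arbitrary
    /// placeholder that your container would have to understand):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    /// use aws_sdk_sagemakerruntime::primitives::Blob;
    ///
    /// let output = client
    ///     .invoke_endpoint_with_response_stream()
    ///     .endpoint_name("my-endpoint")
    ///     .body(Blob::new(b"{}".to_vec()))
    ///     .custom_attributes("trace-id=12345")
    ///     .send()
    ///     .await?;
    /// // Whatever the container sets (or echoes back) is available on the response.
    /// println!("custom attributes: {:?}", output.custom_attributes());
    /// # Ok(())
    /// # }
    /// ```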
    pub fn custom_attributes(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.custom_attributes(input.into());
        self
    }
    /// <p>Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in <a href="https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6">Section 3.2.6. Field Value Components</a> of the Hypertext Transfer Protocol (HTTP/1.1).</p>
    /// <p>The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with <code>Trace ID:</code> in your post-processing function.</p>
    /// <p>This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.</p>
    pub fn set_custom_attributes(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_custom_attributes(input);
        self
    }
    /// <p>Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in <a href="https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6">Section 3.2.6. Field Value Components</a> of the Hypertext Transfer Protocol (HTTP/1.1).</p>
    /// <p>The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with <code>Trace ID:</code> in your post-processing function.</p>
    /// <p>This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.</p>
    pub fn get_custom_attributes(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_custom_attributes()
    }
    /// <p>Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.</p>
    /// <p>For information about how to use variant targeting to perform A/B testing, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html">Test models in production</a>.</p>
    pub fn target_variant(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.target_variant(input.into());
        self
    }
    /// <p>Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.</p>
    /// <p>For information about how to use variant targeting to perform A/B testing, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html">Test models in production</a>.</p>
    pub fn set_target_variant(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_target_variant(input);
        self
    }
    /// <p>Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.</p>
    /// <p>For information about how to use variant targeting to perform A/B testing, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html">Test models in production</a>.</p>
    pub fn get_target_variant(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_target_variant()
    }
    /// <p>If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.</p>
    pub fn target_container_hostname(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.target_container_hostname(input.into());
        self
    }
    /// <p>If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.</p>
    pub fn set_target_container_hostname(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_target_container_hostname(input);
        self
    }
    /// <p>If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.</p>
    pub fn get_target_container_hostname(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_target_container_hostname()
    }
    /// <p>An identifier that you assign to your request.</p>
    pub fn inference_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.inference_id(input.into());
        self
    }
    /// <p>An identifier that you assign to your request.</p>
    pub fn set_inference_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_inference_id(input);
        self
    }
    /// <p>An identifier that you assign to your request.</p>
    pub fn get_inference_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_inference_id()
    }
    /// <p>If the endpoint hosts one or more inference components, this parameter specifies the name of the inference component to invoke for a streaming response.</p>
    pub fn inference_component_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.inference_component_name(input.into());
        self
    }
    /// <p>If the endpoint hosts one or more inference components, this parameter specifies the name of the inference component to invoke for a streaming response.</p>
    pub fn set_inference_component_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_inference_component_name(input);
        self
    }
    /// <p>If the endpoint hosts one or more inference components, this parameter specifies the name of the inference component to invoke for a streaming response.</p>
    pub fn get_inference_component_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_inference_component_name()
    }
    /// <p>The ID of a stateful session to handle your request.</p>
    /// <p>You can't create a stateful session by using the <code>InvokeEndpointWithResponseStream</code> action. Instead, you can create one by using the <code>InvokeEndpoint</code> action. In your request, you specify <code>NEW_SESSION</code> for the <code>SessionId</code> request parameter. The response to that request provides the session ID for the <code>NewSessionId</code> response parameter.</p>
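    ///
    /// A sketch of that flow, assuming the standard generated accessors (the endpoint name and
    /// payloads are placeholder values):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    /// use aws_sdk_sagemakerruntime::primitives::Blob;
    ///
    /// // 1. Open a stateful session with `InvokeEndpoint`.
    /// let first = client
    ///     .invoke_endpoint()
    ///     .endpoint_name("my-endpoint")
    ///     .body(Blob::new(b"{}".to_vec()))
    ///     .session_id("NEW_SESSION")
    ///     .send()
    ///     .await?;
    /// let session_id = first.new_session_id().expect("a new session was created").to_string();
    ///
    /// // 2. Reuse that session for the streaming invocation.
    /// let _streaming = client
    ///     .invoke_endpoint_with_response_stream()
    ///     .endpoint_name("my-endpoint")
    ///     .body(Blob::new(b"{}".to_vec()))
    ///     .session_id(session_id)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```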
    pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.session_id(input.into());
        self
    }
    /// <p>The ID of a stateful session to handle your request.</p>
    /// <p>You can't create a stateful session by using the <code>InvokeEndpointWithResponseStream</code> action. Instead, you can create one by using the <code>InvokeEndpoint</code> action. In your request, you specify <code>NEW_SESSION</code> for the <code>SessionId</code> request parameter. The response to that request provides the session ID for the <code>NewSessionId</code> response parameter.</p>
    pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_session_id(input);
        self
    }
    /// <p>The ID of a stateful session to handle your request.</p>
    /// <p>You can't create a stateful session by using the <code>InvokeEndpointWithResponseStream</code> action. Instead, you can create one by using the <code>InvokeEndpoint</code> action. In your request, you specify <code>NEW_SESSION</code> for the <code>SessionId</code> request parameter. The response to that request provides the session ID for the <code>NewSessionId</code> response parameter.</p>
    pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_session_id()
    }
}