aws_sdk_sagemakerruntime/operation/invoke_endpoint_with_response_stream/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::invoke_endpoint_with_response_stream::_invoke_endpoint_with_response_stream_output::InvokeEndpointWithResponseStreamOutputBuilder;

pub use crate::operation::invoke_endpoint_with_response_stream::_invoke_endpoint_with_response_stream_input::InvokeEndpointWithResponseStreamInputBuilder;

impl crate::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder {
    /// Sends a request with this input using the given client.
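    ///
    /// A minimal usage sketch is shown below. It assumes an already-configured
    /// client and a hypothetical streaming-capable endpoint named `"my-endpoint"`;
    /// adjust the content type and payload to whatever your model container expects.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemakerruntime::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder;
    /// use aws_smithy_types::Blob;
    ///
    /// // Build the input separately, then send it with an existing client.
    /// let output = InvokeEndpointWithResponseStreamInputBuilder::default()
    ///     .endpoint_name("my-endpoint")                      // hypothetical endpoint
    ///     .content_type("application/json")
    ///     .body(Blob::new(br#"{"inputs":"hello"}"#.to_vec()))
    ///     .send_with(client)
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```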
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.invoke_endpoint_with_response_stream();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `InvokeEndpointWithResponseStream`.
///
/// <p>Invokes a model at the specified endpoint to return the inference response as a stream. The inference stream provides the response payload incrementally as a series of parts. Before you can get an inference stream, you must have access to a model that's deployed using Amazon SageMaker AI hosting services, and the container for that model must support inference streaming.</p>
/// <p>For more information that can help you use this API, see the following sections in the <i>Amazon SageMaker AI Developer Guide</i>:</p>
/// <ul>
/// <li>
/// <p>For information about how to add streaming support to a model, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-code-how-containe-serves-requests">How Containers Serve Requests</a>.</p></li>
/// <li>
/// <p>For information about how to process the streaming response, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-test-endpoints.html">Invoke real-time endpoints</a>.</p></li>
/// </ul>
/// <p>Before you can use this operation, your IAM permissions must allow the <code>sagemaker:InvokeEndpoint</code> action. For more information about Amazon SageMaker AI actions for IAM policies, see <a href="https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsagemaker.html">Actions, resources, and condition keys for Amazon SageMaker AI</a> in the <i>IAM Service Authorization Reference</i>.</p>
/// <p>Amazon SageMaker AI strips all POST headers except those supported by the API. Amazon SageMaker AI might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.</p>
/// <p>Calls to <code>InvokeEndpointWithResponseStream</code> are authenticated by using Amazon Web Services Signature Version 4. For information, see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">Authenticating Requests (Amazon Web Services Signature Version 4)</a> in the <i>Amazon S3 API Reference</i>.</p>
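///
/// # Example
///
/// A hedged end-to-end sketch: it assumes a configured client, a hypothetical
/// streaming endpoint named `"my-endpoint"`, and that the model emits
/// `PayloadPart` events on the response stream. Event variants other than
/// `PayloadPart` are skipped here for brevity.
///
/// ```no_run
/// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_sagemakerruntime::types::ResponseStream;
/// use aws_smithy_types::Blob;
///
/// let output = client
///     .invoke_endpoint_with_response_stream()
///     .endpoint_name("my-endpoint")                          // hypothetical endpoint
///     .content_type("application/json")
///     .body(Blob::new(br#"{"inputs":"hello"}"#.to_vec()))
///     .send()
///     .await?;
///
/// // The response body is an event stream; pull parts until it is exhausted.
/// let mut stream = output.body;
/// while let Some(event) = stream.recv().await? {
///     if let ResponseStream::PayloadPart(part) = event {
///         if let Some(bytes) = part.bytes() {
///             print!("{}", String::from_utf8_lossy(bytes.as_ref()));
///         }
///     }
/// }
/// # Ok(())
/// # }
/// ```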
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct InvokeEndpointWithResponseStreamFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
    > for InvokeEndpointWithResponseStreamFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl InvokeEndpointWithResponseStreamFluentBuilder {
    /// Creates a new `InvokeEndpointWithResponseStreamFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the InvokeEndpointWithResponseStream input as a reference.
    pub fn as_input(&self) -> &crate::operation::invoke_endpoint_with_response_stream::builders::InvokeEndpointWithResponseStreamInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
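    ///
    /// For instance, a sketch of raising the retry limit when building the client
    /// (assuming the usual `aws_config` setup; the attempt count is illustrative):
    ///
    /// ```no_run
    /// # async fn example() -> aws_sdk_sagemakerruntime::Client {
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// // Load shared AWS configuration, then allow up to five attempts per request.
    /// let shared_config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    /// let config = aws_sdk_sagemakerruntime::config::Builder::from(&shared_config)
    ///     .retry_config(RetryConfig::standard().with_max_attempts(5))
    ///     .build();
    /// aws_sdk_sagemakerruntime::Client::from_conf(config)
    /// # }
    /// ```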
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStream::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        let mut output =
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStream::orchestrate(&runtime_plugins, input).await?;

        // Converts any error encountered beyond this point into an `SdkError` response error
        // with an `HttpResponse`. However, since we have already exited the `orchestrate`
        // function, the original `HttpResponse` is no longer available and cannot be restored.
        // This means that header information from the original response has been lost.
        //
        // Note that the response body would have been consumed by the deserializer
        // regardless, even if the initial message was hypothetically processed during
        // the orchestrator's deserialization phase but later resulted in an error.
        fn response_error(
            err: impl ::std::convert::Into<::aws_smithy_runtime_api::box_error::BoxError>,
        ) -> ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        > {
            ::aws_smithy_runtime_api::client::result::SdkError::response_error(
                err,
                ::aws_smithy_runtime_api::client::orchestrator::HttpResponse::new(
                    ::aws_smithy_runtime_api::http::StatusCode::try_from(200).expect("valid successful code"),
                    ::aws_smithy_types::body::SdkBody::empty(),
                ),
            )
        }

        let message = output.body.try_recv_initial_response().await.map_err(response_error)?;

        match message {
            ::std::option::Option::Some(_message) => ::std::result::Result::Ok(output),
            ::std::option::Option::None => ::std::result::Result::Ok(output),
        }
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
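    ///
    /// For example, a hedged sketch of overriding the region for just this call
    /// (endpoint name, payload, and region value are illustrative):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemakerruntime::config::Region;
    /// use aws_smithy_types::Blob;
    ///
    /// let output = client
    ///     .invoke_endpoint_with_response_stream()
    ///     .endpoint_name("my-endpoint")                      // hypothetical endpoint
    ///     .content_type("application/json")
    ///     .body(Blob::new(br#"{"inputs":"hello"}"#.to_vec()))
    ///     .customize()
    ///     // Apply a one-off config override without rebuilding the client.
    ///     .config_override(aws_sdk_sagemakerruntime::config::Config::builder().region(Region::new("us-west-2")))
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```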
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamOutput,
        crate::operation::invoke_endpoint_with_response_stream::InvokeEndpointWithResponseStreamError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The name of the endpoint that you specified when you created the endpoint using the <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.</p>
    pub fn endpoint_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.endpoint_name(input.into());
        self
    }
    /// <p>The name of the endpoint that you specified when you created the endpoint using the <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.</p>
    pub fn set_endpoint_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_endpoint_name(input);
        self
    }
    /// <p>The name of the endpoint that you specified when you created the endpoint using the <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html">CreateEndpoint</a> API.</p>
    pub fn get_endpoint_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_endpoint_name()
    }
    /// <p>Provides input data, in the format specified in the <code>ContentType</code> request header. Amazon SageMaker AI passes all of the data in the body to the model.</p>
    /// <p>For information about the format of the request body, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html">Common Data Formats-Inference</a>.</p>
    pub fn body(mut self, input: ::aws_smithy_types::Blob) -> Self {
        self.inner = self.inner.body(input);
        self
    }
    /// <p>Provides input data, in the format specified in the <code>ContentType</code> request header. Amazon SageMaker AI passes all of the data in the body to the model.</p>
    /// <p>For information about the format of the request body, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html">Common Data Formats-Inference</a>.</p>
    pub fn set_body(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self {
        self.inner = self.inner.set_body(input);
        self
    }
    /// <p>Provides input data, in the format specified in the <code>ContentType</code> request header. Amazon SageMaker AI passes all of the data in the body to the model.</p>
    /// <p>For information about the format of the request body, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html">Common Data Formats-Inference</a>.</p>
    pub fn get_body(&self) -> &::std::option::Option<::aws_smithy_types::Blob> {
        self.inner.get_body()
    }
    /// <p>The MIME type of the input data in the request body.</p>
    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.content_type(input.into());
        self
    }
    /// <p>The MIME type of the input data in the request body.</p>
    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_content_type(input);
        self
    }
    /// <p>The MIME type of the input data in the request body.</p>
    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_content_type()
    }
    /// <p>The desired MIME type of the inference response from the model container.</p>
    pub fn accept(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.accept(input.into());
        self
    }
    /// <p>The desired MIME type of the inference response from the model container.</p>
    pub fn set_accept(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_accept(input);
        self
    }
    /// <p>The desired MIME type of the inference response from the model container.</p>
    pub fn get_accept(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_accept()
    }
    /// <p>Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker AI endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in <a href="https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6">Section 3.2.6. Field Value Components</a> of the Hypertext Transfer Protocol (HTTP/1.1).</p>
    /// <p>The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with <code>Trace ID:</code> in your post-processing function.</p>
    /// <p>This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker AI Python SDK.</p>
    pub fn custom_attributes(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.custom_attributes(input.into());
        self
    }
    /// <p>Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker AI endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in <a href="https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6">Section 3.2.6. Field Value Components</a> of the Hypertext Transfer Protocol (HTTP/1.1).</p>
    /// <p>The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with <code>Trace ID:</code> in your post-processing function.</p>
    /// <p>This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker AI Python SDK.</p>
    pub fn set_custom_attributes(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_custom_attributes(input);
        self
    }
    /// <p>Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker AI endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in <a href="https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.6">Section 3.2.6. Field Value Components</a> of the Hypertext Transfer Protocol (HTTP/1.1).</p>
    /// <p>The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with <code>Trace ID:</code> in your post-processing function.</p>
    /// <p>This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker AI Python SDK.</p>
    pub fn get_custom_attributes(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_custom_attributes()
    }
    /// <p>Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.</p>
    /// <p>For information about how to use variant targeting to perform A/B testing, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html">Test models in production</a>.</p>
    pub fn target_variant(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.target_variant(input.into());
        self
    }
    /// <p>Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.</p>
    /// <p>For information about how to use variant targeting to perform A/B testing, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html">Test models in production</a>.</p>
    pub fn set_target_variant(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_target_variant(input);
        self
    }
    /// <p>Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.</p>
    /// <p>For information about how to use variant targeting to perform A/B testing, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html">Test models in production</a>.</p>
    pub fn get_target_variant(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_target_variant()
    }
    /// <p>If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.</p>
    pub fn target_container_hostname(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.target_container_hostname(input.into());
        self
    }
    /// <p>If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.</p>
    pub fn set_target_container_hostname(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_target_container_hostname(input);
        self
    }
    /// <p>If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.</p>
    pub fn get_target_container_hostname(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_target_container_hostname()
    }
    /// <p>An identifier that you assign to your request.</p>
    pub fn inference_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.inference_id(input.into());
        self
    }
    /// <p>An identifier that you assign to your request.</p>
    pub fn set_inference_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_inference_id(input);
        self
    }
    /// <p>An identifier that you assign to your request.</p>
    pub fn get_inference_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_inference_id()
    }
    /// <p>If the endpoint hosts one or more inference components, this parameter specifies the name of the inference component to invoke for a streaming response.</p>
    pub fn inference_component_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.inference_component_name(input.into());
        self
    }
    /// <p>If the endpoint hosts one or more inference components, this parameter specifies the name of the inference component to invoke for a streaming response.</p>
    pub fn set_inference_component_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_inference_component_name(input);
        self
    }
    /// <p>If the endpoint hosts one or more inference components, this parameter specifies the name of the inference component to invoke for a streaming response.</p>
    pub fn get_inference_component_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_inference_component_name()
    }
    /// <p>The ID of a stateful session to handle your request.</p>
    /// <p>You can't create a stateful session by using the <code>InvokeEndpointWithResponseStream</code> action. Instead, you can create one by using the <code>InvokeEndpoint</code> action. In your request, you specify <code>NEW_SESSION</code> for the <code>SessionId</code> request parameter. The response to that request provides the session ID for the <code>NewSessionId</code> response parameter.</p>
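    ///
    /// A hedged sketch of the session workflow described above: first create the
    /// session with `InvokeEndpoint` and `NEW_SESSION`, then reuse the returned
    /// session ID for the streaming call (endpoint name and payloads are illustrative).
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemakerruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_smithy_types::Blob;
    ///
    /// // Start a stateful session with the non-streaming InvokeEndpoint action.
    /// let first = client
    ///     .invoke_endpoint()
    ///     .endpoint_name("my-endpoint")                      // hypothetical endpoint
    ///     .content_type("application/json")
    ///     .session_id("NEW_SESSION")
    ///     .body(Blob::new(br#"{"inputs":"hello"}"#.to_vec()))
    ///     .send()
    ///     .await?;
    ///
    /// // Reuse the session ID returned in `NewSessionId` for the streaming call.
    /// if let Some(session_id) = first.new_session_id() {
    ///     let _streamed = client
    ///         .invoke_endpoint_with_response_stream()
    ///         .endpoint_name("my-endpoint")
    ///         .content_type("application/json")
    ///         .session_id(session_id)
    ///         .body(Blob::new(br#"{"inputs":"follow-up"}"#.to_vec()))
    ///         .send()
    ///         .await?;
    /// }
    /// # Ok(())
    /// # }
    /// ```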
    pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.session_id(input.into());
        self
    }
    /// <p>The ID of a stateful session to handle your request.</p>
    /// <p>You can't create a stateful session by using the <code>InvokeEndpointWithResponseStream</code> action. Instead, you can create one by using the <code>InvokeEndpoint</code> action. In your request, you specify <code>NEW_SESSION</code> for the <code>SessionId</code> request parameter. The response to that request provides the session ID for the <code>NewSessionId</code> response parameter.</p>
    pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_session_id(input);
        self
    }
    /// <p>The ID of a stateful session to handle your request.</p>
    /// <p>You can't create a stateful session by using the <code>InvokeEndpointWithResponseStream</code> action. Instead, you can create one by using the <code>InvokeEndpoint</code> action. In your request, you specify <code>NEW_SESSION</code> for the <code>SessionId</code> request parameter. The response to that request provides the session ID for the <code>NewSessionId</code> response parameter.</p>
    pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_session_id()
    }
}