aws_sdk_bedrockruntime/operation/invoke_model_with_response_stream/builders.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2pub use crate::operation::invoke_model_with_response_stream::_invoke_model_with_response_stream_output::InvokeModelWithResponseStreamOutputBuilder;
3
4pub use crate::operation::invoke_model_with_response_stream::_invoke_model_with_response_stream_input::InvokeModelWithResponseStreamInputBuilder;
5
6impl crate::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder {
7    /// Sends a request with this input using the given client.
8    pub async fn send_with(
9        self,
10        client: &crate::Client,
11    ) -> ::std::result::Result<
12        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
13        ::aws_smithy_runtime_api::client::result::SdkError<
14            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
15            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
16        >,
17    > {
18        let mut fluent_builder = client.invoke_model_with_response_stream();
19        fluent_builder.inner = self;
20        fluent_builder.send().await
21    }
22}
/// Fluent builder constructing a request to `InvokeModelWithResponseStream`.
///
/// <p>Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.</p>
/// <p>To see if a model supports streaming, call <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_GetFoundationModel.html">GetFoundationModel</a> and check the <code>responseStreamingSupported</code> field in the response.</p><note>
/// <p>The CLI doesn't support streaming operations in Amazon Bedrock, including <code>InvokeModelWithResponseStream</code>.</p>
/// </note>
/// <p>For example code, see <i>Invoke model with streaming code example</i> in the <i>Amazon Bedrock User Guide</i>.</p>
/// <p>This operation requires permissions to perform the <code>bedrock:InvokeModelWithResponseStream</code> action.</p><important>
/// <p>To deny all inference access to resources that you specify in the modelId field, you need to deny access to the <code>bedrock:InvokeModel</code> and <code>bedrock:InvokeModelWithResponseStream</code> actions. Doing this also denies access to the resource through the Converse API actions (<a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html">Converse</a> and <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ConverseStream.html">ConverseStream</a>). For more information see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html#security_iam_id-based-policy-examples-deny-inference">Deny access for inference on specific models</a>.</p>
/// </important>
/// <p>For troubleshooting some of the common errors you might encounter when using the <code>InvokeModelWithResponseStream</code> API, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/troubleshooting-api-error-codes.html">Troubleshooting Amazon Bedrock API Error Codes</a> in the Amazon Bedrock User Guide</p>
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct InvokeModelWithResponseStreamFluentBuilder {
    // Shared client internals (config, runtime plugins) used when the request is sent.
    handle: ::std::sync::Arc<crate::client::Handle>,
    // The operation input being accumulated by the fluent setter methods.
    inner: crate::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder,
    // Optional per-operation config layered over the client config at send time.
    config_override: ::std::option::Option<crate::config::Builder>,
}
40impl
41    crate::client::customize::internal::CustomizableSend<
42        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
43        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
44    > for InvokeModelWithResponseStreamFluentBuilder
45{
46    fn send(
47        self,
48        config_override: crate::config::Builder,
49    ) -> crate::client::customize::internal::BoxFuture<
50        crate::client::customize::internal::SendResult<
51            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
52            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
53        >,
54    > {
55        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
56    }
57}
impl InvokeModelWithResponseStreamFluentBuilder {
    /// Creates a new `InvokeModelWithResponseStreamFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the InvokeModelWithResponseStream input builder accumulated so far, as a reference.
    pub fn as_input(&self) -> &crate::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        // Build the operation input; a failed build surfaces as a construction-failure SdkError.
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        // Assemble runtime plugins from the client-wide config plus any per-operation override.
        let runtime_plugins = crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStream::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStream::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    /// Sets a per-operation config override that is layered over the client config
    /// when this request is sent, returning `self` for chaining.
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    /// Sets or clears the per-operation config override in place.
    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
    pub fn body(mut self, input: ::aws_smithy_types::Blob) -> Self {
        self.inner = self.inner.body(input);
        self
    }
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
    pub fn set_body(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self {
        self.inner = self.inner.set_body(input);
        self
    }
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
    pub fn get_body(&self) -> &::std::option::Option<::aws_smithy_types::Blob> {
        self.inner.get_body()
    }
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.content_type(input.into());
        self
    }
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_content_type(input);
        self
    }
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_content_type()
    }
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub fn accept(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.accept(input.into());
        self
    }
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub fn set_accept(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_accept(input);
        self
    }
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub fn get_accept(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_accept()
    }
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_id(input.into());
        self
    }
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_id(input);
        self
    }
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_id()
    }
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub fn trace(mut self, input: crate::types::Trace) -> Self {
        self.inner = self.inner.trace(input);
        self
    }
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub fn set_trace(mut self, input: ::std::option::Option<crate::types::Trace>) -> Self {
        self.inner = self.inner.set_trace(input);
        self
    }
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub fn get_trace(&self) -> &::std::option::Option<crate::types::Trace> {
        self.inner.get_trace()
    }
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error is thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
    pub fn guardrail_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.guardrail_identifier(input.into());
        self
    }
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error is thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
    pub fn set_guardrail_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_guardrail_identifier(input);
        self
    }
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error is thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
    pub fn get_guardrail_identifier(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_guardrail_identifier()
    }
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub fn guardrail_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.guardrail_version(input.into());
        self
    }
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub fn set_guardrail_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_guardrail_version(input);
        self
    }
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub fn get_guardrail_version(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_guardrail_version()
    }
    /// <p>Model performance settings for the request.</p>
    pub fn performance_config_latency(mut self, input: crate::types::PerformanceConfigLatency) -> Self {
        self.inner = self.inner.performance_config_latency(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn set_performance_config_latency(mut self, input: ::std::option::Option<crate::types::PerformanceConfigLatency>) -> Self {
        self.inner = self.inner.set_performance_config_latency(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn get_performance_config_latency(&self) -> &::std::option::Option<crate::types::PerformanceConfigLatency> {
        self.inner.get_performance_config_latency()
    }
}
297}