aws_sdk_bedrockruntime/operation/invoke_model_with_response_stream/builders.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::invoke_model_with_response_stream::_invoke_model_with_response_stream_output::InvokeModelWithResponseStreamOutputBuilder;

pub use crate::operation::invoke_model_with_response_stream::_invoke_model_with_response_stream_input::InvokeModelWithResponseStreamInputBuilder;

impl crate::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder {
    /// Sends a request with this input using the given client.
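    ///
    /// A minimal sketch of how this might be used; it assumes you already have a configured
    /// `aws_sdk_bedrockruntime::Client` and a populated input builder (the variable names below
    /// are illustrative, not part of the generated API):
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &aws_sdk_bedrockruntime::Client,
    /// #     input_builder: aws_sdk_bedrockruntime::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// // Build and send the request with an existing client; failures surface as `SdkError`.
    /// let output = input_builder.send_with(client).await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```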
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.invoke_model_with_response_stream();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `InvokeModelWithResponseStream`.
///
/// <p>Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.</p>
/// <p>To see if a model supports streaming, call <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_GetFoundationModel.html">GetFoundationModel</a> and check the <code>responseStreamingSupported</code> field in the response.</p><note>
/// <p>The CLI doesn't support streaming operations in Amazon Bedrock, including <code>InvokeModelWithResponseStream</code>.</p>
/// </note>
/// <p>For example code, see <i>Invoke model with streaming code example</i> in the <i>Amazon Bedrock User Guide</i>.</p>
/// <p>This operation requires permissions to perform the <code>bedrock:InvokeModelWithResponseStream</code> action.</p><important>
/// <p>To deny all inference access to resources that you specify in the modelId field, you need to deny access to the <code>bedrock:InvokeModel</code> and <code>bedrock:InvokeModelWithResponseStream</code> actions. Doing this also denies access to the resource through the Converse API actions (<a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html">Converse</a> and <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ConverseStream.html">ConverseStream</a>). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html#security_iam_id-based-policy-examples-deny-inference">Deny access for inference on specific models</a>.</p>
/// </important>
/// <p>For troubleshooting some of the common errors you might encounter when using the <code>InvokeModelWithResponseStream</code> API, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/troubleshooting-api-error-codes.html">Troubleshooting Amazon Bedrock API Error Codes</a> in the Amazon Bedrock User Guide.</p>
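///
/// A hedged sketch of invoking a model and draining the response stream. It assumes a configured
/// client, a model identifier you have access to, and that the event stream exposes a `recv()`
/// method yielding `ResponseStream::Chunk` events; the JSON payload and model ID shown are purely
/// illustrative and model-specific.
///
/// ```no_run
/// # async fn example(client: &aws_sdk_bedrockruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_bedrockruntime::types::ResponseStream;
/// use aws_smithy_types::Blob;
///
/// // The body format depends on the model; replace the payload and model ID with real values.
/// let mut output = client
///     .invoke_model_with_response_stream()
///     .model_id("model-id-or-arn")
///     .content_type("application/json")
///     .accept("application/json")
///     .body(Blob::new(r#"{"prompt": "Hello"}"#))
///     .send()
///     .await?;
///
/// // Drain the event stream; each `Chunk` carries a model-specific JSON fragment.
/// while let Some(event) = output.body.recv().await? {
///     if let ResponseStream::Chunk(part) = event {
///         if let Some(bytes) = part.bytes() {
///             println!("received {} bytes", bytes.as_ref().len());
///         }
///     }
/// }
/// # Ok(())
/// # }
/// ```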
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct InvokeModelWithResponseStreamFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
    > for InvokeModelWithResponseStreamFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl InvokeModelWithResponseStreamFluentBuilder {
    /// Creates a new `InvokeModelWithResponseStreamFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the InvokeModelWithResponseStream as a reference.
    pub fn as_input(&self) -> &crate::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
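    ///
    /// A hedged sketch of matching on the returned error; it assumes `SdkError::into_service_error`
    /// is available for converting a failed call into the operation-level error type:
    ///
    /// ```no_run
    /// # async fn example(builder: aws_sdk_bedrockruntime::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamFluentBuilder) {
    /// match builder.send().await {
    ///     Ok(output) => {
    ///         // Consume `output.body` (the response event stream) here.
    ///         let _ = output;
    ///     }
    ///     Err(err) => {
    ///         // Convert the transport-level `SdkError` into the operation error for inspection.
    ///         eprintln!("invoke failed: {:?}", err.into_service_error());
    ///     }
    /// }
    /// # }
    /// ```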
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStream::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        let mut output =
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStream::orchestrate(&runtime_plugins, input).await?;

        // Converts any error encountered beyond this point into an `SdkError` response error
        // with an `HttpResponse`. However, since we have already exited the `orchestrate`
        // function, the original `HttpResponse` is no longer available and cannot be restored.
        // This means that header information from the original response has been lost.
        //
        // Note that the response body would have been consumed by the deserializer
        // regardless, even if the initial message was hypothetically processed during
        // the orchestrator's deserialization phase but later resulted in an error.
        fn response_error(
            err: impl ::std::convert::Into<::aws_smithy_runtime_api::box_error::BoxError>,
        ) -> ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        > {
            ::aws_smithy_runtime_api::client::result::SdkError::response_error(
                err,
                ::aws_smithy_runtime_api::client::orchestrator::HttpResponse::new(
                    ::aws_smithy_runtime_api::http::StatusCode::try_from(200).expect("valid successful code"),
                    ::aws_smithy_types::body::SdkBody::empty(),
                ),
            )
        }

        let message = output.body.try_recv_initial_response().await.map_err(response_error)?;

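        // Whether or not an initial-response message was received, it is discarded and the
        // streaming output is returned to the caller; only transport errors from probing for it
        // (mapped by `response_error` above) are propagated.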
        match message {
            ::std::option::Option::Some(_message) => ::std::result::Result::Ok(output),
            ::std::option::Option::None => ::std::result::Result::Ok(output),
        }
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
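    ///
    /// A hedged sketch of overriding configuration for a single call; it assumes the returned
    /// `CustomizableOperation` exposes `config_override` and `send`, and that `Region` is
    /// re-exported from the `config` module. The region value is illustrative:
    ///
    /// ```no_run
    /// # async fn example(builder: aws_sdk_bedrockruntime::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::config::{Config, Region};
    ///
    /// let output = builder
    ///     .customize()
    ///     // Apply a per-request config override (for example, a different region) before sending.
    ///     .config_override(Config::builder().region(Region::new("us-east-1")))
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```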
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamOutput,
        crate::operation::invoke_model_with_response_stream::InvokeModelWithResponseStreamError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
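    ///
    /// A hedged sketch of constructing the body from a JSON string; the payload shown is purely
    /// illustrative, since the exact fields depend on the model being invoked:
    ///
    /// ```no_run
    /// use aws_smithy_types::Blob;
    ///
    /// // `Blob::new` accepts anything convertible into `Vec<u8>`, such as a JSON string.
    /// let body = Blob::new(r#"{"prompt": "Hello"}"#);
    /// # let _ = body;
    /// ```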
    pub fn body(mut self, input: ::aws_smithy_types::Blob) -> Self {
        self.inner = self.inner.body(input);
        self
    }
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
    pub fn set_body(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self {
        self.inner = self.inner.set_body(input);
        self
    }
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
    pub fn get_body(&self) -> &::std::option::Option<::aws_smithy_types::Blob> {
        self.inner.get_body()
    }
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.content_type(input.into());
        self
    }
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_content_type(input);
        self
    }
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_content_type()
    }
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub fn accept(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.accept(input.into());
        self
    }
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub fn set_accept(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_accept(input);
        self
    }
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub fn get_accept(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_accept()
    }
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_id(input.into());
        self
    }
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_id(input);
        self
    }
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_id()
    }
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub fn trace(mut self, input: crate::types::Trace) -> Self {
        self.inner = self.inner.trace(input);
        self
    }
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub fn set_trace(mut self, input: ::std::option::Option<crate::types::Trace>) -> Self {
        self.inner = self.inner.set_trace(input);
        self
    }
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub fn get_trace(&self) -> &::std::option::Option<crate::types::Trace> {
        self.inner.get_trace()
    }
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error is thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
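    ///
    /// A hedged sketch showing the identifier and version being set together (per the last point
    /// above, a version must accompany the identifier); both values are placeholders:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::invoke_model_with_response_stream::builders::InvokeModelWithResponseStreamFluentBuilder) {
    /// let builder = builder
    ///     .guardrail_identifier("guardrail-id-or-arn")
    ///     .guardrail_version("1"); // or "DRAFT"
    /// # let _ = builder;
    /// # }
    /// ```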
    pub fn guardrail_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.guardrail_identifier(input.into());
        self
    }
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error is thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
    pub fn set_guardrail_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_guardrail_identifier(input);
        self
    }
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error is thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
    pub fn get_guardrail_identifier(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_guardrail_identifier()
    }
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub fn guardrail_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.guardrail_version(input.into());
        self
    }
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub fn set_guardrail_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_guardrail_version(input);
        self
    }
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub fn get_guardrail_version(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_guardrail_version()
    }
    /// <p>Model performance settings for the request.</p>
    pub fn performance_config_latency(mut self, input: crate::types::PerformanceConfigLatency) -> Self {
        self.inner = self.inner.performance_config_latency(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn set_performance_config_latency(mut self, input: ::std::option::Option<crate::types::PerformanceConfigLatency>) -> Self {
        self.inner = self.inner.set_performance_config_latency(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn get_performance_config_latency(&self) -> &::std::option::Option<crate::types::PerformanceConfigLatency> {
        self.inner.get_performance_config_latency()
    }
}