aws_sdk_bedrockruntime/operation/converse_stream/builders.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::converse_stream::_converse_stream_output::ConverseStreamOutputBuilder;

pub use crate::operation::converse_stream::_converse_stream_input::ConverseStreamInputBuilder;

impl crate::operation::converse_stream::builders::ConverseStreamInputBuilder {
    /// Sends a request with this input using the given client.
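    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an already-configured `client`; the model ID
    /// and message text are placeholders, and error handling is elided.
    /// ```no_run
    /// # async fn example(client: &aws_sdk_bedrockruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::operation::converse_stream::ConverseStreamInput;
    /// use aws_sdk_bedrockruntime::types::{ContentBlock, ConversationRole, Message};
    ///
    /// // Build the input on its own, then send it with an existing client.
    /// let output = ConverseStreamInput::builder()
    ///     .model_id("anthropic.claude-3-5-sonnet-20240620-v1:0") // placeholder model ID
    ///     .messages(
    ///         Message::builder()
    ///             .role(ConversationRole::User)
    ///             .content(ContentBlock::Text("Hello!".to_string()))
    ///             .build()?,
    ///     )
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(()) }
    /// ```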
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::converse_stream::ConverseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::converse_stream::ConverseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.converse_stream();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `ConverseStream`.
///
/// <p>Sends messages to the specified Amazon Bedrock model and returns the response in a stream. <code>ConverseStream</code> provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model.</p>
/// <p>To find out if a model supports streaming, call <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_GetFoundationModel.html">GetFoundationModel</a> and check the <code>responseStreamingSupported</code> field in the response.</p><note>
/// <p>The CLI doesn't support streaming operations in Amazon Bedrock, including <code>ConverseStream</code>.</p>
/// </note>
/// <p>Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.</p>
/// <p>You can submit a prompt by including it in the <code>messages</code> field, specifying the <code>modelId</code> of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case.</p>
/// <p>You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the <code>promptVariables</code> field. You can append more messages to the prompt by using the <code>messages</code> field. If you use a prompt from Prompt management, you can't include the following fields in the request: <code>additionalModelRequestFields</code>, <code>inferenceConfig</code>, <code>system</code>, or <code>toolConfig</code>. Instead, these fields must be defined through Prompt management. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management-use.html">Use a prompt from Prompt management</a>.</p>
/// <p>For information about the Converse API, see <i>Use the Converse API</i> in the <i>Amazon Bedrock User Guide</i>. To use a guardrail, see <i>Use a guardrail with the Converse API</i> in the <i>Amazon Bedrock User Guide</i>. To use a tool with a model, see <i>Tool use (Function calling)</i> in the <i>Amazon Bedrock User Guide</i>.</p>
/// <p>For example code, see <i>Conversation streaming example</i> in the <i>Amazon Bedrock User Guide</i>.</p>
/// <p>This operation requires permission for the <code>bedrock:InvokeModelWithResponseStream</code> action.</p><important>
/// <p>To deny all inference access to resources that you specify in the <code>modelId</code> field, you need to deny access to the <code>bedrock:InvokeModel</code> and <code>bedrock:InvokeModelWithResponseStream</code> actions. Doing this also denies access to the resource through the base inference actions (<a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html">InvokeModel</a> and <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModelWithResponseStream.html">InvokeModelWithResponseStream</a>). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html#security_iam_id-based-policy-examples-deny-inference">Deny access for inference on specific models</a>.</p>
/// </important>
/// <p>For troubleshooting some of the common errors you might encounter when using the <code>ConverseStream</code> API, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/troubleshooting-api-error-codes.html">Troubleshooting Amazon Bedrock API Error Codes</a> in the <i>Amazon Bedrock User Guide</i>.</p>
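///
/// # Example
///
/// A minimal streaming sketch, assuming an already-configured client; the model
/// ID and prompt are placeholders. The response's event stream is drained until
/// `recv` returns `None`.
/// ```no_run
/// # async fn example(client: &aws_sdk_bedrockruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_bedrockruntime::types::{
///     ContentBlock, ContentBlockDelta, ConversationRole, ConverseStreamOutput, Message,
/// };
///
/// let mut output = client
///     .converse_stream()
///     .model_id("anthropic.claude-3-5-sonnet-20240620-v1:0") // placeholder model ID
///     .messages(
///         Message::builder()
///             .role(ConversationRole::User)
///             .content(ContentBlock::Text("Write a haiku about rivers.".to_string()))
///             .build()?,
///     )
///     .send()
///     .await?;
///
/// // Receive events until the stream is exhausted, printing text deltas as they arrive.
/// while let Some(event) = output.stream.recv().await? {
///     if let ConverseStreamOutput::ContentBlockDelta(delta_event) = event {
///         if let Some(ContentBlockDelta::Text(text)) = delta_event.delta {
///             print!("{text}");
///         }
///     }
/// }
/// # Ok(()) }
/// ```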
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct ConverseStreamFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::converse_stream::builders::ConverseStreamInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::converse_stream::ConverseStreamOutput,
        crate::operation::converse_stream::ConverseStreamError,
    > for ConverseStreamFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::converse_stream::ConverseStreamOutput,
            crate::operation::converse_stream::ConverseStreamError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl ConverseStreamFluentBuilder {
    /// Creates a new `ConverseStreamFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the ConverseStream as a reference.
    pub fn as_input(&self) -> &crate::operation::converse_stream::builders::ConverseStreamInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
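    ///
    /// A sketch of setting retry behavior at client-configuration time (the
    /// maximum-attempts value shown is illustrative):
    /// ```no_run
    /// # async fn example() {
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// let config = aws_config::defaults(aws_config::BehaviorVersion::latest())
    ///     .retry_config(RetryConfig::standard().with_max_attempts(3))
    ///     .load()
    ///     .await;
    /// let client = aws_sdk_bedrockruntime::Client::new(&config);
    /// # }
    /// ```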
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::converse_stream::ConverseStreamOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::converse_stream::ConverseStreamError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::converse_stream::ConverseStream::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::converse_stream::ConverseStream::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
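    ///
    /// For instance (a hedged sketch), a custom header can be attached to the
    /// underlying HTTP request before sending; the header name and value are
    /// hypothetical:
    /// ```no_run
    /// # async fn example(
    /// #     builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = builder
    ///     .customize()
    ///     .mutate_request(|req| {
    ///         // Illustrative only: add a header to the outgoing request.
    ///         req.headers_mut().insert("x-example-header", "example-value");
    ///     })
    ///     .send()
    ///     .await?;
    /// # Ok(()) }
    /// ```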
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::converse_stream::ConverseStreamOutput,
        crate::operation::converse_stream::ConverseStreamError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
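    ///
    /// For instance (the ID below is illustrative):
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// // Base model by ID; a full ARN or an inference-profile ID also works here.
    /// let builder = builder.model_id("anthropic.claude-3-5-sonnet-20240620-v1:0");
    /// # }
    /// ```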
    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_id(input.into());
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_id(input);
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_id()
    }
    ///
    /// Appends an item to `messages`.
    ///
    /// To override the contents of this collection use [`set_messages`](Self::set_messages).
    ///
    /// <p>The messages that you want to send to the model.</p>
    pub fn messages(mut self, input: crate::types::Message) -> Self {
        self.inner = self.inner.messages(input);
        self
    }
    /// <p>The messages that you want to send to the model.</p>
    pub fn set_messages(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Message>>) -> Self {
        self.inner = self.inner.set_messages(input);
        self
    }
    /// <p>The messages that you want to send to the model.</p>
    pub fn get_messages(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Message>> {
        self.inner.get_messages()
    }
    ///
    /// Appends an item to `system`.
    ///
    /// To override the contents of this collection use [`set_system`](Self::set_system).
    ///
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
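    ///
    /// For instance (a hedged sketch; the prompt text is illustrative), a
    /// plain-text system prompt can be appended:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::SystemContentBlock;
    ///
    /// let builder = builder.system(SystemContentBlock::Text("You are a terse assistant.".to_string()));
    /// # }
    /// ```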
    pub fn system(mut self, input: crate::types::SystemContentBlock) -> Self {
        self.inner = self.inner.system(input);
        self
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn set_system(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>) -> Self {
        self.inner = self.inner.set_system(input);
        self
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn get_system(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>> {
        self.inner.get_system()
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
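    ///
    /// A minimal sketch with illustrative values:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::InferenceConfiguration;
    ///
    /// // Cap the response length and soften sampling; both values are illustrative.
    /// let builder = builder.inference_config(
    ///     InferenceConfiguration::builder()
    ///         .max_tokens(512)
    ///         .temperature(0.7)
    ///         .build(),
    /// );
    /// # }
    /// ```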
    pub fn inference_config(mut self, input: crate::types::InferenceConfiguration) -> Self {
        self.inner = self.inner.inference_config(input);
        self
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn set_inference_config(mut self, input: ::std::option::Option<crate::types::InferenceConfiguration>) -> Self {
        self.inner = self.inner.set_inference_config(input);
        self
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn get_inference_config(&self) -> &::std::option::Option<crate::types::InferenceConfiguration> {
        self.inner.get_inference_config()
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
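    ///
    /// A minimal sketch of a single tool definition; the tool name and schema
    /// are hypothetical, and the schema document is built by hand:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::types::{Tool, ToolConfiguration, ToolInputSchema, ToolSpecification};
    /// use aws_smithy_types::Document;
    /// use std::collections::HashMap;
    ///
    /// // A trivial JSON schema: an object with no required properties.
    /// let schema = Document::Object(HashMap::from([(
    ///     "type".to_string(),
    ///     Document::String("object".to_string()),
    /// )]));
    ///
    /// let builder = builder.tool_config(
    ///     ToolConfiguration::builder()
    ///         .tools(Tool::ToolSpec(
    ///             ToolSpecification::builder()
    ///                 .name("get_weather") // hypothetical tool name
    ///                 .input_schema(ToolInputSchema::Json(schema))
    ///                 .build()?,
    ///         ))
    ///         .build()?,
    /// );
    /// # Ok(()) }
    /// ```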
    pub fn tool_config(mut self, input: crate::types::ToolConfiguration) -> Self {
        self.inner = self.inner.tool_config(input);
        self
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn set_tool_config(mut self, input: ::std::option::Option<crate::types::ToolConfiguration>) -> Self {
        self.inner = self.inner.set_tool_config(input);
        self
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn get_tool_config(&self) -> &::std::option::Option<crate::types::ToolConfiguration> {
        self.inner.get_tool_config()
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
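    ///
    /// A sketch with placeholder identifiers:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::types::GuardrailStreamConfiguration;
    ///
    /// let builder = builder.guardrail_config(
    ///     GuardrailStreamConfiguration::builder()
    ///         .guardrail_identifier("my-guardrail-id") // placeholder
    ///         .guardrail_version("1") // placeholder
    ///         .build()?,
    /// );
    /// # Ok(()) }
    /// ```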
    pub fn guardrail_config(mut self, input: crate::types::GuardrailStreamConfiguration) -> Self {
        self.inner = self.inner.guardrail_config(input);
        self
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn set_guardrail_config(mut self, input: ::std::option::Option<crate::types::GuardrailStreamConfiguration>) -> Self {
        self.inner = self.inner.set_guardrail_config(input);
        self
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn get_guardrail_config(&self) -> &::std::option::Option<crate::types::GuardrailStreamConfiguration> {
        self.inner.get_guardrail_config()
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
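    ///
    /// A sketch passing one hypothetical model-specific parameter (`top_k`) as a
    /// document:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// use aws_smithy_types::{Document, Number};
    /// use std::collections::HashMap;
    ///
    /// let fields = Document::Object(HashMap::from([(
    ///     "top_k".to_string(),
    ///     Document::Number(Number::PosInt(200)),
    /// )]));
    /// let builder = builder.additional_model_request_fields(fields);
    /// # }
    /// ```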
    pub fn additional_model_request_fields(mut self, input: ::aws_smithy_types::Document) -> Self {
        self.inner = self.inner.additional_model_request_fields(input);
        self
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn set_additional_model_request_fields(mut self, input: ::std::option::Option<::aws_smithy_types::Document>) -> Self {
        self.inner = self.inner.set_additional_model_request_fields(input);
        self
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn get_additional_model_request_fields(&self) -> &::std::option::Option<::aws_smithy_types::Document> {
        self.inner.get_additional_model_request_fields()
    }
    ///
    /// Adds a key-value pair to `promptVariables`.
    ///
    /// To override the contents of this collection use [`set_prompt_variables`](Self::set_prompt_variables).
    ///
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
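    ///
    /// A sketch filling one hypothetical variable named `topic`:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::PromptVariableValues;
    ///
    /// let builder = builder.prompt_variables("topic", PromptVariableValues::Text("rivers".to_string()));
    /// # }
    /// ```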
    pub fn prompt_variables(mut self, k: impl ::std::convert::Into<::std::string::String>, v: crate::types::PromptVariableValues) -> Self {
        self.inner = self.inner.prompt_variables(k.into(), v);
        self
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn set_prompt_variables(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
    ) -> Self {
        self.inner = self.inner.set_prompt_variables(input);
        self
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn get_prompt_variables(
        &self,
    ) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
        self.inner.get_prompt_variables()
    }
    ///
    /// Appends an item to `additionalModelResponseFieldPaths`.
    ///
    /// To override the contents of this collection use [`set_additional_model_response_field_paths`](Self::set_additional_model_response_field_paths).
    ///
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or an incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
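    ///
    /// For instance, requesting the path from the example JSON above:
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// let builder = builder.additional_model_response_field_paths("/stop_sequence");
    /// # }
    /// ```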
    pub fn additional_model_response_field_paths(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.additional_model_response_field_paths(input.into());
        self
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or an incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn set_additional_model_response_field_paths(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.inner = self.inner.set_additional_model_response_field_paths(input);
        self
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or an incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn get_additional_model_response_field_paths(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        self.inner.get_additional_model_response_field_paths()
    }
    ///
    /// Adds a key-value pair to `requestMetadata`.
    ///
    /// To override the contents of this collection use [`set_request_metadata`](Self::set_request_metadata).
    ///
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
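    ///
    /// For instance (hypothetical keys and values):
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// let builder = builder
    ///     .request_metadata("tenant", "acme")
    ///     .request_metadata("session", "abc-123");
    /// # }
    /// ```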
    pub fn request_metadata(
        mut self,
        k: impl ::std::convert::Into<::std::string::String>,
        v: impl ::std::convert::Into<::std::string::String>,
    ) -> Self {
        self.inner = self.inner.request_metadata(k.into(), v.into());
        self
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn set_request_metadata(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_request_metadata(input);
        self
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn get_request_metadata(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.inner.get_request_metadata()
    }
    /// <p>Model performance settings for the request.</p>
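    ///
    /// A sketch requesting latency-optimized inference (availability varies by
    /// model and Region):
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::{PerformanceConfigLatency, PerformanceConfiguration};
    ///
    /// let builder = builder.performance_config(
    ///     PerformanceConfiguration::builder()
    ///         .latency(PerformanceConfigLatency::Optimized)
    ///         .build(),
    /// );
    /// # }
    /// ```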
    pub fn performance_config(mut self, input: crate::types::PerformanceConfiguration) -> Self {
        self.inner = self.inner.performance_config(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn set_performance_config(mut self, input: ::std::option::Option<crate::types::PerformanceConfiguration>) -> Self {
        self.inner = self.inner.set_performance_config(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn get_performance_config(&self) -> &::std::option::Option<crate::types::PerformanceConfiguration> {
        self.inner.get_performance_config()
    }
}