aws_sdk_bedrockruntime/operation/converse/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::converse::_converse_output::ConverseOutputBuilder;

pub use crate::operation::converse::_converse_input::ConverseInputBuilder;

impl crate::operation::converse::builders::ConverseInputBuilder {
    /// Sends a request with this input using the given client.
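    ///
    /// A minimal sketch of dispatching a pre-built input; it assumes a configured
    /// client and a builder populated elsewhere:
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &aws_sdk_bedrockruntime::Client,
    /// #     input: aws_sdk_bedrockruntime::operation::converse::builders::ConverseInputBuilder,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// // Builds the input and sends it with `client`, equivalent to seeding
    /// // `client.converse()` with this builder's fields.
    /// let output = input.send_with(client).await?;
    /// println!("stop reason: {:?}", output.stop_reason());
    /// # Ok(())
    /// # }
    /// ```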
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::converse::ConverseOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::converse::ConverseError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.converse();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `Converse`.
///
/// <p>Sends messages to the specified Amazon Bedrock model. <code>Converse</code> provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model.</p>
/// <p>Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.</p>
/// <p>You can submit a prompt by including it in the <code>messages</code> field, specifying the <code>modelId</code> of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case.</p>
/// <p>You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the <code>promptVariables</code> field. You can append more messages to the prompt by using the <code>messages</code> field. If you use a prompt from Prompt management, you can't include the following fields in the request: <code>additionalModelRequestFields</code>, <code>inferenceConfig</code>, <code>system</code>, or <code>toolConfig</code>. Instead, these fields must be defined through Prompt management. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management-use.html">Use a prompt from Prompt management</a>.</p>
/// <p>For information about the Converse API, see <i>Use the Converse API</i> in the <i>Amazon Bedrock User Guide</i>. To use a guardrail, see <i>Use a guardrail with the Converse API</i> in the <i>Amazon Bedrock User Guide</i>. To use a tool with a model, see <i>Tool use (Function calling)</i> in the <i>Amazon Bedrock User Guide</i>.</p>
/// <p>For example code, see <i>Converse API examples</i> in the <i>Amazon Bedrock User Guide</i>.</p>
/// <p>This operation requires permission for the <code>bedrock:InvokeModel</code> action.</p><important>
/// <p>To deny all inference access to resources that you specify in the <code>modelId</code> field, you need to deny access to the <code>bedrock:InvokeModel</code> and <code>bedrock:InvokeModelWithResponseStream</code> actions. Doing this also denies access to the resource through the base inference actions (<a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html">InvokeModel</a> and <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModelWithResponseStream.html">InvokeModelWithResponseStream</a>). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html#security_iam_id-based-policy-examples-deny-inference">Deny access for inference on specific models</a>.</p>
/// </important>
/// <p>For troubleshooting some of the common errors you might encounter when using the <code>Converse</code> API, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/troubleshooting-api-error-codes.html">Troubleshooting Amazon Bedrock API Error Codes</a> in the <i>Amazon Bedrock User Guide</i>.</p>
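///
/// # Example
///
/// A minimal sketch of a Converse request; the model ID and prompt text are
/// illustrative placeholders:
///
/// ```no_run
/// # async fn example(client: &aws_sdk_bedrockruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_bedrockruntime::types::{ContentBlock, ConversationRole, Message};
///
/// let message = Message::builder()
///     .role(ConversationRole::User)
///     .content(ContentBlock::Text("Tell me a fun fact about Rust.".to_string()))
///     .build()?;
/// let output = client
///     .converse()
///     .model_id("anthropic.claude-3-haiku-20240307-v1:0") // hypothetical model ID
///     .messages(message)
///     .send()
///     .await?;
/// println!("stop reason: {:?}", output.stop_reason());
/// # Ok(())
/// # }
/// ```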
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct ConverseFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::converse::builders::ConverseInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl crate::client::customize::internal::CustomizableSend<crate::operation::converse::ConverseOutput, crate::operation::converse::ConverseError>
    for ConverseFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<crate::operation::converse::ConverseOutput, crate::operation::converse::ConverseError>,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl ConverseFluentBuilder {
    /// Creates a new `ConverseFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the `Converse` input builder as a reference.
    pub fn as_input(&self) -> &crate::operation::converse::builders::ConverseInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::converse::ConverseOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::converse::ConverseError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::converse::Converse::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::converse::Converse::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
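    ///
    /// A sketch of one common use, assuming the `mutate_request` helper on the
    /// returned customizable operation; the header name and model ID are
    /// illustrative placeholders (messages omitted for brevity):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_bedrockruntime::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = client
    ///     .converse()
    ///     .model_id("anthropic.claude-3-haiku-20240307-v1:0") // hypothetical model ID
    ///     .customize()
    ///     .mutate_request(|req| {
    ///         // Attach an illustrative header before the request is sent.
    ///         req.headers_mut().insert("x-example-header", "example-value");
    ///     })
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```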
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<crate::operation::converse::ConverseOutput, crate::operation::converse::ConverseError, Self>
    {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
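    ///
    /// A sketch of the accepted identifier forms; every value shown is an
    /// illustrative placeholder:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// // Base model ID (hypothetical):
    /// let _builder = builder.model_id("anthropic.claude-3-haiku-20240307-v1:0");
    /// // Other accepted forms, shown for shape only:
    /// // inference profile ID:       "us.anthropic.claude-3-haiku-20240307-v1:0"
    /// // Provisioned Throughput ARN: "arn:aws:bedrock:us-east-1:111122223333:provisioned-model/abcd1234"
    /// // prompt version ARN:         "arn:aws:bedrock:us-east-1:111122223333:prompt/PROMPT1234:1"
    /// # }
    /// ```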
    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_id(input.into());
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_id(input);
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_id()
    }
    ///
    /// Appends an item to `messages`.
    ///
    /// To override the contents of this collection use [`set_messages`](Self::set_messages).
    ///
    /// <p>The messages that you want to send to the model.</p>
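    ///
    /// A minimal sketch of appending one user message; the prompt text is an
    /// illustrative placeholder:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::types::{ContentBlock, ConversationRole, Message};
    ///
    /// let message = Message::builder()
    ///     .role(ConversationRole::User)
    ///     .content(ContentBlock::Text("Summarize this document.".to_string()))
    ///     .build()?;
    /// let _builder = builder.messages(message);
    /// # Ok(())
    /// # }
    /// ```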
    pub fn messages(mut self, input: crate::types::Message) -> Self {
        self.inner = self.inner.messages(input);
        self
    }
    /// <p>The messages that you want to send to the model.</p>
    pub fn set_messages(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Message>>) -> Self {
        self.inner = self.inner.set_messages(input);
        self
    }
    /// <p>The messages that you want to send to the model.</p>
    pub fn get_messages(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Message>> {
        self.inner.get_messages()
    }
    ///
    /// Appends an item to `system`.
    ///
    /// To override the contents of this collection use [`set_system`](Self::set_system).
    ///
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
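    ///
    /// A minimal sketch of adding a system prompt; the text is an illustrative
    /// placeholder:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::SystemContentBlock;
    ///
    /// let _builder = builder.system(SystemContentBlock::Text(
    ///     "You are a concise assistant.".to_string(),
    /// ));
    /// # }
    /// ```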
    pub fn system(mut self, input: crate::types::SystemContentBlock) -> Self {
        self.inner = self.inner.system(input);
        self
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn set_system(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>) -> Self {
        self.inner = self.inner.set_system(input);
        self
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn get_system(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>> {
        self.inner.get_system()
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
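    ///
    /// A minimal sketch with illustrative parameter values:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::InferenceConfiguration;
    ///
    /// let config = InferenceConfiguration::builder()
    ///     .max_tokens(512)
    ///     .temperature(0.5)
    ///     .build();
    /// let _builder = builder.inference_config(config);
    /// # }
    /// ```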
    pub fn inference_config(mut self, input: crate::types::InferenceConfiguration) -> Self {
        self.inner = self.inner.inference_config(input);
        self
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn set_inference_config(mut self, input: ::std::option::Option<crate::types::InferenceConfiguration>) -> Self {
        self.inner = self.inner.set_inference_config(input);
        self
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn get_inference_config(&self) -> &::std::option::Option<crate::types::InferenceConfiguration> {
        self.inner.get_inference_config()
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
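    ///
    /// A minimal sketch of registering one tool; the tool name and schema are
    /// illustrative placeholders:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::types::{Tool, ToolConfiguration, ToolInputSchema, ToolSpecification};
    /// use aws_smithy_types::Document;
    ///
    /// // An empty JSON-object schema, for brevity; a real tool would describe
    /// // its parameters here.
    /// let schema = ToolInputSchema::Json(Document::Object(Default::default()));
    /// let tool = Tool::ToolSpec(
    ///     ToolSpecification::builder()
    ///         .name("top_song")
    ///         .input_schema(schema)
    ///         .build()?,
    /// );
    /// let config = ToolConfiguration::builder().tools(tool).build()?;
    /// let _builder = builder.tool_config(config);
    /// # Ok(())
    /// # }
    /// ```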
    pub fn tool_config(mut self, input: crate::types::ToolConfiguration) -> Self {
        self.inner = self.inner.tool_config(input);
        self
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn set_tool_config(mut self, input: ::std::option::Option<crate::types::ToolConfiguration>) -> Self {
        self.inner = self.inner.set_tool_config(input);
        self
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn get_tool_config(&self) -> &::std::option::Option<crate::types::ToolConfiguration> {
        self.inner.get_tool_config()
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
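    ///
    /// A minimal sketch; the guardrail identifier and version are illustrative
    /// placeholders:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_bedrockruntime::types::GuardrailConfiguration;
    ///
    /// let config = GuardrailConfiguration::builder()
    ///     .guardrail_identifier("gr-abcd1234")
    ///     .guardrail_version("1")
    ///     .build()?;
    /// let _builder = builder.guardrail_config(config);
    /// # Ok(())
    /// # }
    /// ```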
    pub fn guardrail_config(mut self, input: crate::types::GuardrailConfiguration) -> Self {
        self.inner = self.inner.guardrail_config(input);
        self
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn set_guardrail_config(mut self, input: ::std::option::Option<crate::types::GuardrailConfiguration>) -> Self {
        self.inner = self.inner.set_guardrail_config(input);
        self
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn get_guardrail_config(&self) -> &::std::option::Option<crate::types::GuardrailConfiguration> {
        self.inner.get_guardrail_config()
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
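    ///
    /// A minimal sketch passing one model-specific parameter; `top_k` is an
    /// illustrative example and applies only to models that support it:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// use std::collections::HashMap;
    /// use aws_smithy_types::{Document, Number};
    ///
    /// let fields = Document::Object(HashMap::from([(
    ///     "top_k".to_string(),
    ///     Document::Number(Number::PosInt(200)),
    /// )]));
    /// let _builder = builder.additional_model_request_fields(fields);
    /// # }
    /// ```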
    pub fn additional_model_request_fields(mut self, input: ::aws_smithy_types::Document) -> Self {
        self.inner = self.inner.additional_model_request_fields(input);
        self
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn set_additional_model_request_fields(mut self, input: ::std::option::Option<::aws_smithy_types::Document>) -> Self {
        self.inner = self.inner.set_additional_model_request_fields(input);
        self
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn get_additional_model_request_fields(&self) -> &::std::option::Option<::aws_smithy_types::Document> {
        self.inner.get_additional_model_request_fields()
    }
    ///
    /// Adds a key-value pair to `promptVariables`.
    ///
    /// To override the contents of this collection use [`set_prompt_variables`](Self::set_prompt_variables).
    ///
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
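    ///
    /// A minimal sketch; the variable name and value are illustrative
    /// placeholders for a prompt defined in Prompt management:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::PromptVariableValues;
    ///
    /// let _builder = builder.prompt_variables("genre", PromptVariableValues::Text("pop".to_string()));
    /// # }
    /// ```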
    pub fn prompt_variables(mut self, k: impl ::std::convert::Into<::std::string::String>, v: crate::types::PromptVariableValues) -> Self {
        self.inner = self.inner.prompt_variables(k.into(), v);
        self
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn set_prompt_variables(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
    ) -> Self {
        self.inner = self.inner.set_prompt_variables(input);
        self
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn get_prompt_variables(
        &self,
    ) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
        self.inner.get_prompt_variables()
    }
    ///
    /// Appends an item to `additionalModelResponseFieldPaths`.
    ///
    /// To override the contents of this collection use [`set_additional_model_response_field_paths`](Self::set_additional_model_response_field_paths).
    ///
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
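    ///
    /// A minimal sketch requesting one extra response field by JSON Pointer;
    /// `/stop_sequence` is the example path from the documentation above:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// let _builder = builder.additional_model_response_field_paths("/stop_sequence");
    /// # }
    /// ```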
    pub fn additional_model_response_field_paths(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.additional_model_response_field_paths(input.into());
        self
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn set_additional_model_response_field_paths(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.inner = self.inner.set_additional_model_response_field_paths(input);
        self
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn get_additional_model_response_field_paths(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        self.inner.get_additional_model_response_field_paths()
    }
    ///
    /// Adds a key-value pair to `requestMetadata`.
    ///
    /// To override the contents of this collection use [`set_request_metadata`](Self::set_request_metadata).
    ///
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
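    ///
    /// A minimal sketch; the key and value are illustrative placeholders:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// let _builder = builder.request_metadata("tenant_id", "tenant-1234");
    /// # }
    /// ```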
    pub fn request_metadata(
        mut self,
        k: impl ::std::convert::Into<::std::string::String>,
        v: impl ::std::convert::Into<::std::string::String>,
    ) -> Self {
        self.inner = self.inner.request_metadata(k.into(), v.into());
        self
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn set_request_metadata(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_request_metadata(input);
        self
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn get_request_metadata(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.inner.get_request_metadata()
    }
    /// <p>Model performance settings for the request.</p>
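    ///
    /// A minimal sketch opting into latency-optimized inference, assuming the
    /// `latency` setting on `PerformanceConfiguration`:
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_bedrockruntime::operation::converse::builders::ConverseFluentBuilder) {
    /// use aws_sdk_bedrockruntime::types::{PerformanceConfigLatency, PerformanceConfiguration};
    ///
    /// let config = PerformanceConfiguration::builder()
    ///     .latency(PerformanceConfigLatency::Optimized)
    ///     .build();
    /// let _builder = builder.performance_config(config);
    /// # }
    /// ```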
    pub fn performance_config(mut self, input: crate::types::PerformanceConfiguration) -> Self {
        self.inner = self.inner.performance_config(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn set_performance_config(mut self, input: ::std::option::Option<crate::types::PerformanceConfiguration>) -> Self {
        self.inner = self.inner.set_performance_config(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn get_performance_config(&self) -> &::std::option::Option<crate::types::PerformanceConfiguration> {
        self.inner.get_performance_config()
    }
}