aws_sdk_bedrockruntime/operation/converse_stream/_converse_stream_input.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq)]
pub struct ConverseStreamInput {
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub model_id: ::std::option::Option<::std::string::String>,
    /// <p>The messages that you want to send to the model.</p>
    pub messages: ::std::option::Option<::std::vec::Vec<crate::types::Message>>,
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub system: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>,
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub inference_config: ::std::option::Option<crate::types::InferenceConfiguration>,
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub tool_config: ::std::option::Option<crate::types::ToolConfiguration>,
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub guardrail_config: ::std::option::Option<crate::types::GuardrailStreamConfiguration>,
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub additional_model_request_fields: ::std::option::Option<::aws_smithy_types::Document>,
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub prompt_variables: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub additional_model_response_field_paths: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub request_metadata: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    /// <p>Model performance settings for the request.</p>
    pub performance_config: ::std::option::Option<crate::types::PerformanceConfiguration>,
    /// <p>Specifies the processing tier configuration used for serving the request.</p>
    pub service_tier: ::std::option::Option<crate::types::ServiceTier>,
}
impl ConverseStreamInput {
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn model_id(&self) -> ::std::option::Option<&str> {
        self.model_id.as_deref()
    }
    /// <p>The messages that you want to send to the model.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.messages.is_none()`.
    pub fn messages(&self) -> &[crate::types::Message] {
        self.messages.as_deref().unwrap_or_default()
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.system.is_none()`.
    pub fn system(&self) -> &[crate::types::SystemContentBlock] {
        self.system.as_deref().unwrap_or_default()
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn inference_config(&self) -> ::std::option::Option<&crate::types::InferenceConfiguration> {
        self.inference_config.as_ref()
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn tool_config(&self) -> ::std::option::Option<&crate::types::ToolConfiguration> {
        self.tool_config.as_ref()
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn guardrail_config(&self) -> ::std::option::Option<&crate::types::GuardrailStreamConfiguration> {
        self.guardrail_config.as_ref()
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn additional_model_request_fields(&self) -> ::std::option::Option<&::aws_smithy_types::Document> {
        self.additional_model_request_fields.as_ref()
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn prompt_variables(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
        self.prompt_variables.as_ref()
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.additional_model_response_field_paths.is_none()`.
    pub fn additional_model_response_field_paths(&self) -> &[::std::string::String] {
        self.additional_model_response_field_paths.as_deref().unwrap_or_default()
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn request_metadata(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.request_metadata.as_ref()
    }
    /// <p>Model performance settings for the request.</p>
    pub fn performance_config(&self) -> ::std::option::Option<&crate::types::PerformanceConfiguration> {
        self.performance_config.as_ref()
    }
    /// <p>Specifies the processing tier configuration used for serving the request.</p>
    pub fn service_tier(&self) -> ::std::option::Option<&crate::types::ServiceTier> {
        self.service_tier.as_ref()
    }
}
impl ::std::fmt::Debug for ConverseStreamInput {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        let mut formatter = f.debug_struct("ConverseStreamInput");
        formatter.field("model_id", &self.model_id);
        formatter.field("messages", &self.messages);
        formatter.field("system", &self.system);
        formatter.field("inference_config", &self.inference_config);
        formatter.field("tool_config", &self.tool_config);
        formatter.field("guardrail_config", &self.guardrail_config);
        formatter.field("additional_model_request_fields", &self.additional_model_request_fields);
        formatter.field("prompt_variables", &"*** Sensitive Data Redacted ***");
        formatter.field("additional_model_response_field_paths", &self.additional_model_response_field_paths);
        formatter.field("request_metadata", &"*** Sensitive Data Redacted ***");
        formatter.field("performance_config", &self.performance_config);
        formatter.field("service_tier", &self.service_tier);
        formatter.finish()
    }
}
impl ConverseStreamInput {
    /// Creates a new builder-style object to manufacture [`ConverseStreamInput`](crate::operation::converse_stream::ConverseStreamInput).
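    ///
    /// # Examples
    ///
    /// A minimal sketch of assembling an input by hand. The model ID is illustrative only,
    /// and the `Message`/`ContentBlock` shapes and the `BuildError` re-export are assumed
    /// from this crate's public API; consult the service docs before relying on them.
    ///
    /// ```no_run
    /// use aws_sdk_bedrockruntime::operation::converse_stream::ConverseStreamInput;
    /// use aws_sdk_bedrockruntime::types::{ContentBlock, ConversationRole, Message};
    ///
    /// # fn example() -> Result<(), aws_sdk_bedrockruntime::error::BuildError> {
    /// // Build one user message; `role` and `content` are required members.
    /// let message = Message::builder()
    ///     .role(ConversationRole::User)
    ///     .content(ContentBlock::Text("Hello!".to_string()))
    ///     .build()?;
    /// let input = ConverseStreamInput::builder()
    ///     .model_id("anthropic.claude-3-haiku-20240307-v1:0") // hypothetical model ID
    ///     .messages(message)
    ///     .build()?;
    /// # Ok(())
    /// # }
    /// ```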
    pub fn builder() -> crate::operation::converse_stream::builders::ConverseStreamInputBuilder {
        crate::operation::converse_stream::builders::ConverseStreamInputBuilder::default()
    }
}

/// A builder for [`ConverseStreamInput`](crate::operation::converse_stream::ConverseStreamInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default)]
#[non_exhaustive]
pub struct ConverseStreamInputBuilder {
    pub(crate) model_id: ::std::option::Option<::std::string::String>,
    pub(crate) messages: ::std::option::Option<::std::vec::Vec<crate::types::Message>>,
    pub(crate) system: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>,
    pub(crate) inference_config: ::std::option::Option<crate::types::InferenceConfiguration>,
    pub(crate) tool_config: ::std::option::Option<crate::types::ToolConfiguration>,
    pub(crate) guardrail_config: ::std::option::Option<crate::types::GuardrailStreamConfiguration>,
    pub(crate) additional_model_request_fields: ::std::option::Option<::aws_smithy_types::Document>,
    pub(crate) prompt_variables: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
    pub(crate) additional_model_response_field_paths: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
    pub(crate) request_metadata: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    pub(crate) performance_config: ::std::option::Option<crate::types::PerformanceConfiguration>,
    pub(crate) service_tier: ::std::option::Option<crate::types::ServiceTier>,
}
impl ConverseStreamInputBuilder {
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    /// This field is required.
    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.model_id = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.model_id = input;
        self
    }
    /// <p>Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>To include a prompt that was defined in <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html">Prompt management</a>, specify the ARN of the prompt version to use.</p></li>
    /// </ul>
    /// <p>The Converse API doesn't support <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported models</a>.</p>
    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
        &self.model_id
    }
    /// Appends an item to `messages`.
    ///
    /// To override the contents of this collection use [`set_messages`](Self::set_messages).
    ///
    /// <p>The messages that you want to send to the model.</p>
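    ///
    /// A short sketch of appending a user message, assuming the `Text` content variant
    /// from this crate's generated `types` module (the question text is illustrative):
    ///
    /// ```no_run
    /// use aws_sdk_bedrockruntime::types::{ContentBlock, ConversationRole, Message};
    ///
    /// # fn example(
    /// #     builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamInputBuilder,
    /// # ) -> Result<(), aws_sdk_bedrockruntime::error::BuildError> {
    /// // Each call to `messages` appends one turn to the conversation.
    /// let builder = builder.messages(
    ///     Message::builder()
    ///         .role(ConversationRole::User)
    ///         .content(ContentBlock::Text("What is the capital of France?".to_string()))
    ///         .build()?,
    /// );
    /// # Ok(())
    /// # }
    /// ```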
    pub fn messages(mut self, input: crate::types::Message) -> Self {
        let mut v = self.messages.unwrap_or_default();
        v.push(input);
        self.messages = ::std::option::Option::Some(v);
        self
    }
    /// <p>The messages that you want to send to the model.</p>
    pub fn set_messages(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Message>>) -> Self {
        self.messages = input;
        self
    }
    /// <p>The messages that you want to send to the model.</p>
    pub fn get_messages(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Message>> {
        &self.messages
    }
    /// Appends an item to `system`.
    ///
    /// To override the contents of this collection use [`set_system`](Self::set_system).
    ///
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn system(mut self, input: crate::types::SystemContentBlock) -> Self {
        let mut v = self.system.unwrap_or_default();
        v.push(input);
        self.system = ::std::option::Option::Some(v);
        self
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn set_system(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>>) -> Self {
        self.system = input;
        self
    }
    /// <p>A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.</p>
    pub fn get_system(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::SystemContentBlock>> {
        &self.system
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
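    ///
    /// A hedged sketch of supplying a configuration. The field values are illustrative,
    /// and `build()` returning the struct directly assumes `InferenceConfiguration` has
    /// no required members:
    ///
    /// ```no_run
    /// use aws_sdk_bedrockruntime::types::InferenceConfiguration;
    ///
    /// # fn example(
    /// #     builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamInputBuilder,
    /// # ) {
    /// let builder = builder.inference_config(
    ///     InferenceConfiguration::builder()
    ///         .max_tokens(512)   // cap response length (illustrative)
    ///         .temperature(0.5)  // moderate sampling randomness (illustrative)
    ///         .build(),
    /// );
    /// # }
    /// ```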
    pub fn inference_config(mut self, input: crate::types::InferenceConfiguration) -> Self {
        self.inference_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn set_inference_config(mut self, input: ::std::option::Option<crate::types::InferenceConfiguration>) -> Self {
        self.inference_config = input;
        self
    }
    /// <p>Inference parameters to pass to the model. <code>Converse</code> and <code>ConverseStream</code> support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the <code>additionalModelRequestFields</code> request field.</p>
    pub fn get_inference_config(&self) -> &::std::option::Option<crate::types::InferenceConfiguration> {
        &self.inference_config
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn tool_config(mut self, input: crate::types::ToolConfiguration) -> Self {
        self.tool_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn set_tool_config(mut self, input: ::std::option::Option<crate::types::ToolConfiguration>) -> Self {
        self.tool_config = input;
        self
    }
    /// <p>Configuration information for the tools that the model can use when generating a response.</p>
    /// <p>For information about models that support streaming tool use, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features">Supported models and model features</a>.</p>
    pub fn get_tool_config(&self) -> &::std::option::Option<crate::types::ToolConfiguration> {
        &self.tool_config
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn guardrail_config(mut self, input: crate::types::GuardrailStreamConfiguration) -> Self {
        self.guardrail_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn set_guardrail_config(mut self, input: ::std::option::Option<crate::types::GuardrailStreamConfiguration>) -> Self {
        self.guardrail_config = input;
        self
    }
    /// <p>Configuration information for a guardrail that you want to use in the request. If you include <code>guardContent</code> blocks in the <code>content</code> field in the <code>messages</code> field, the guardrail operates only on those messages. If you include no <code>guardContent</code> blocks, the guardrail operates on all messages in the request body and in any included prompt resource.</p>
    pub fn get_guardrail_config(&self) -> &::std::option::Option<crate::types::GuardrailStreamConfiguration> {
        &self.guardrail_config
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn additional_model_request_fields(mut self, input: ::aws_smithy_types::Document) -> Self {
        self.additional_model_request_fields = ::std::option::Option::Some(input);
        self
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn set_additional_model_request_fields(mut self, input: ::std::option::Option<::aws_smithy_types::Document>) -> Self {
        self.additional_model_request_fields = input;
        self
    }
    /// <p>Additional inference parameters that the model supports, beyond the base set of inference parameters that <code>Converse</code> and <code>ConverseStream</code> support in the <code>inferenceConfig</code> field. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Model parameters</a>.</p>
    pub fn get_additional_model_request_fields(&self) -> &::std::option::Option<::aws_smithy_types::Document> {
        &self.additional_model_request_fields
    }
    /// Adds a key-value pair to `prompt_variables`.
    ///
    /// To override the contents of this collection use [`set_prompt_variables`](Self::set_prompt_variables).
    ///
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
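    ///
    /// A hedged sketch, assuming a prompt resource with a `{{genre}}` variable and the
    /// `Text` variant of [`PromptVariableValues`](crate::types::PromptVariableValues):
    ///
    /// ```no_run
    /// use aws_sdk_bedrockruntime::types::PromptVariableValues;
    ///
    /// # fn example(
    /// #     builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamInputBuilder,
    /// # ) {
    /// // Fills in {{genre}} when `model_id` points at a prompt resource ARN.
    /// let builder = builder.prompt_variables("genre", PromptVariableValues::Text("pop".to_string()));
    /// # }
    /// ```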
    pub fn prompt_variables(mut self, k: impl ::std::convert::Into<::std::string::String>, v: crate::types::PromptVariableValues) -> Self {
        let mut hash_map = self.prompt_variables.unwrap_or_default();
        hash_map.insert(k.into(), v);
        self.prompt_variables = ::std::option::Option::Some(hash_map);
        self
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn set_prompt_variables(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>>,
    ) -> Self {
        self.prompt_variables = input;
        self
    }
    /// <p>Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the <code>modelId</code> field.</p>
    pub fn get_prompt_variables(
        &self,
    ) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, crate::types::PromptVariableValues>> {
        &self.prompt_variables
    }
    /// Appends an item to `additional_model_response_field_paths`.
    ///
    /// To override the contents of this collection use [`set_additional_model_response_field_paths`](Self::set_additional_model_response_field_paths).
    ///
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn additional_model_response_field_paths(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        let mut v = self.additional_model_response_field_paths.unwrap_or_default();
        v.push(input.into());
        self.additional_model_response_field_paths = ::std::option::Option::Some(v);
        self
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn set_additional_model_response_field_paths(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.additional_model_response_field_paths = input;
        self
    }
    /// <p>Additional model parameters field paths to return in the response. <code>Converse</code> and <code>ConverseStream</code> return the requested fields as a JSON Pointer object in the <code>additionalModelResponseFields</code> field. The following is example JSON for <code>additionalModelResponseFieldPaths</code>.</p>
    /// <p><code>\[ "/stop_sequence" \]</code></p>
    /// <p>For information about the JSON Pointer syntax, see the <a href="https://datatracker.ietf.org/doc/html/rfc6901">Internet Engineering Task Force (IETF)</a> documentation.</p>
    /// <p><code>Converse</code> and <code>ConverseStream</code> reject an empty JSON Pointer or incorrectly structured JSON Pointer with a <code>400</code> error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by <code>Converse</code>.</p>
    pub fn get_additional_model_response_field_paths(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        &self.additional_model_response_field_paths
    }
    /// Adds a key-value pair to `request_metadata`.
    ///
    /// To override the contents of this collection use [`set_request_metadata`](Self::set_request_metadata).
    ///
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
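    ///
    /// For example, tagging an invocation so it can be filtered in invocation logs later
    /// (the key and value below are illustrative):
    ///
    /// ```no_run
    /// # fn example(
    /// #     builder: aws_sdk_bedrockruntime::operation::converse_stream::builders::ConverseStreamInputBuilder,
    /// # ) {
    /// let builder = builder.request_metadata("project", "demo-app");
    /// # }
    /// ```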
    pub fn request_metadata(
        mut self,
        k: impl ::std::convert::Into<::std::string::String>,
        v: impl ::std::convert::Into<::std::string::String>,
    ) -> Self {
        let mut hash_map = self.request_metadata.unwrap_or_default();
        hash_map.insert(k.into(), v.into());
        self.request_metadata = ::std::option::Option::Some(hash_map);
        self
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn set_request_metadata(
        mut self,
        input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
    ) -> Self {
        self.request_metadata = input;
        self
    }
    /// <p>Key-value pairs that you can use to filter invocation logs.</p>
    pub fn get_request_metadata(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        &self.request_metadata
    }
    /// <p>Model performance settings for the request.</p>
    pub fn performance_config(mut self, input: crate::types::PerformanceConfiguration) -> Self {
        self.performance_config = ::std::option::Option::Some(input);
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn set_performance_config(mut self, input: ::std::option::Option<crate::types::PerformanceConfiguration>) -> Self {
        self.performance_config = input;
        self
    }
    /// <p>Model performance settings for the request.</p>
    pub fn get_performance_config(&self) -> &::std::option::Option<crate::types::PerformanceConfiguration> {
        &self.performance_config
    }
    /// <p>Specifies the processing tier configuration used for serving the request.</p>
    pub fn service_tier(mut self, input: crate::types::ServiceTier) -> Self {
        self.service_tier = ::std::option::Option::Some(input);
        self
    }
    /// <p>Specifies the processing tier configuration used for serving the request.</p>
    pub fn set_service_tier(mut self, input: ::std::option::Option<crate::types::ServiceTier>) -> Self {
        self.service_tier = input;
        self
    }
    /// <p>Specifies the processing tier configuration used for serving the request.</p>
    pub fn get_service_tier(&self) -> &::std::option::Option<crate::types::ServiceTier> {
        &self.service_tier
    }
    /// Consumes the builder and constructs a [`ConverseStreamInput`](crate::operation::converse_stream::ConverseStreamInput).
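    ///
    /// A minimal sketch; `build` returns a `Result`, so failures surface as a
    /// [`BuildError`](::aws_smithy_types::error::operation::BuildError) rather than a panic
    /// (the model ID and the error re-export path are assumptions):
    ///
    /// ```no_run
    /// # fn example() -> Result<(), aws_sdk_bedrockruntime::error::BuildError> {
    /// let input = aws_sdk_bedrockruntime::operation::converse_stream::ConverseStreamInput::builder()
    ///     .model_id("amazon.titan-text-express-v1") // hypothetical model ID
    ///     .build()?;
    /// # Ok(())
    /// # }
    /// ```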
    pub fn build(
        self,
    ) -> ::std::result::Result<crate::operation::converse_stream::ConverseStreamInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::converse_stream::ConverseStreamInput {
            model_id: self.model_id,
            messages: self.messages,
            system: self.system,
            inference_config: self.inference_config,
            tool_config: self.tool_config,
            guardrail_config: self.guardrail_config,
            additional_model_request_fields: self.additional_model_request_fields,
            prompt_variables: self.prompt_variables,
            additional_model_response_field_paths: self.additional_model_response_field_paths,
            request_metadata: self.request_metadata,
            performance_config: self.performance_config,
            service_tier: self.service_tier,
        })
    }
}
impl ::std::fmt::Debug for ConverseStreamInputBuilder {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        let mut formatter = f.debug_struct("ConverseStreamInputBuilder");
        formatter.field("model_id", &self.model_id);
        formatter.field("messages", &self.messages);
        formatter.field("system", &self.system);
        formatter.field("inference_config", &self.inference_config);
        formatter.field("tool_config", &self.tool_config);
        formatter.field("guardrail_config", &self.guardrail_config);
        formatter.field("additional_model_request_fields", &self.additional_model_request_fields);
        formatter.field("prompt_variables", &"*** Sensitive Data Redacted ***");
        formatter.field("additional_model_response_field_paths", &self.additional_model_response_field_paths);
        formatter.field("request_metadata", &"*** Sensitive Data Redacted ***");
        formatter.field("performance_config", &self.performance_config);
        formatter.field("service_tier", &self.service_tier);
        formatter.finish()
    }
}