aws_sdk_bedrockruntime/operation/invoke_model/_invoke_model_input.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Input for the `InvokeModel` operation. Construct via [`InvokeModelInput::builder`];
/// the `body` field is treated as sensitive and redacted in `Debug` output.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq)]
pub struct InvokeModelInput {
    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
    pub body: ::std::option::Option<::aws_smithy_types::Blob>,
    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
    pub content_type: ::std::option::Option<::std::string::String>,
    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
    pub accept: ::std::option::Option<::std::string::String>,
    /// <p>The unique identifier of the model to invoke to run inference.</p>
    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
    /// <ul>
    /// <li>
    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
    /// <li>
    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
    /// </ul>
    pub model_id: ::std::option::Option<::std::string::String>,
    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
    pub trace: ::std::option::Option<crate::types::Trace>,
    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
    /// <p>An error will be thrown in the following situations.</p>
    /// <ul>
    /// <li>
    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
    /// <li>
    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
    /// <li>
    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
    /// </ul>
    pub guardrail_identifier: ::std::option::Option<::std::string::String>,
    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
    pub guardrail_version: ::std::option::Option<::std::string::String>,
    /// <p>Model performance settings for the request.</p>
    pub performance_config_latency: ::std::option::Option<crate::types::PerformanceConfigLatency>,
}
45impl InvokeModelInput {
46    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
47    pub fn body(&self) -> ::std::option::Option<&::aws_smithy_types::Blob> {
48        self.body.as_ref()
49    }
50    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
51    pub fn content_type(&self) -> ::std::option::Option<&str> {
52        self.content_type.as_deref()
53    }
54    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
55    pub fn accept(&self) -> ::std::option::Option<&str> {
56        self.accept.as_deref()
57    }
58    /// <p>The unique identifier of the model to invoke to run inference.</p>
59    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
60    /// <ul>
61    /// <li>
62    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
63    /// <li>
64    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
65    /// <li>
66    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
67    /// <li>
68    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
69    /// <li>
70    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
71    /// </ul>
72    pub fn model_id(&self) -> ::std::option::Option<&str> {
73        self.model_id.as_deref()
74    }
75    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
76    pub fn trace(&self) -> ::std::option::Option<&crate::types::Trace> {
77        self.trace.as_ref()
78    }
79    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
80    /// <p>An error will be thrown in the following situations.</p>
81    /// <ul>
82    /// <li>
83    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
84    /// <li>
85    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
86    /// <li>
87    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
88    /// </ul>
89    pub fn guardrail_identifier(&self) -> ::std::option::Option<&str> {
90        self.guardrail_identifier.as_deref()
91    }
92    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
93    pub fn guardrail_version(&self) -> ::std::option::Option<&str> {
94        self.guardrail_version.as_deref()
95    }
96    /// <p>Model performance settings for the request.</p>
97    pub fn performance_config_latency(&self) -> ::std::option::Option<&crate::types::PerformanceConfigLatency> {
98        self.performance_config_latency.as_ref()
99    }
100}
101impl ::std::fmt::Debug for InvokeModelInput {
102    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
103        let mut formatter = f.debug_struct("InvokeModelInput");
104        formatter.field("body", &"*** Sensitive Data Redacted ***");
105        formatter.field("content_type", &self.content_type);
106        formatter.field("accept", &self.accept);
107        formatter.field("model_id", &self.model_id);
108        formatter.field("trace", &self.trace);
109        formatter.field("guardrail_identifier", &self.guardrail_identifier);
110        formatter.field("guardrail_version", &self.guardrail_version);
111        formatter.field("performance_config_latency", &self.performance_config_latency);
112        formatter.finish()
113    }
114}
115impl InvokeModelInput {
116    /// Creates a new builder-style object to manufacture [`InvokeModelInput`](crate::operation::invoke_model::InvokeModelInput).
117    pub fn builder() -> crate::operation::invoke_model::builders::InvokeModelInputBuilder {
118        crate::operation::invoke_model::builders::InvokeModelInputBuilder::default()
119    }
120}
121
/// A builder for [`InvokeModelInput`](crate::operation::invoke_model::InvokeModelInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default)]
#[non_exhaustive]
pub struct InvokeModelInputBuilder {
    // Request body (prompt and inference parameters); redacted in `Debug` output.
    pub(crate) body: ::std::option::Option<::aws_smithy_types::Blob>,
    // MIME type of the request body (must be `application/json`).
    pub(crate) content_type: ::std::option::Option<::std::string::String>,
    // Desired MIME type of the response body (defaults to `application/json`).
    pub(crate) accept: ::std::option::Option<::std::string::String>,
    // Model, inference-profile, provisioned-throughput, custom-model, or imported-model ID/ARN.
    pub(crate) model_id: ::std::option::Option<::std::string::String>,
    // Whether to enable the full Bedrock trace.
    pub(crate) trace: ::std::option::Option<crate::types::Trace>,
    // Guardrail to apply; must be paired with `guardrail_version` when set.
    pub(crate) guardrail_identifier: ::std::option::Option<::std::string::String>,
    // Guardrail version number, or `DRAFT`.
    pub(crate) guardrail_version: ::std::option::Option<::std::string::String>,
    // Model performance settings for the request.
    pub(crate) performance_config_latency: ::std::option::Option<crate::types::PerformanceConfigLatency>,
}
135impl InvokeModelInputBuilder {
136    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
137    pub fn body(mut self, input: ::aws_smithy_types::Blob) -> Self {
138        self.body = ::std::option::Option::Some(input);
139        self
140    }
141    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
142    pub fn set_body(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self {
143        self.body = input;
144        self
145    }
146    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
147    pub fn get_body(&self) -> &::std::option::Option<::aws_smithy_types::Blob> {
148        &self.body
149    }
150    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
151    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
152        self.content_type = ::std::option::Option::Some(input.into());
153        self
154    }
155    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
156    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
157        self.content_type = input;
158        self
159    }
160    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
161    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
162        &self.content_type
163    }
164    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
165    pub fn accept(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
166        self.accept = ::std::option::Option::Some(input.into());
167        self
168    }
169    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
170    pub fn set_accept(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
171        self.accept = input;
172        self
173    }
174    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
175    pub fn get_accept(&self) -> &::std::option::Option<::std::string::String> {
176        &self.accept
177    }
178    /// <p>The unique identifier of the model to invoke to run inference.</p>
179    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
180    /// <ul>
181    /// <li>
182    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
183    /// <li>
184    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
185    /// <li>
186    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
187    /// <li>
188    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
189    /// <li>
190    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
191    /// </ul>
192    /// This field is required.
193    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
194        self.model_id = ::std::option::Option::Some(input.into());
195        self
196    }
197    /// <p>The unique identifier of the model to invoke to run inference.</p>
198    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
199    /// <ul>
200    /// <li>
201    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
202    /// <li>
203    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
204    /// <li>
205    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
206    /// <li>
207    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
208    /// <li>
209    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
210    /// </ul>
211    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
212        self.model_id = input;
213        self
214    }
215    /// <p>The unique identifier of the model to invoke to run inference.</p>
216    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
217    /// <ul>
218    /// <li>
219    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
220    /// <li>
221    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
222    /// <li>
223    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
224    /// <li>
225    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
226    /// <li>
227    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
228    /// </ul>
229    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
230        &self.model_id
231    }
232    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
233    pub fn trace(mut self, input: crate::types::Trace) -> Self {
234        self.trace = ::std::option::Option::Some(input);
235        self
236    }
237    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
238    pub fn set_trace(mut self, input: ::std::option::Option<crate::types::Trace>) -> Self {
239        self.trace = input;
240        self
241    }
242    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
243    pub fn get_trace(&self) -> &::std::option::Option<crate::types::Trace> {
244        &self.trace
245    }
246    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
247    /// <p>An error will be thrown in the following situations.</p>
248    /// <ul>
249    /// <li>
250    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
251    /// <li>
252    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
253    /// <li>
254    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
255    /// </ul>
256    pub fn guardrail_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
257        self.guardrail_identifier = ::std::option::Option::Some(input.into());
258        self
259    }
260    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
261    /// <p>An error will be thrown in the following situations.</p>
262    /// <ul>
263    /// <li>
264    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
265    /// <li>
266    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
267    /// <li>
268    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
269    /// </ul>
270    pub fn set_guardrail_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
271        self.guardrail_identifier = input;
272        self
273    }
274    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
275    /// <p>An error will be thrown in the following situations.</p>
276    /// <ul>
277    /// <li>
278    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
279    /// <li>
280    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
281    /// <li>
282    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
283    /// </ul>
284    pub fn get_guardrail_identifier(&self) -> &::std::option::Option<::std::string::String> {
285        &self.guardrail_identifier
286    }
287    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
288    pub fn guardrail_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
289        self.guardrail_version = ::std::option::Option::Some(input.into());
290        self
291    }
292    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
293    pub fn set_guardrail_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
294        self.guardrail_version = input;
295        self
296    }
297    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
298    pub fn get_guardrail_version(&self) -> &::std::option::Option<::std::string::String> {
299        &self.guardrail_version
300    }
301    /// <p>Model performance settings for the request.</p>
302    pub fn performance_config_latency(mut self, input: crate::types::PerformanceConfigLatency) -> Self {
303        self.performance_config_latency = ::std::option::Option::Some(input);
304        self
305    }
306    /// <p>Model performance settings for the request.</p>
307    pub fn set_performance_config_latency(mut self, input: ::std::option::Option<crate::types::PerformanceConfigLatency>) -> Self {
308        self.performance_config_latency = input;
309        self
310    }
311    /// <p>Model performance settings for the request.</p>
312    pub fn get_performance_config_latency(&self) -> &::std::option::Option<crate::types::PerformanceConfigLatency> {
313        &self.performance_config_latency
314    }
315    /// Consumes the builder and constructs a [`InvokeModelInput`](crate::operation::invoke_model::InvokeModelInput).
316    pub fn build(self) -> ::std::result::Result<crate::operation::invoke_model::InvokeModelInput, ::aws_smithy_types::error::operation::BuildError> {
317        ::std::result::Result::Ok(crate::operation::invoke_model::InvokeModelInput {
318            body: self.body,
319            content_type: self.content_type,
320            accept: self.accept,
321            model_id: self.model_id,
322            trace: self.trace,
323            guardrail_identifier: self.guardrail_identifier,
324            guardrail_version: self.guardrail_version,
325            performance_config_latency: self.performance_config_latency,
326        })
327    }
328}
329impl ::std::fmt::Debug for InvokeModelInputBuilder {
330    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
331        let mut formatter = f.debug_struct("InvokeModelInputBuilder");
332        formatter.field("body", &"*** Sensitive Data Redacted ***");
333        formatter.field("content_type", &self.content_type);
334        formatter.field("accept", &self.accept);
335        formatter.field("model_id", &self.model_id);
336        formatter.field("trace", &self.trace);
337        formatter.field("guardrail_identifier", &self.guardrail_identifier);
338        formatter.field("guardrail_version", &self.guardrail_version);
339        formatter.field("performance_config_latency", &self.performance_config_latency);
340        formatter.finish()
341    }
342}