aws_sdk_bedrockruntime/operation/invoke_model/_invoke_model_input.rs

1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2#[allow(missing_docs)] // documentation missing in model
3#[non_exhaustive]
4#[derive(::std::clone::Clone, ::std::cmp::PartialEq)]
5pub struct InvokeModelInput {
6    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
7    pub body: ::std::option::Option<::aws_smithy_types::Blob>,
8    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
9    pub content_type: ::std::option::Option<::std::string::String>,
10    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
11    pub accept: ::std::option::Option<::std::string::String>,
12    /// <p>The unique identifier of the model to invoke to run inference.</p>
13    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
14    /// <ul>
15    /// <li>
16    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
17    /// <li>
18    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
19    /// <li>
20    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
21    /// <li>
22    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
23    /// <li>
24    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
25    /// </ul>
26    pub model_id: ::std::option::Option<::std::string::String>,
27    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
28    pub trace: ::std::option::Option<crate::types::Trace>,
29    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
30    /// <p>An error will be thrown in the following situations.</p>
31    /// <ul>
32    /// <li>
33    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
34    /// <li>
35    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
36    /// <li>
37    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
38    /// </ul>
39    pub guardrail_identifier: ::std::option::Option<::std::string::String>,
40    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
41    pub guardrail_version: ::std::option::Option<::std::string::String>,
42    /// <p>Model performance settings for the request.</p>
43    pub performance_config_latency: ::std::option::Option<crate::types::PerformanceConfigLatency>,
44    /// <p>Specifies the processing tier type used for serving the request.</p>
45    pub service_tier: ::std::option::Option<crate::types::ServiceTierType>,
46}
47impl InvokeModelInput {
48    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
49    pub fn body(&self) -> ::std::option::Option<&::aws_smithy_types::Blob> {
50        self.body.as_ref()
51    }
52    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
53    pub fn content_type(&self) -> ::std::option::Option<&str> {
54        self.content_type.as_deref()
55    }
56    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
57    pub fn accept(&self) -> ::std::option::Option<&str> {
58        self.accept.as_deref()
59    }
60    /// <p>The unique identifier of the model to invoke to run inference.</p>
61    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
62    /// <ul>
63    /// <li>
64    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
65    /// <li>
66    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
67    /// <li>
68    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
69    /// <li>
70    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
71    /// <li>
72    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
73    /// </ul>
74    pub fn model_id(&self) -> ::std::option::Option<&str> {
75        self.model_id.as_deref()
76    }
77    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
78    pub fn trace(&self) -> ::std::option::Option<&crate::types::Trace> {
79        self.trace.as_ref()
80    }
81    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
82    /// <p>An error will be thrown in the following situations.</p>
83    /// <ul>
84    /// <li>
85    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
86    /// <li>
87    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
88    /// <li>
89    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
90    /// </ul>
91    pub fn guardrail_identifier(&self) -> ::std::option::Option<&str> {
92        self.guardrail_identifier.as_deref()
93    }
94    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
95    pub fn guardrail_version(&self) -> ::std::option::Option<&str> {
96        self.guardrail_version.as_deref()
97    }
98    /// <p>Model performance settings for the request.</p>
99    pub fn performance_config_latency(&self) -> ::std::option::Option<&crate::types::PerformanceConfigLatency> {
100        self.performance_config_latency.as_ref()
101    }
102    /// <p>Specifies the processing tier type used for serving the request.</p>
103    pub fn service_tier(&self) -> ::std::option::Option<&crate::types::ServiceTierType> {
104        self.service_tier.as_ref()
105    }
106}
107impl ::std::fmt::Debug for InvokeModelInput {
108    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
109        let mut formatter = f.debug_struct("InvokeModelInput");
110        formatter.field("body", &"*** Sensitive Data Redacted ***");
111        formatter.field("content_type", &self.content_type);
112        formatter.field("accept", &self.accept);
113        formatter.field("model_id", &self.model_id);
114        formatter.field("trace", &self.trace);
115        formatter.field("guardrail_identifier", &self.guardrail_identifier);
116        formatter.field("guardrail_version", &self.guardrail_version);
117        formatter.field("performance_config_latency", &self.performance_config_latency);
118        formatter.field("service_tier", &self.service_tier);
119        formatter.finish()
120    }
121}
122impl InvokeModelInput {
123    /// Creates a new builder-style object to manufacture [`InvokeModelInput`](crate::operation::invoke_model::InvokeModelInput).
124    pub fn builder() -> crate::operation::invoke_model::builders::InvokeModelInputBuilder {
125        crate::operation::invoke_model::builders::InvokeModelInputBuilder::default()
126    }
127}
128
129/// A builder for [`InvokeModelInput`](crate::operation::invoke_model::InvokeModelInput).
130#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default)]
131#[non_exhaustive]
132pub struct InvokeModelInputBuilder {
133    pub(crate) body: ::std::option::Option<::aws_smithy_types::Blob>,
134    pub(crate) content_type: ::std::option::Option<::std::string::String>,
135    pub(crate) accept: ::std::option::Option<::std::string::String>,
136    pub(crate) model_id: ::std::option::Option<::std::string::String>,
137    pub(crate) trace: ::std::option::Option<crate::types::Trace>,
138    pub(crate) guardrail_identifier: ::std::option::Option<::std::string::String>,
139    pub(crate) guardrail_version: ::std::option::Option<::std::string::String>,
140    pub(crate) performance_config_latency: ::std::option::Option<crate::types::PerformanceConfigLatency>,
141    pub(crate) service_tier: ::std::option::Option<crate::types::ServiceTierType>,
142}
143impl InvokeModelInputBuilder {
144    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
145    pub fn body(mut self, input: ::aws_smithy_types::Blob) -> Self {
146        self.body = ::std::option::Option::Some(input);
147        self
148    }
149    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
150    pub fn set_body(mut self, input: ::std::option::Option<::aws_smithy_types::Blob>) -> Self {
151        self.body = input;
152        self
153    }
154    /// <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
155    pub fn get_body(&self) -> &::std::option::Option<::aws_smithy_types::Blob> {
156        &self.body
157    }
158    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
159    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
160        self.content_type = ::std::option::Option::Some(input.into());
161        self
162    }
163    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
164    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
165        self.content_type = input;
166        self
167    }
168    /// <p>The MIME type of the input data in the request. You must specify <code>application/json</code>.</p>
169    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
170        &self.content_type
171    }
172    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
173    pub fn accept(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
174        self.accept = ::std::option::Option::Some(input.into());
175        self
176    }
177    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
178    pub fn set_accept(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
179        self.accept = input;
180        self
181    }
182    /// <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
183    pub fn get_accept(&self) -> &::std::option::Option<::std::string::String> {
184        &self.accept
185    }
186    /// <p>The unique identifier of the model to invoke to run inference.</p>
187    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
188    /// <ul>
189    /// <li>
190    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
191    /// <li>
192    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
193    /// <li>
194    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
195    /// <li>
196    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
197    /// <li>
198    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
199    /// </ul>
200    /// This field is required.
201    pub fn model_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
202        self.model_id = ::std::option::Option::Some(input.into());
203        self
204    }
205    /// <p>The unique identifier of the model to invoke to run inference.</p>
206    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
207    /// <ul>
208    /// <li>
209    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
210    /// <li>
211    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
212    /// <li>
213    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
214    /// <li>
215    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
216    /// <li>
217    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
218    /// </ul>
219    pub fn set_model_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
220        self.model_id = input;
221        self
222    }
223    /// <p>The unique identifier of the model to invoke to run inference.</p>
224    /// <p>The <code>modelId</code> to provide depends on the type of model or throughput that you use:</p>
225    /// <ul>
226    /// <li>
227    /// <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p></li>
228    /// <li>
229    /// <p>If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html">Supported Regions and models for cross-region inference</a> in the Amazon Bedrock User Guide.</p></li>
230    /// <li>
231    /// <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p></li>
232    /// <li>
233    /// <p>If you use a custom model, specify the ARN of the custom model deployment (for on-demand inference) or the ARN of your provisioned model (for Provisioned Throughput). For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p></li>
234    /// <li>
235    /// <p>If you use an <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html">imported model</a>, specify the ARN of the imported model. You can get the model ARN from a successful call to <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelImportJob.html">CreateModelImportJob</a> or from the Imported models page in the Amazon Bedrock console.</p></li>
236    /// </ul>
237    pub fn get_model_id(&self) -> &::std::option::Option<::std::string::String> {
238        &self.model_id
239    }
240    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
241    pub fn trace(mut self, input: crate::types::Trace) -> Self {
242        self.trace = ::std::option::Option::Some(input);
243        self
244    }
245    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
246    pub fn set_trace(mut self, input: ::std::option::Option<crate::types::Trace>) -> Self {
247        self.trace = input;
248        self
249    }
250    /// <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
251    pub fn get_trace(&self) -> &::std::option::Option<crate::types::Trace> {
252        &self.trace
253    }
254    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
255    /// <p>An error will be thrown in the following situations.</p>
256    /// <ul>
257    /// <li>
258    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
259    /// <li>
260    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
261    /// <li>
262    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
263    /// </ul>
264    pub fn guardrail_identifier(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
265        self.guardrail_identifier = ::std::option::Option::Some(input.into());
266        self
267    }
268    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
269    /// <p>An error will be thrown in the following situations.</p>
270    /// <ul>
271    /// <li>
272    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
273    /// <li>
274    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
275    /// <li>
276    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
277    /// </ul>
278    pub fn set_guardrail_identifier(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
279        self.guardrail_identifier = input;
280        self
281    }
282    /// <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation.</p>
283    /// <p>An error will be thrown in the following situations.</p>
284    /// <ul>
285    /// <li>
286    /// <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p></li>
287    /// <li>
288    /// <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p></li>
289    /// <li>
290    /// <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p></li>
291    /// </ul>
292    pub fn get_guardrail_identifier(&self) -> &::std::option::Option<::std::string::String> {
293        &self.guardrail_identifier
294    }
295    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
296    pub fn guardrail_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
297        self.guardrail_version = ::std::option::Option::Some(input.into());
298        self
299    }
300    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
301    pub fn set_guardrail_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
302        self.guardrail_version = input;
303        self
304    }
305    /// <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
306    pub fn get_guardrail_version(&self) -> &::std::option::Option<::std::string::String> {
307        &self.guardrail_version
308    }
309    /// <p>Model performance settings for the request.</p>
310    pub fn performance_config_latency(mut self, input: crate::types::PerformanceConfigLatency) -> Self {
311        self.performance_config_latency = ::std::option::Option::Some(input);
312        self
313    }
314    /// <p>Model performance settings for the request.</p>
315    pub fn set_performance_config_latency(mut self, input: ::std::option::Option<crate::types::PerformanceConfigLatency>) -> Self {
316        self.performance_config_latency = input;
317        self
318    }
319    /// <p>Model performance settings for the request.</p>
320    pub fn get_performance_config_latency(&self) -> &::std::option::Option<crate::types::PerformanceConfigLatency> {
321        &self.performance_config_latency
322    }
323    /// <p>Specifies the processing tier type used for serving the request.</p>
324    pub fn service_tier(mut self, input: crate::types::ServiceTierType) -> Self {
325        self.service_tier = ::std::option::Option::Some(input);
326        self
327    }
328    /// <p>Specifies the processing tier type used for serving the request.</p>
329    pub fn set_service_tier(mut self, input: ::std::option::Option<crate::types::ServiceTierType>) -> Self {
330        self.service_tier = input;
331        self
332    }
333    /// <p>Specifies the processing tier type used for serving the request.</p>
334    pub fn get_service_tier(&self) -> &::std::option::Option<crate::types::ServiceTierType> {
335        &self.service_tier
336    }
337    /// Consumes the builder and constructs a [`InvokeModelInput`](crate::operation::invoke_model::InvokeModelInput).
338    pub fn build(self) -> ::std::result::Result<crate::operation::invoke_model::InvokeModelInput, ::aws_smithy_types::error::operation::BuildError> {
339        ::std::result::Result::Ok(crate::operation::invoke_model::InvokeModelInput {
340            body: self.body,
341            content_type: self.content_type,
342            accept: self.accept,
343            model_id: self.model_id,
344            trace: self.trace,
345            guardrail_identifier: self.guardrail_identifier,
346            guardrail_version: self.guardrail_version,
347            performance_config_latency: self.performance_config_latency,
348            service_tier: self.service_tier,
349        })
350    }
351}
352impl ::std::fmt::Debug for InvokeModelInputBuilder {
353    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
354        let mut formatter = f.debug_struct("InvokeModelInputBuilder");
355        formatter.field("body", &"*** Sensitive Data Redacted ***");
356        formatter.field("content_type", &self.content_type);
357        formatter.field("accept", &self.accept);
358        formatter.field("model_id", &self.model_id);
359        formatter.field("trace", &self.trace);
360        formatter.field("guardrail_identifier", &self.guardrail_identifier);
361        formatter.field("guardrail_version", &self.guardrail_version);
362        formatter.field("performance_config_latency", &self.performance_config_latency);
363        formatter.field("service_tier", &self.service_tier);
364        formatter.finish()
365    }
366}