aws_sdk_sagemaker/types/_input_config.rs
1// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
2
/// <p>Contains information about the location of input model artifacts, the name and shape of the expected data inputs, and the framework in which the model was trained.</p>
// NOTE(review): generated by smithy-rs (see file header) — do not hand-edit; all members are
// `Option`-wrapped at the type level, as is standard for smithy-rs generated shapes.
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct InputConfig {
    /// <p>The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).</p>
    pub s3_uri: ::std::option::Option<::std::string::String>,
    /// <p>Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are <code>Framework</code> specific.</p>
    /// <ul>
    /// <li>
    /// <p><code>TensorFlow</code>: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
    /// <ul>
    /// <li>
    /// <p>Examples for one input:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"input":\[1,1024,1024,3\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"input\":\[1,1024,1024,3\]}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Examples for two inputs:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"data1": \[1,28,28,1\], "data2":\[1,28,28,1\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"data1\": \[1,28,28,1\], \"data2\":\[1,28,28,1\]}</code></p></li>
    /// </ul></li>
    /// </ul></li>
    /// <li>
    /// <p><code>KERAS</code>: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, <code>DataInputConfig</code> should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.</p>
    /// <ul>
    /// <li>
    /// <p>Examples for one input:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"input_1":\[1,3,224,224\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"input_1\":\[1,3,224,224\]}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Examples for two inputs:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"input_1": \[1,3,224,224\], "input_2":\[1,3,224,224\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"input_1\": \[1,3,224,224\], \"input_2\":\[1,3,224,224\]}</code></p></li>
    /// </ul></li>
    /// </ul></li>
    /// <li>
    /// <p><code>MXNET/ONNX/DARKNET</code>: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
    /// <ul>
    /// <li>
    /// <p>Examples for one input:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"data":\[1,3,1024,1024\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"data\":\[1,3,1024,1024\]}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Examples for two inputs:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"var1": \[1,1,28,28\], "var2":\[1,1,28,28\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"var1\": \[1,1,28,28\], \"var2\":\[1,1,28,28\]}</code></p></li>
    /// </ul></li>
    /// </ul></li>
    /// <li>
    /// <p><code>PyTorch</code>: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.</p>
    /// <ul>
    /// <li>
    /// <p>Examples for one input in dictionary format:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"input0":\[1,3,224,224\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\]}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Example for one input in list format: <code>\[\[1,3,224,224\]\]</code></p></li>
    /// <li>
    /// <p>Examples for two inputs in dictionary format:</p>
    /// <ul>
    /// <li>
    /// <p>If using the console, <code>{"input0":\[1,3,224,224\], "input1":\[1,3,224,224\]}</code></p></li>
    /// <li>
    /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\], \"input1\":\[1,3,224,224\]}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Example for two inputs in list format: <code>\[\[1,3,224,224\], \[1,3,224,224\]\]</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p><code>XGBOOST</code>: input data name and shape are not needed.</p></li>
    /// </ul>
    /// <p><code>DataInputConfig</code> supports the following parameters for <code>CoreML</code> <code>TargetDevice</code> (ML Model format):</p>
    /// <ul>
    /// <li>
    /// <p><code>shape</code>: Input shape, for example <code>{"input_1": {"shape": \[1,224,224,3\]}}</code>. In addition to static input shapes, CoreML converter supports Flexible input shapes:</p>
    /// <ul>
    /// <li>
    /// <p>Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: <code>{"input_1": {"shape": \["1..10", 224, 224, 3\]}}</code></p></li>
    /// <li>
    /// <p>Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: <code>{"input_1": {"shape": \[\[1, 224, 224, 3\], \[1, 160, 160, 3\]\]}}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p><code>default_shape</code>: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example <code>{"input_1": {"shape": \["1..10", 224, 224, 3\], "default_shape": \[1, 224, 224, 3\]}}</code></p></li>
    /// <li>
    /// <p><code>type</code>: Input type. Allowed values: <code>Image</code> and <code>Tensor</code>. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as <code>bias</code> and <code>scale</code>.</p></li>
    /// <li>
    /// <p><code>bias</code>: If the input type is an Image, you need to provide the bias vector.</p></li>
    /// <li>
    /// <p><code>scale</code>: If the input type is an Image, you need to provide a scale factor.</p></li>
    /// </ul>
    /// <p>CoreML <code>ClassifierConfig</code> parameters can be specified using <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html">OutputConfig</a> <code>CompilerOptions</code>. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:</p>
    /// <ul>
    /// <li>
    /// <p>Tensor type input:</p>
    /// <ul>
    /// <li>
    /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\]}}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Tensor type input without input name (PyTorch):</p>
    /// <ul>
    /// <li>
    /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\]}\]</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Image type input:</p>
    /// <ul>
    /// <li>
    /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}}</code></p></li>
    /// <li>
    /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>Image type input without input name (PyTorch):</p>
    /// <ul>
    /// <li>
    /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}\]</code></p></li>
    /// <li>
    /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
    /// </ul></li>
    /// </ul>
    /// <p>Depending on the model format, <code>DataInputConfig</code> requires the following parameters for <code>ml_eia2</code> <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice">OutputConfig:TargetDevice</a>.</p>
    /// <ul>
    /// <li>
    /// <p>For TensorFlow models saved in the SavedModel format, specify the input names from <code>signature_def_key</code> and the input model shapes for <code>DataInputConfig</code>. Specify the <code>signature_def_key</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a> if the model does not use TensorFlow's default signature def key. For example:</p>
    /// <ul>
    /// <li>
    /// <p><code>"DataInputConfig": {"inputs": \[1, 224, 224, 3\]}</code></p></li>
    /// <li>
    /// <p><code>"CompilerOptions": {"signature_def_key": "serving_custom"}</code></p></li>
    /// </ul></li>
    /// <li>
    /// <p>For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in <code>DataInputConfig</code> and the output tensor names for <code>output_names</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a>. For example:</p>
    /// <ul>
    /// <li>
    /// <p><code>"DataInputConfig": {"input_tensor:0": \[1, 224, 224, 3\]}</code></p></li>
    /// <li>
    /// <p><code>"CompilerOptions": {"output_names": \["output_tensor:0"\]}</code></p></li>
    /// </ul></li>
    /// </ul>
    pub data_input_config: ::std::option::Option<::std::string::String>,
    /// <p>Identifies the framework in which the model was trained. For example: TENSORFLOW.</p>
    pub framework: ::std::option::Option<crate::types::Framework>,
    /// <p>Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.</p>
    /// <p>For information about framework versions supported for cloud targets and edge devices, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html">Cloud Supported Instance Types and Frameworks</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html">Edge Supported Frameworks</a>.</p>
    pub framework_version: ::std::option::Option<::std::string::String>,
}
174impl InputConfig {
175 /// <p>The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).</p>
176 pub fn s3_uri(&self) -> ::std::option::Option<&str> {
177 self.s3_uri.as_deref()
178 }
179 /// <p>Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are <code>Framework</code> specific.</p>
180 /// <ul>
181 /// <li>
182 /// <p><code>TensorFlow</code>: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
183 /// <ul>
184 /// <li>
185 /// <p>Examples for one input:</p>
186 /// <ul>
187 /// <li>
188 /// <p>If using the console, <code>{"input":\[1,1024,1024,3\]}</code></p></li>
189 /// <li>
190 /// <p>If using the CLI, <code>{\"input\":\[1,1024,1024,3\]}</code></p></li>
191 /// </ul></li>
192 /// <li>
193 /// <p>Examples for two inputs:</p>
194 /// <ul>
195 /// <li>
196 /// <p>If using the console, <code>{"data1": \[1,28,28,1\], "data2":\[1,28,28,1\]}</code></p></li>
197 /// <li>
198 /// <p>If using the CLI, <code>{\"data1\": \[1,28,28,1\], \"data2\":\[1,28,28,1\]}</code></p></li>
199 /// </ul></li>
200 /// </ul></li>
201 /// <li>
202 /// <p><code>KERAS</code>: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, <code>DataInputConfig</code> should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.</p>
203 /// <ul>
204 /// <li>
205 /// <p>Examples for one input:</p>
206 /// <ul>
207 /// <li>
208 /// <p>If using the console, <code>{"input_1":\[1,3,224,224\]}</code></p></li>
209 /// <li>
210 /// <p>If using the CLI, <code>{\"input_1\":\[1,3,224,224\]}</code></p></li>
211 /// </ul></li>
212 /// <li>
213 /// <p>Examples for two inputs:</p>
214 /// <ul>
215 /// <li>
216 /// <p>If using the console, <code>{"input_1": \[1,3,224,224\], "input_2":\[1,3,224,224\]}</code></p></li>
217 /// <li>
218 /// <p>If using the CLI, <code>{\"input_1\": \[1,3,224,224\], \"input_2\":\[1,3,224,224\]}</code></p></li>
219 /// </ul></li>
220 /// </ul></li>
221 /// <li>
222 /// <p><code>MXNET/ONNX/DARKNET</code>: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
223 /// <ul>
224 /// <li>
225 /// <p>Examples for one input:</p>
226 /// <ul>
227 /// <li>
228 /// <p>If using the console, <code>{"data":\[1,3,1024,1024\]}</code></p></li>
229 /// <li>
230 /// <p>If using the CLI, <code>{\"data\":\[1,3,1024,1024\]}</code></p></li>
231 /// </ul></li>
232 /// <li>
233 /// <p>Examples for two inputs:</p>
234 /// <ul>
235 /// <li>
236 /// <p>If using the console, <code>{"var1": \[1,1,28,28\], "var2":\[1,1,28,28\]}</code></p></li>
237 /// <li>
238 /// <p>If using the CLI, <code>{\"var1\": \[1,1,28,28\], \"var2\":\[1,1,28,28\]}</code></p></li>
239 /// </ul></li>
240 /// </ul></li>
241 /// <li>
242 /// <p><code>PyTorch</code>: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.</p>
243 /// <ul>
244 /// <li>
245 /// <p>Examples for one input in dictionary format:</p>
246 /// <ul>
247 /// <li>
248 /// <p>If using the console, <code>{"input0":\[1,3,224,224\]}</code></p></li>
249 /// <li>
250 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\]}</code></p></li>
251 /// </ul></li>
252 /// <li>
253 /// <p>Example for one input in list format: <code>\[\[1,3,224,224\]\]</code></p></li>
254 /// <li>
255 /// <p>Examples for two inputs in dictionary format:</p>
256 /// <ul>
257 /// <li>
258 /// <p>If using the console, <code>{"input0":\[1,3,224,224\], "input1":\[1,3,224,224\]}</code></p></li>
259 /// <li>
260 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\], \"input1\":\[1,3,224,224\]}</code></p></li>
261 /// </ul></li>
262 /// <li>
263 /// <p>Example for two inputs in list format: <code>\[\[1,3,224,224\], \[1,3,224,224\]\]</code></p></li>
264 /// </ul></li>
265 /// <li>
266 /// <p><code>XGBOOST</code>: input data name and shape are not needed.</p></li>
267 /// </ul>
268 /// <p><code>DataInputConfig</code> supports the following parameters for <code>CoreML</code> <code>TargetDevice</code> (ML Model format):</p>
269 /// <ul>
270 /// <li>
271 /// <p><code>shape</code>: Input shape, for example <code>{"input_1": {"shape": \[1,224,224,3\]}}</code>. In addition to static input shapes, CoreML converter supports Flexible input shapes:</p>
272 /// <ul>
273 /// <li>
274 /// <p>Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: <code>{"input_1": {"shape": \["1..10", 224, 224, 3\]}}</code></p></li>
275 /// <li>
276 /// <p>Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: <code>{"input_1": {"shape": \[\[1, 224, 224, 3\], \[1, 160, 160, 3\]\]}}</code></p></li>
277 /// </ul></li>
278 /// <li>
279 /// <p><code>default_shape</code>: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example <code>{"input_1": {"shape": \["1..10", 224, 224, 3\], "default_shape": \[1, 224, 224, 3\]}}</code></p></li>
280 /// <li>
281 /// <p><code>type</code>: Input type. Allowed values: <code>Image</code> and <code>Tensor</code>. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as <code>bias</code> and <code>scale</code>.</p></li>
282 /// <li>
283 /// <p><code>bias</code>: If the input type is an Image, you need to provide the bias vector.</p></li>
284 /// <li>
285 /// <p><code>scale</code>: If the input type is an Image, you need to provide a scale factor.</p></li>
286 /// </ul>
287 /// <p>CoreML <code>ClassifierConfig</code> parameters can be specified using <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html">OutputConfig</a> <code>CompilerOptions</code>. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:</p>
288 /// <ul>
289 /// <li>
290 /// <p>Tensor type input:</p>
291 /// <ul>
292 /// <li>
293 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\]}}</code></p></li>
294 /// </ul></li>
295 /// <li>
296 /// <p>Tensor type input without input name (PyTorch):</p>
297 /// <ul>
298 /// <li>
299 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\]}\]</code></p></li>
300 /// </ul></li>
301 /// <li>
302 /// <p>Image type input:</p>
303 /// <ul>
304 /// <li>
305 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}}</code></p></li>
306 /// <li>
307 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
308 /// </ul></li>
309 /// <li>
310 /// <p>Image type input without input name (PyTorch):</p>
311 /// <ul>
312 /// <li>
313 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}\]</code></p></li>
314 /// <li>
315 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
316 /// </ul></li>
317 /// </ul>
318 /// <p>Depending on the model format, <code>DataInputConfig</code> requires the following parameters for <code>ml_eia2</code> <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice">OutputConfig:TargetDevice</a>.</p>
319 /// <ul>
320 /// <li>
321 /// <p>For TensorFlow models saved in the SavedModel format, specify the input names from <code>signature_def_key</code> and the input model shapes for <code>DataInputConfig</code>. Specify the <code>signature_def_key</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a> if the model does not use TensorFlow's default signature def key. For example:</p>
322 /// <ul>
323 /// <li>
324 /// <p><code>"DataInputConfig": {"inputs": \[1, 224, 224, 3\]}</code></p></li>
325 /// <li>
326 /// <p><code>"CompilerOptions": {"signature_def_key": "serving_custom"}</code></p></li>
327 /// </ul></li>
328 /// <li>
329 /// <p>For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in <code>DataInputConfig</code> and the output tensor names for <code>output_names</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a>. For example:</p>
330 /// <ul>
331 /// <li>
332 /// <p><code>"DataInputConfig": {"input_tensor:0": \[1, 224, 224, 3\]}</code></p></li>
333 /// <li>
334 /// <p><code>"CompilerOptions": {"output_names": \["output_tensor:0"\]}</code></p></li>
335 /// </ul></li>
336 /// </ul>
337 pub fn data_input_config(&self) -> ::std::option::Option<&str> {
338 self.data_input_config.as_deref()
339 }
340 /// <p>Identifies the framework in which the model was trained. For example: TENSORFLOW.</p>
341 pub fn framework(&self) -> ::std::option::Option<&crate::types::Framework> {
342 self.framework.as_ref()
343 }
344 /// <p>Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.</p>
345 /// <p>For information about framework versions supported for cloud targets and edge devices, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html">Cloud Supported Instance Types and Frameworks</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html">Edge Supported Frameworks</a>.</p>
346 pub fn framework_version(&self) -> ::std::option::Option<&str> {
347 self.framework_version.as_deref()
348 }
349}
350impl InputConfig {
351 /// Creates a new builder-style object to manufacture [`InputConfig`](crate::types::InputConfig).
352 pub fn builder() -> crate::types::builders::InputConfigBuilder {
353 crate::types::builders::InputConfigBuilder::default()
354 }
355}
356
/// A builder for [`InputConfig`](crate::types::InputConfig).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct InputConfigBuilder {
    // S3 path to the single gzip-compressed .tar.gz model-artifact archive; marked
    // "This field is required." on its setter.
    pub(crate) s3_uri: ::std::option::Option<::std::string::String>,
    // JSON dictionary describing the expected data inputs; framework-specific format.
    pub(crate) data_input_config: ::std::option::Option<::std::string::String>,
    // Framework the model was trained in (for example: TENSORFLOW).
    pub(crate) framework: ::std::option::Option<crate::types::Framework>,
    // Framework version; only supported for MXNet, PyTorch, TensorFlow and TensorFlow Lite.
    pub(crate) framework_version: ::std::option::Option<::std::string::String>,
}
366impl InputConfigBuilder {
367 /// <p>The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).</p>
368 /// This field is required.
369 pub fn s3_uri(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
370 self.s3_uri = ::std::option::Option::Some(input.into());
371 self
372 }
373 /// <p>The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).</p>
374 pub fn set_s3_uri(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
375 self.s3_uri = input;
376 self
377 }
378 /// <p>The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).</p>
379 pub fn get_s3_uri(&self) -> &::std::option::Option<::std::string::String> {
380 &self.s3_uri
381 }
382 /// <p>Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are <code>Framework</code> specific.</p>
383 /// <ul>
384 /// <li>
385 /// <p><code>TensorFlow</code>: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
386 /// <ul>
387 /// <li>
388 /// <p>Examples for one input:</p>
389 /// <ul>
390 /// <li>
391 /// <p>If using the console, <code>{"input":\[1,1024,1024,3\]}</code></p></li>
392 /// <li>
393 /// <p>If using the CLI, <code>{\"input\":\[1,1024,1024,3\]}</code></p></li>
394 /// </ul></li>
395 /// <li>
396 /// <p>Examples for two inputs:</p>
397 /// <ul>
398 /// <li>
399 /// <p>If using the console, <code>{"data1": \[1,28,28,1\], "data2":\[1,28,28,1\]}</code></p></li>
400 /// <li>
401 /// <p>If using the CLI, <code>{\"data1\": \[1,28,28,1\], \"data2\":\[1,28,28,1\]}</code></p></li>
402 /// </ul></li>
403 /// </ul></li>
404 /// <li>
405 /// <p><code>KERAS</code>: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, <code>DataInputConfig</code> should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.</p>
406 /// <ul>
407 /// <li>
408 /// <p>Examples for one input:</p>
409 /// <ul>
410 /// <li>
411 /// <p>If using the console, <code>{"input_1":\[1,3,224,224\]}</code></p></li>
412 /// <li>
413 /// <p>If using the CLI, <code>{\"input_1\":\[1,3,224,224\]}</code></p></li>
414 /// </ul></li>
415 /// <li>
416 /// <p>Examples for two inputs:</p>
417 /// <ul>
418 /// <li>
419 /// <p>If using the console, <code>{"input_1": \[1,3,224,224\], "input_2":\[1,3,224,224\]}</code></p></li>
420 /// <li>
421 /// <p>If using the CLI, <code>{\"input_1\": \[1,3,224,224\], \"input_2\":\[1,3,224,224\]}</code></p></li>
422 /// </ul></li>
423 /// </ul></li>
424 /// <li>
425 /// <p><code>MXNET/ONNX/DARKNET</code>: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
426 /// <ul>
427 /// <li>
428 /// <p>Examples for one input:</p>
429 /// <ul>
430 /// <li>
431 /// <p>If using the console, <code>{"data":\[1,3,1024,1024\]}</code></p></li>
432 /// <li>
433 /// <p>If using the CLI, <code>{\"data\":\[1,3,1024,1024\]}</code></p></li>
434 /// </ul></li>
435 /// <li>
436 /// <p>Examples for two inputs:</p>
437 /// <ul>
438 /// <li>
439 /// <p>If using the console, <code>{"var1": \[1,1,28,28\], "var2":\[1,1,28,28\]}</code></p></li>
440 /// <li>
441 /// <p>If using the CLI, <code>{\"var1\": \[1,1,28,28\], \"var2\":\[1,1,28,28\]}</code></p></li>
442 /// </ul></li>
443 /// </ul></li>
444 /// <li>
445 /// <p><code>PyTorch</code>: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.</p>
446 /// <ul>
447 /// <li>
448 /// <p>Examples for one input in dictionary format:</p>
449 /// <ul>
450 /// <li>
451 /// <p>If using the console, <code>{"input0":\[1,3,224,224\]}</code></p></li>
452 /// <li>
453 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\]}</code></p></li>
454 /// </ul></li>
455 /// <li>
456 /// <p>Example for one input in list format: <code>\[\[1,3,224,224\]\]</code></p></li>
457 /// <li>
458 /// <p>Examples for two inputs in dictionary format:</p>
459 /// <ul>
460 /// <li>
461 /// <p>If using the console, <code>{"input0":\[1,3,224,224\], "input1":\[1,3,224,224\]}</code></p></li>
462 /// <li>
463 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\], \"input1\":\[1,3,224,224\]}</code></p></li>
464 /// </ul></li>
465 /// <li>
466 /// <p>Example for two inputs in list format: <code>\[\[1,3,224,224\], \[1,3,224,224\]\]</code></p></li>
467 /// </ul></li>
468 /// <li>
469 /// <p><code>XGBOOST</code>: input data name and shape are not needed.</p></li>
470 /// </ul>
471 /// <p><code>DataInputConfig</code> supports the following parameters for <code>CoreML</code> <code>TargetDevice</code> (ML Model format):</p>
472 /// <ul>
473 /// <li>
474 /// <p><code>shape</code>: Input shape, for example <code>{"input_1": {"shape": \[1,224,224,3\]}}</code>. In addition to static input shapes, CoreML converter supports Flexible input shapes:</p>
475 /// <ul>
476 /// <li>
477 /// <p>Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: <code>{"input_1": {"shape": \["1..10", 224, 224, 3\]}}</code></p></li>
478 /// <li>
479 /// <p>Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: <code>{"input_1": {"shape": \[\[1, 224, 224, 3\], \[1, 160, 160, 3\]\]}}</code></p></li>
480 /// </ul></li>
481 /// <li>
482 /// <p><code>default_shape</code>: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example <code>{"input_1": {"shape": \["1..10", 224, 224, 3\], "default_shape": \[1, 224, 224, 3\]}}</code></p></li>
483 /// <li>
484 /// <p><code>type</code>: Input type. Allowed values: <code>Image</code> and <code>Tensor</code>. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as <code>bias</code> and <code>scale</code>.</p></li>
485 /// <li>
486 /// <p><code>bias</code>: If the input type is an Image, you need to provide the bias vector.</p></li>
487 /// <li>
488 /// <p><code>scale</code>: If the input type is an Image, you need to provide a scale factor.</p></li>
489 /// </ul>
490 /// <p>CoreML <code>ClassifierConfig</code> parameters can be specified using <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html">OutputConfig</a> <code>CompilerOptions</code>. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:</p>
491 /// <ul>
492 /// <li>
493 /// <p>Tensor type input:</p>
494 /// <ul>
495 /// <li>
496 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\]}}</code></p></li>
497 /// </ul></li>
498 /// <li>
499 /// <p>Tensor type input without input name (PyTorch):</p>
500 /// <ul>
501 /// <li>
502 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\]}\]</code></p></li>
503 /// </ul></li>
504 /// <li>
505 /// <p>Image type input:</p>
506 /// <ul>
507 /// <li>
508 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}}</code></p></li>
509 /// <li>
510 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
511 /// </ul></li>
512 /// <li>
513 /// <p>Image type input without input name (PyTorch):</p>
514 /// <ul>
515 /// <li>
516 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}\]</code></p></li>
517 /// <li>
518 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
519 /// </ul></li>
520 /// </ul>
521 /// <p>Depending on the model format, <code>DataInputConfig</code> requires the following parameters for <code>ml_eia2</code> <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice">OutputConfig:TargetDevice</a>.</p>
522 /// <ul>
523 /// <li>
524 /// <p>For TensorFlow models saved in the SavedModel format, specify the input names from <code>signature_def_key</code> and the input model shapes for <code>DataInputConfig</code>. Specify the <code>signature_def_key</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a> if the model does not use TensorFlow's default signature def key. For example:</p>
525 /// <ul>
526 /// <li>
527 /// <p><code>"DataInputConfig": {"inputs": \[1, 224, 224, 3\]}</code></p></li>
528 /// <li>
529 /// <p><code>"CompilerOptions": {"signature_def_key": "serving_custom"}</code></p></li>
530 /// </ul></li>
531 /// <li>
532 /// <p>For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in <code>DataInputConfig</code> and the output tensor names for <code>output_names</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a>. For example:</p>
533 /// <ul>
534 /// <li>
535 /// <p><code>"DataInputConfig": {"input_tensor:0": \[1, 224, 224, 3\]}</code></p></li>
536 /// <li>
537 /// <p><code>"CompilerOptions": {"output_names": \["output_tensor:0"\]}</code></p></li>
538 /// </ul></li>
539 /// </ul>
540 pub fn data_input_config(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
541 self.data_input_config = ::std::option::Option::Some(input.into());
542 self
543 }
544 /// <p>Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are <code>Framework</code> specific.</p>
545 /// <ul>
546 /// <li>
547 /// <p><code>TensorFlow</code>: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
548 /// <ul>
549 /// <li>
550 /// <p>Examples for one input:</p>
551 /// <ul>
552 /// <li>
553 /// <p>If using the console, <code>{"input":\[1,1024,1024,3\]}</code></p></li>
554 /// <li>
555 /// <p>If using the CLI, <code>{\"input\":\[1,1024,1024,3\]}</code></p></li>
556 /// </ul></li>
557 /// <li>
558 /// <p>Examples for two inputs:</p>
559 /// <ul>
560 /// <li>
561 /// <p>If using the console, <code>{"data1": \[1,28,28,1\], "data2":\[1,28,28,1\]}</code></p></li>
562 /// <li>
563 /// <p>If using the CLI, <code>{\"data1\": \[1,28,28,1\], \"data2\":\[1,28,28,1\]}</code></p></li>
564 /// </ul></li>
565 /// </ul></li>
566 /// <li>
567 /// <p><code>KERAS</code>: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, <code>DataInputConfig</code> should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.</p>
568 /// <ul>
569 /// <li>
570 /// <p>Examples for one input:</p>
571 /// <ul>
572 /// <li>
573 /// <p>If using the console, <code>{"input_1":\[1,3,224,224\]}</code></p></li>
574 /// <li>
575 /// <p>If using the CLI, <code>{\"input_1\":\[1,3,224,224\]}</code></p></li>
576 /// </ul></li>
577 /// <li>
578 /// <p>Examples for two inputs:</p>
579 /// <ul>
580 /// <li>
581 /// <p>If using the console, <code>{"input_1": \[1,3,224,224\], "input_2":\[1,3,224,224\]}</code></p></li>
582 /// <li>
583 /// <p>If using the CLI, <code>{\"input_1\": \[1,3,224,224\], \"input_2\":\[1,3,224,224\]}</code></p></li>
584 /// </ul></li>
585 /// </ul></li>
586 /// <li>
587 /// <p><code>MXNET/ONNX/DARKNET</code>: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
588 /// <ul>
589 /// <li>
590 /// <p>Examples for one input:</p>
591 /// <ul>
592 /// <li>
593 /// <p>If using the console, <code>{"data":\[1,3,1024,1024\]}</code></p></li>
594 /// <li>
595 /// <p>If using the CLI, <code>{\"data\":\[1,3,1024,1024\]}</code></p></li>
596 /// </ul></li>
597 /// <li>
598 /// <p>Examples for two inputs:</p>
599 /// <ul>
600 /// <li>
601 /// <p>If using the console, <code>{"var1": \[1,1,28,28\], "var2":\[1,1,28,28\]}</code></p></li>
602 /// <li>
603 /// <p>If using the CLI, <code>{\"var1\": \[1,1,28,28\], \"var2\":\[1,1,28,28\]}</code></p></li>
604 /// </ul></li>
605 /// </ul></li>
606 /// <li>
607 /// <p><code>PyTorch</code>: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.</p>
608 /// <ul>
609 /// <li>
610 /// <p>Examples for one input in dictionary format:</p>
611 /// <ul>
612 /// <li>
613 /// <p>If using the console, <code>{"input0":\[1,3,224,224\]}</code></p></li>
614 /// <li>
615 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\]}</code></p></li>
616 /// </ul></li>
617 /// <li>
618 /// <p>Example for one input in list format: <code>\[\[1,3,224,224\]\]</code></p></li>
619 /// <li>
620 /// <p>Examples for two inputs in dictionary format:</p>
621 /// <ul>
622 /// <li>
623 /// <p>If using the console, <code>{"input0":\[1,3,224,224\], "input1":\[1,3,224,224\]}</code></p></li>
624 /// <li>
625 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\], \"input1\":\[1,3,224,224\]}</code></p></li>
626 /// </ul></li>
627 /// <li>
628 /// <p>Example for two inputs in list format: <code>\[\[1,3,224,224\], \[1,3,224,224\]\]</code></p></li>
629 /// </ul></li>
630 /// <li>
631 /// <p><code>XGBOOST</code>: input data name and shape are not needed.</p></li>
632 /// </ul>
633 /// <p><code>DataInputConfig</code> supports the following parameters for <code>CoreML</code> <code>TargetDevice</code> (ML Model format):</p>
634 /// <ul>
635 /// <li>
636 /// <p><code>shape</code>: Input shape, for example <code>{"input_1": {"shape": \[1,224,224,3\]}}</code>. In addition to static input shapes, CoreML converter supports Flexible input shapes:</p>
637 /// <ul>
638 /// <li>
639 /// <p>Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: <code>{"input_1": {"shape": \["1..10", 224, 224, 3\]}}</code></p></li>
640 /// <li>
641 /// <p>Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: <code>{"input_1": {"shape": \[\[1, 224, 224, 3\], \[1, 160, 160, 3\]\]}}</code></p></li>
642 /// </ul></li>
643 /// <li>
644 /// <p><code>default_shape</code>: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example <code>{"input_1": {"shape": \["1..10", 224, 224, 3\], "default_shape": \[1, 224, 224, 3\]}}</code></p></li>
645 /// <li>
646 /// <p><code>type</code>: Input type. Allowed values: <code>Image</code> and <code>Tensor</code>. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as <code>bias</code> and <code>scale</code>.</p></li>
647 /// <li>
648 /// <p><code>bias</code>: If the input type is an Image, you need to provide the bias vector.</p></li>
649 /// <li>
650 /// <p><code>scale</code>: If the input type is an Image, you need to provide a scale factor.</p></li>
651 /// </ul>
/// <p>CoreML <code>ClassifierConfig</code> parameters can be specified using <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html">OutputConfig</a> <code>CompilerOptions</code>. CoreML converter supports TensorFlow and PyTorch models. CoreML conversion examples:</p>
653 /// <ul>
654 /// <li>
655 /// <p>Tensor type input:</p>
656 /// <ul>
657 /// <li>
658 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\]}}</code></p></li>
659 /// </ul></li>
660 /// <li>
661 /// <p>Tensor type input without input name (PyTorch):</p>
662 /// <ul>
663 /// <li>
664 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\]}\]</code></p></li>
665 /// </ul></li>
666 /// <li>
667 /// <p>Image type input:</p>
668 /// <ul>
669 /// <li>
670 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}}</code></p></li>
671 /// <li>
672 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
673 /// </ul></li>
674 /// <li>
675 /// <p>Image type input without input name (PyTorch):</p>
676 /// <ul>
677 /// <li>
678 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}\]</code></p></li>
679 /// <li>
680 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
681 /// </ul></li>
682 /// </ul>
683 /// <p>Depending on the model format, <code>DataInputConfig</code> requires the following parameters for <code>ml_eia2</code> <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice">OutputConfig:TargetDevice</a>.</p>
684 /// <ul>
685 /// <li>
686 /// <p>For TensorFlow models saved in the SavedModel format, specify the input names from <code>signature_def_key</code> and the input model shapes for <code>DataInputConfig</code>. Specify the <code>signature_def_key</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a> if the model does not use TensorFlow's default signature def key. For example:</p>
687 /// <ul>
688 /// <li>
689 /// <p><code>"DataInputConfig": {"inputs": \[1, 224, 224, 3\]}</code></p></li>
690 /// <li>
691 /// <p><code>"CompilerOptions": {"signature_def_key": "serving_custom"}</code></p></li>
692 /// </ul></li>
693 /// <li>
694 /// <p>For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in <code>DataInputConfig</code> and the output tensor names for <code>output_names</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a>. For example:</p>
695 /// <ul>
696 /// <li>
697 /// <p><code>"DataInputConfig": {"input_tensor:0": \[1, 224, 224, 3\]}</code></p></li>
698 /// <li>
699 /// <p><code>"CompilerOptions": {"output_names": \["output_tensor:0"\]}</code></p></li>
700 /// </ul></li>
701 /// </ul>
702 pub fn set_data_input_config(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
703 self.data_input_config = input;
704 self
705 }
706 /// <p>Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are <code>Framework</code> specific.</p>
707 /// <ul>
708 /// <li>
709 /// <p><code>TensorFlow</code>: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
710 /// <ul>
711 /// <li>
712 /// <p>Examples for one input:</p>
713 /// <ul>
714 /// <li>
715 /// <p>If using the console, <code>{"input":\[1,1024,1024,3\]}</code></p></li>
716 /// <li>
717 /// <p>If using the CLI, <code>{\"input\":\[1,1024,1024,3\]}</code></p></li>
718 /// </ul></li>
719 /// <li>
720 /// <p>Examples for two inputs:</p>
721 /// <ul>
722 /// <li>
723 /// <p>If using the console, <code>{"data1": \[1,28,28,1\], "data2":\[1,28,28,1\]}</code></p></li>
724 /// <li>
725 /// <p>If using the CLI, <code>{\"data1\": \[1,28,28,1\], \"data2\":\[1,28,28,1\]}</code></p></li>
726 /// </ul></li>
727 /// </ul></li>
728 /// <li>
729 /// <p><code>KERAS</code>: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, <code>DataInputConfig</code> should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.</p>
730 /// <ul>
731 /// <li>
732 /// <p>Examples for one input:</p>
733 /// <ul>
734 /// <li>
735 /// <p>If using the console, <code>{"input_1":\[1,3,224,224\]}</code></p></li>
736 /// <li>
737 /// <p>If using the CLI, <code>{\"input_1\":\[1,3,224,224\]}</code></p></li>
738 /// </ul></li>
739 /// <li>
740 /// <p>Examples for two inputs:</p>
741 /// <ul>
742 /// <li>
743 /// <p>If using the console, <code>{"input_1": \[1,3,224,224\], "input_2":\[1,3,224,224\]}</code></p></li>
744 /// <li>
745 /// <p>If using the CLI, <code>{\"input_1\": \[1,3,224,224\], \"input_2\":\[1,3,224,224\]}</code></p></li>
746 /// </ul></li>
747 /// </ul></li>
748 /// <li>
749 /// <p><code>MXNET/ONNX/DARKNET</code>: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.</p>
750 /// <ul>
751 /// <li>
752 /// <p>Examples for one input:</p>
753 /// <ul>
754 /// <li>
755 /// <p>If using the console, <code>{"data":\[1,3,1024,1024\]}</code></p></li>
756 /// <li>
757 /// <p>If using the CLI, <code>{\"data\":\[1,3,1024,1024\]}</code></p></li>
758 /// </ul></li>
759 /// <li>
760 /// <p>Examples for two inputs:</p>
761 /// <ul>
762 /// <li>
763 /// <p>If using the console, <code>{"var1": \[1,1,28,28\], "var2":\[1,1,28,28\]}</code></p></li>
764 /// <li>
765 /// <p>If using the CLI, <code>{\"var1\": \[1,1,28,28\], \"var2\":\[1,1,28,28\]}</code></p></li>
766 /// </ul></li>
767 /// </ul></li>
768 /// <li>
769 /// <p><code>PyTorch</code>: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.</p>
770 /// <ul>
771 /// <li>
772 /// <p>Examples for one input in dictionary format:</p>
773 /// <ul>
774 /// <li>
775 /// <p>If using the console, <code>{"input0":\[1,3,224,224\]}</code></p></li>
776 /// <li>
777 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\]}</code></p></li>
778 /// </ul></li>
779 /// <li>
780 /// <p>Example for one input in list format: <code>\[\[1,3,224,224\]\]</code></p></li>
781 /// <li>
782 /// <p>Examples for two inputs in dictionary format:</p>
783 /// <ul>
784 /// <li>
785 /// <p>If using the console, <code>{"input0":\[1,3,224,224\], "input1":\[1,3,224,224\]}</code></p></li>
786 /// <li>
787 /// <p>If using the CLI, <code>{\"input0\":\[1,3,224,224\], \"input1\":\[1,3,224,224\]}</code></p></li>
788 /// </ul></li>
789 /// <li>
790 /// <p>Example for two inputs in list format: <code>\[\[1,3,224,224\], \[1,3,224,224\]\]</code></p></li>
791 /// </ul></li>
792 /// <li>
793 /// <p><code>XGBOOST</code>: input data name and shape are not needed.</p></li>
794 /// </ul>
795 /// <p><code>DataInputConfig</code> supports the following parameters for <code>CoreML</code> <code>TargetDevice</code> (ML Model format):</p>
796 /// <ul>
797 /// <li>
798 /// <p><code>shape</code>: Input shape, for example <code>{"input_1": {"shape": \[1,224,224,3\]}}</code>. In addition to static input shapes, CoreML converter supports Flexible input shapes:</p>
799 /// <ul>
800 /// <li>
801 /// <p>Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: <code>{"input_1": {"shape": \["1..10", 224, 224, 3\]}}</code></p></li>
802 /// <li>
803 /// <p>Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: <code>{"input_1": {"shape": \[\[1, 224, 224, 3\], \[1, 160, 160, 3\]\]}}</code></p></li>
804 /// </ul></li>
805 /// <li>
806 /// <p><code>default_shape</code>: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example <code>{"input_1": {"shape": \["1..10", 224, 224, 3\], "default_shape": \[1, 224, 224, 3\]}}</code></p></li>
807 /// <li>
808 /// <p><code>type</code>: Input type. Allowed values: <code>Image</code> and <code>Tensor</code>. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as <code>bias</code> and <code>scale</code>.</p></li>
809 /// <li>
810 /// <p><code>bias</code>: If the input type is an Image, you need to provide the bias vector.</p></li>
811 /// <li>
812 /// <p><code>scale</code>: If the input type is an Image, you need to provide a scale factor.</p></li>
813 /// </ul>
/// <p>CoreML <code>ClassifierConfig</code> parameters can be specified using <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html">OutputConfig</a> <code>CompilerOptions</code>. CoreML converter supports TensorFlow and PyTorch models. CoreML conversion examples:</p>
815 /// <ul>
816 /// <li>
817 /// <p>Tensor type input:</p>
818 /// <ul>
819 /// <li>
820 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\]}}</code></p></li>
821 /// </ul></li>
822 /// <li>
823 /// <p>Tensor type input without input name (PyTorch):</p>
824 /// <ul>
825 /// <li>
826 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\]}\]</code></p></li>
827 /// </ul></li>
828 /// <li>
829 /// <p>Image type input:</p>
830 /// <ul>
831 /// <li>
832 /// <p><code>"DataInputConfig": {"input_1": {"shape": \[\[1,224,224,3\], \[1,160,160,3\]\], "default_shape": \[1,224,224,3\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}}</code></p></li>
833 /// <li>
834 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
835 /// </ul></li>
836 /// <li>
837 /// <p>Image type input without input name (PyTorch):</p>
838 /// <ul>
839 /// <li>
840 /// <p><code>"DataInputConfig": \[{"shape": \[\[1,3,224,224\], \[1,3,160,160\]\], "default_shape": \[1,3,224,224\], "type": "Image", "bias": \[-1,-1,-1\], "scale": 0.007843137255}\]</code></p></li>
841 /// <li>
842 /// <p><code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></p></li>
843 /// </ul></li>
844 /// </ul>
845 /// <p>Depending on the model format, <code>DataInputConfig</code> requires the following parameters for <code>ml_eia2</code> <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice">OutputConfig:TargetDevice</a>.</p>
846 /// <ul>
847 /// <li>
848 /// <p>For TensorFlow models saved in the SavedModel format, specify the input names from <code>signature_def_key</code> and the input model shapes for <code>DataInputConfig</code>. Specify the <code>signature_def_key</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a> if the model does not use TensorFlow's default signature def key. For example:</p>
849 /// <ul>
850 /// <li>
851 /// <p><code>"DataInputConfig": {"inputs": \[1, 224, 224, 3\]}</code></p></li>
852 /// <li>
853 /// <p><code>"CompilerOptions": {"signature_def_key": "serving_custom"}</code></p></li>
854 /// </ul></li>
855 /// <li>
856 /// <p>For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in <code>DataInputConfig</code> and the output tensor names for <code>output_names</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions"> <code>OutputConfig:CompilerOptions</code> </a>. For example:</p>
857 /// <ul>
858 /// <li>
859 /// <p><code>"DataInputConfig": {"input_tensor:0": \[1, 224, 224, 3\]}</code></p></li>
860 /// <li>
861 /// <p><code>"CompilerOptions": {"output_names": \["output_tensor:0"\]}</code></p></li>
862 /// </ul></li>
863 /// </ul>
864 pub fn get_data_input_config(&self) -> &::std::option::Option<::std::string::String> {
865 &self.data_input_config
866 }
867 /// <p>Identifies the framework in which the model was trained. For example: TENSORFLOW.</p>
868 /// This field is required.
869 pub fn framework(mut self, input: crate::types::Framework) -> Self {
870 self.framework = ::std::option::Option::Some(input);
871 self
872 }
873 /// <p>Identifies the framework in which the model was trained. For example: TENSORFLOW.</p>
874 pub fn set_framework(mut self, input: ::std::option::Option<crate::types::Framework>) -> Self {
875 self.framework = input;
876 self
877 }
878 /// <p>Identifies the framework in which the model was trained. For example: TENSORFLOW.</p>
879 pub fn get_framework(&self) -> &::std::option::Option<crate::types::Framework> {
880 &self.framework
881 }
882 /// <p>Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.</p>
883 /// <p>For information about framework versions supported for cloud targets and edge devices, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html">Cloud Supported Instance Types and Frameworks</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html">Edge Supported Frameworks</a>.</p>
884 pub fn framework_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
885 self.framework_version = ::std::option::Option::Some(input.into());
886 self
887 }
888 /// <p>Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.</p>
889 /// <p>For information about framework versions supported for cloud targets and edge devices, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html">Cloud Supported Instance Types and Frameworks</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html">Edge Supported Frameworks</a>.</p>
890 pub fn set_framework_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
891 self.framework_version = input;
892 self
893 }
894 /// <p>Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.</p>
895 /// <p>For information about framework versions supported for cloud targets and edge devices, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html">Cloud Supported Instance Types and Frameworks</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html">Edge Supported Frameworks</a>.</p>
896 pub fn get_framework_version(&self) -> &::std::option::Option<::std::string::String> {
897 &self.framework_version
898 }
899 /// Consumes the builder and constructs a [`InputConfig`](crate::types::InputConfig).
900 pub fn build(self) -> crate::types::InputConfig {
901 crate::types::InputConfig {
902 s3_uri: self.s3_uri,
903 data_input_config: self.data_input_config,
904 framework: self.framework,
905 framework_version: self.framework_version,
906 }
907 }
908}