aws_sdk_sagemaker/operation/create_auto_ml_job_v2/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::create_auto_ml_job_v2::_create_auto_ml_job_v2_output::CreateAutoMlJobV2OutputBuilder;

pub use crate::operation::create_auto_ml_job_v2::_create_auto_ml_job_v2_input::CreateAutoMlJobV2InputBuilder;

impl crate::operation::create_auto_ml_job_v2::builders::CreateAutoMlJobV2InputBuilder {
    /// Sends a request with this input using the given client.
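    ///
    /// For example, a minimal sketch of the pattern, assuming the job name is a placeholder and
    /// that a real request also populates the input data, output data, role, and problem-type
    /// configuration required by the service:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::operation::create_auto_ml_job_v2::CreateAutoMlJobV2Input;
    ///
    /// // Build the input independently of any client, then send it with one later.
    /// let output = CreateAutoMlJobV2Input::builder()
    ///     .auto_ml_job_name("my-automl-job")
    ///     .send_with(client)
    ///     .await?;
    /// println!("created: {:?}", output.auto_ml_job_arn());
    /// # Ok(())
    /// # }
    /// ```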
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::create_auto_ml_job_v2::CreateAutoMlJobV2Output,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2Error,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.create_auto_ml_job_v2();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `CreateAutoMLJobV2`.
///
/// <p>Creates an Autopilot job, also referred to as an Autopilot experiment or AutoML job V2.</p>
/// <p>An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment.</p>
/// <p>For more information about AutoML jobs, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html">https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html</a> in the SageMaker AI Developer Guide.</p>
/// <p>AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation.</p><note>
/// <p><a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJobV2.html">CreateAutoMLJobV2</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJobV2.html">DescribeAutoMLJobV2</a> are new versions of <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJob.html">CreateAutoMLJob</a> and <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJob.html">DescribeAutoMLJob</a>, which offer backward compatibility.</p>
/// <p><code>CreateAutoMLJobV2</code> can manage tabular problem types identical to those of its previous version <code>CreateAutoMLJob</code>, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).</p>
/// <p>Find guidelines about how to migrate a <code>CreateAutoMLJob</code> to <code>CreateAutoMLJobV2</code> in <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development-create-experiment.html#autopilot-create-experiment-api-migrate-v1-v2">Migrate a CreateAutoMLJob to CreateAutoMLJobV2</a>.</p>
/// </note>
/// <p>For the list of available problem types supported by <code>CreateAutoMLJobV2</code>, see <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLProblemTypeConfig.html">AutoMLProblemTypeConfig</a>.</p>
/// <p>You can find the best-performing model after you run an AutoML job V2 by calling <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeAutoMLJobV2.html">DescribeAutoMLJobV2</a>.</p>
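///
/// A minimal usage sketch follows, assuming the job name and role ARN are placeholders and the
/// input channel, output, and problem-type configurations have been built elsewhere:
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_sagemaker::Client,
/// #     input_channel: aws_sdk_sagemaker::types::AutoMlJobChannel,
/// #     output_config: aws_sdk_sagemaker::types::AutoMlOutputDataConfig,
/// #     problem_config: aws_sdk_sagemaker::types::AutoMlProblemTypeConfig,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .create_auto_ml_job_v2()
///     .auto_ml_job_name("my-automl-job")
///     .auto_ml_job_input_data_config(input_channel)
///     .output_data_config(output_config)
///     .auto_ml_problem_type_config(problem_config)
///     .role_arn("arn:aws:iam::111122223333:role/service-role/MySageMakerRole")
///     .send()
///     .await?;
/// println!("created: {:?}", output.auto_ml_job_arn());
/// # Ok(())
/// # }
/// ```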
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct CreateAutoMLJobV2FluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::create_auto_ml_job_v2::builders::CreateAutoMlJobV2InputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::create_auto_ml_job_v2::CreateAutoMlJobV2Output,
        crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2Error,
    > for CreateAutoMLJobV2FluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::create_auto_ml_job_v2::CreateAutoMlJobV2Output,
            crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2Error,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl CreateAutoMLJobV2FluentBuilder {
    /// Creates a new `CreateAutoMLJobV2FluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the CreateAutoMLJobV2 as a reference.
    pub fn as_input(&self) -> &crate::operation::create_auto_ml_job_v2::builders::CreateAutoMlJobV2InputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
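    ///
    /// A minimal error-handling sketch (the match arms are illustrative, not exhaustive, and the
    /// job name is a placeholder):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) {
    /// use aws_sdk_sagemaker::error::SdkError;
    ///
    /// match client.create_auto_ml_job_v2().auto_ml_job_name("my-automl-job").send().await {
    ///     Ok(output) => println!("created: {:?}", output.auto_ml_job_arn()),
    ///     // The service rejected the request; inspect the modeled operation error.
    ///     Err(SdkError::ServiceError(context)) => eprintln!("service error: {:?}", context.err()),
    ///     // Construction, dispatch, timeout, and other non-service failures.
    ///     Err(other) => eprintln!("request failed: {other}"),
    /// }
    /// # }
    /// ```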
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::create_auto_ml_job_v2::CreateAutoMlJobV2Output,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2Error,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
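    ///
    /// For example, a minimal sketch, assuming the customizable operation's `config_override`
    /// hook is used to adjust retries for this call only (the job name and attempt count are
    /// placeholders):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::config::retry::RetryConfig;
    ///
    /// let output = client
    ///     .create_auto_ml_job_v2()
    ///     .auto_ml_job_name("my-automl-job")
    ///     .customize()
    ///     // Apply a per-operation configuration override instead of reconfiguring the client.
    ///     .config_override(
    ///         aws_sdk_sagemaker::config::Config::builder().retry_config(RetryConfig::standard().with_max_attempts(5)),
    ///     )
    ///     .send()
    ///     .await?;
    /// # let _ = output;
    /// # Ok(())
    /// # }
    /// ```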
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::create_auto_ml_job_v2::CreateAutoMlJobV2Output,
        crate::operation::create_auto_ml_job_v2::CreateAutoMLJobV2Error,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>Identifies an Autopilot job. The name must be unique to your account and is case insensitive.</p>
    pub fn auto_ml_job_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.auto_ml_job_name(input.into());
        self
    }
    /// <p>Identifies an Autopilot job. The name must be unique to your account and is case insensitive.</p>
    pub fn set_auto_ml_job_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_auto_ml_job_name(input);
        self
    }
    /// <p>Identifies an Autopilot job. The name must be unique to your account and is case insensitive.</p>
    pub fn get_auto_ml_job_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_auto_ml_job_name()
    }
    ///
    /// Appends an item to `AutoMLJobInputDataConfig`.
    ///
    /// To override the contents of this collection use [`set_auto_ml_job_input_data_config`](Self::set_auto_ml_job_input_data_config).
    ///
    /// <p>An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJob.html#sagemaker-CreateAutoMLJob-request-InputDataConfig">InputDataConfig</a> attribute in the <code>CreateAutoMLJob</code> input parameters. The supported formats depend on the problem type:</p>
    /// <ul>
    /// <li>
    /// <p>For tabular problem types: <code>S3Prefix</code>, <code>ManifestFile</code>.</p></li>
    /// <li>
    /// <p>For image classification: <code>S3Prefix</code>, <code>ManifestFile</code>, <code>AugmentedManifestFile</code>.</p></li>
    /// <li>
    /// <p>For text classification: <code>S3Prefix</code>.</p></li>
    /// <li>
    /// <p>For time-series forecasting: <code>S3Prefix</code>.</p></li>
    /// <li>
    /// <p>For text generation (LLMs fine-tuning): <code>S3Prefix</code>.</p></li>
    /// </ul>
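    ///
    /// A minimal sketch of the append-versus-replace behavior, assuming the two channels are
    /// `AutoMlJobChannel` values built elsewhere (for example, a training and a validation
    /// channel pointing at S3 prefixes):
    ///
    /// ```no_run
    /// # async fn example(
    /// #     client: &aws_sdk_sagemaker::Client,
    /// #     training_channel: aws_sdk_sagemaker::types::AutoMlJobChannel,
    /// #     validation_channel: aws_sdk_sagemaker::types::AutoMlJobChannel,
    /// # ) {
    /// // Each call appends one channel to `AutoMLJobInputDataConfig`.
    /// let builder = client
    ///     .create_auto_ml_job_v2()
    ///     .auto_ml_job_input_data_config(training_channel)
    ///     .auto_ml_job_input_data_config(validation_channel);
    /// // Calling `set_auto_ml_job_input_data_config(Some(channels))` would replace the whole list instead.
    /// # let _ = builder;
    /// # }
    /// ```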
    pub fn auto_ml_job_input_data_config(mut self, input: crate::types::AutoMlJobChannel) -> Self {
        self.inner = self.inner.auto_ml_job_input_data_config(input);
        self
    }
    /// <p>An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJob.html#sagemaker-CreateAutoMLJob-request-InputDataConfig">InputDataConfig</a> attribute in the <code>CreateAutoMLJob</code> input parameters. The supported formats depend on the problem type:</p>
    /// <ul>
    /// <li>
    /// <p>For tabular problem types: <code>S3Prefix</code>, <code>ManifestFile</code>.</p></li>
    /// <li>
    /// <p>For image classification: <code>S3Prefix</code>, <code>ManifestFile</code>, <code>AugmentedManifestFile</code>.</p></li>
    /// <li>
    /// <p>For text classification: <code>S3Prefix</code>.</p></li>
    /// <li>
    /// <p>For time-series forecasting: <code>S3Prefix</code>.</p></li>
    /// <li>
    /// <p>For text generation (LLMs fine-tuning): <code>S3Prefix</code>.</p></li>
    /// </ul>
    pub fn set_auto_ml_job_input_data_config(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::AutoMlJobChannel>>) -> Self {
        self.inner = self.inner.set_auto_ml_job_input_data_config(input);
        self
    }
    /// <p>An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateAutoMLJob.html#sagemaker-CreateAutoMLJob-request-InputDataConfig">InputDataConfig</a> attribute in the <code>CreateAutoMLJob</code> input parameters. The supported formats depend on the problem type:</p>
    /// <ul>
    /// <li>
    /// <p>For tabular problem types: <code>S3Prefix</code>, <code>ManifestFile</code>.</p></li>
    /// <li>
    /// <p>For image classification: <code>S3Prefix</code>, <code>ManifestFile</code>, <code>AugmentedManifestFile</code>.</p></li>
    /// <li>
    /// <p>For text classification: <code>S3Prefix</code>.</p></li>
    /// <li>
    /// <p>For time-series forecasting: <code>S3Prefix</code>.</p></li>
    /// <li>
    /// <p>For text generation (LLMs fine-tuning): <code>S3Prefix</code>.</p></li>
    /// </ul>
    pub fn get_auto_ml_job_input_data_config(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::AutoMlJobChannel>> {
        self.inner.get_auto_ml_job_input_data_config()
    }
    /// <p>Provides information about encryption and the Amazon S3 output path needed to store artifacts from an AutoML job.</p>
    pub fn output_data_config(mut self, input: crate::types::AutoMlOutputDataConfig) -> Self {
        self.inner = self.inner.output_data_config(input);
        self
    }
    /// <p>Provides information about encryption and the Amazon S3 output path needed to store artifacts from an AutoML job.</p>
    pub fn set_output_data_config(mut self, input: ::std::option::Option<crate::types::AutoMlOutputDataConfig>) -> Self {
        self.inner = self.inner.set_output_data_config(input);
        self
    }
    /// <p>Provides information about encryption and the Amazon S3 output path needed to store artifacts from an AutoML job.</p>
    pub fn get_output_data_config(&self) -> &::std::option::Option<crate::types::AutoMlOutputDataConfig> {
        self.inner.get_output_data_config()
    }
    /// <p>Defines the configuration settings of one of the supported problem types.</p>
    pub fn auto_ml_problem_type_config(mut self, input: crate::types::AutoMlProblemTypeConfig) -> Self {
        self.inner = self.inner.auto_ml_problem_type_config(input);
        self
    }
    /// <p>Defines the configuration settings of one of the supported problem types.</p>
    pub fn set_auto_ml_problem_type_config(mut self, input: ::std::option::Option<crate::types::AutoMlProblemTypeConfig>) -> Self {
        self.inner = self.inner.set_auto_ml_problem_type_config(input);
        self
    }
    /// <p>Defines the configuration settings of one of the supported problem types.</p>
    pub fn get_auto_ml_problem_type_config(&self) -> &::std::option::Option<crate::types::AutoMlProblemTypeConfig> {
        self.inner.get_auto_ml_problem_type_config()
    }
    /// <p>The ARN of the role that is used to access the data.</p>
    pub fn role_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.role_arn(input.into());
        self
    }
    /// <p>The ARN of the role that is used to access the data.</p>
    pub fn set_role_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_role_arn(input);
        self
    }
    /// <p>The ARN of the role that is used to access the data.</p>
    pub fn get_role_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_role_arn()
    }
    ///
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, such as by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>. Tag keys must be unique per resource.</p>
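    ///
    /// A minimal sketch of tagging the job; the key and value are placeholders, and the
    /// `?` on `build()` assumes the fallible builder that required tag keys and values imply:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_sagemaker::types::Tag;
    ///
    /// let builder = client
    ///     .create_auto_ml_job_v2()
    ///     // Each call appends one tag; `set_tags` would replace the whole list.
    ///     .tags(Tag::builder().key("project").value("demo").build()?)
    ///     .tags(Tag::builder().key("owner").value("data-science").build()?);
    /// # let _ = builder;
    /// # Ok(())
    /// # }
    /// ```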
    pub fn tags(mut self, input: crate::types::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, such as by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>. Tag keys must be unique per resource.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
    /// <p>An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, such as by purpose, owner, or environment. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a>. Tag keys must be unique per resource.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
        self.inner.get_tags()
    }
    /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
    pub fn security_config(mut self, input: crate::types::AutoMlSecurityConfig) -> Self {
        self.inner = self.inner.security_config(input);
        self
    }
    /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
    pub fn set_security_config(mut self, input: ::std::option::Option<crate::types::AutoMlSecurityConfig>) -> Self {
        self.inner = self.inner.set_security_config(input);
        self
    }
    /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
    pub fn get_security_config(&self) -> &::std::option::Option<crate::types::AutoMlSecurityConfig> {
        self.inner.get_security_config()
    }
    /// <p>Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html">AutoMLJobObjective</a>.</p><note>
    /// <ul>
    /// <li>
    /// <p>For tabular problem types: You must either provide both the <code>AutoMLJobObjective</code> and indicate the type of supervised learning problem in <code>AutoMLProblemTypeConfig</code> (<code>TabularJobConfig.ProblemType</code>), or none at all.</p></li>
    /// <li>
    /// <p>For text generation problem types (LLMs fine-tuning): Fine-tuning language models in Autopilot does not require setting the <code>AutoMLJobObjective</code> field. Autopilot fine-tunes LLMs without requiring multiple candidates to be trained and evaluated. Instead, using your dataset, Autopilot directly fine-tunes your target model to enhance a default objective metric, the cross-entropy loss. After fine-tuning a language model, you can evaluate the quality of its generated text using different metrics. For a list of the available metrics, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-llms-finetuning-metrics.html">Metrics for fine-tuning LLMs in Autopilot</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn auto_ml_job_objective(mut self, input: crate::types::AutoMlJobObjective) -> Self {
        self.inner = self.inner.auto_ml_job_objective(input);
        self
    }
    /// <p>Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html">AutoMLJobObjective</a>.</p><note>
    /// <ul>
    /// <li>
    /// <p>For tabular problem types: You must either provide both the <code>AutoMLJobObjective</code> and indicate the type of supervised learning problem in <code>AutoMLProblemTypeConfig</code> (<code>TabularJobConfig.ProblemType</code>), or none at all.</p></li>
    /// <li>
    /// <p>For text generation problem types (LLMs fine-tuning): Fine-tuning language models in Autopilot does not require setting the <code>AutoMLJobObjective</code> field. Autopilot fine-tunes LLMs without requiring multiple candidates to be trained and evaluated. Instead, using your dataset, Autopilot directly fine-tunes your target model to enhance a default objective metric, the cross-entropy loss. After fine-tuning a language model, you can evaluate the quality of its generated text using different metrics. For a list of the available metrics, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-llms-finetuning-metrics.html">Metrics for fine-tuning LLMs in Autopilot</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn set_auto_ml_job_objective(mut self, input: ::std::option::Option<crate::types::AutoMlJobObjective>) -> Self {
        self.inner = self.inner.set_auto_ml_job_objective(input);
        self
    }
    /// <p>Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see <a href="https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AutoMLJobObjective.html">AutoMLJobObjective</a>.</p><note>
    /// <ul>
    /// <li>
    /// <p>For tabular problem types: You must either provide both the <code>AutoMLJobObjective</code> and indicate the type of supervised learning problem in <code>AutoMLProblemTypeConfig</code> (<code>TabularJobConfig.ProblemType</code>), or none at all.</p></li>
    /// <li>
    /// <p>For text generation problem types (LLMs fine-tuning): Fine-tuning language models in Autopilot does not require setting the <code>AutoMLJobObjective</code> field. Autopilot fine-tunes LLMs without requiring multiple candidates to be trained and evaluated. Instead, using your dataset, Autopilot directly fine-tunes your target model to enhance a default objective metric, the cross-entropy loss. After fine-tuning a language model, you can evaluate the quality of its generated text using different metrics. For a list of the available metrics, see <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-llms-finetuning-metrics.html">Metrics for fine-tuning LLMs in Autopilot</a>.</p></li>
    /// </ul>
    /// </note>
    pub fn get_auto_ml_job_objective(&self) -> &::std::option::Option<crate::types::AutoMlJobObjective> {
        self.inner.get_auto_ml_job_objective()
    }
    /// <p>Specifies how to generate the endpoint name for an automatic one-click Autopilot model deployment.</p>
    pub fn model_deploy_config(mut self, input: crate::types::ModelDeployConfig) -> Self {
        self.inner = self.inner.model_deploy_config(input);
        self
    }
    /// <p>Specifies how to generate the endpoint name for an automatic one-click Autopilot model deployment.</p>
    pub fn set_model_deploy_config(mut self, input: ::std::option::Option<crate::types::ModelDeployConfig>) -> Self {
        self.inner = self.inner.set_model_deploy_config(input);
        self
    }
    /// <p>Specifies how to generate the endpoint name for an automatic one-click Autopilot model deployment.</p>
    pub fn get_model_deploy_config(&self) -> &::std::option::Option<crate::types::ModelDeployConfig> {
        self.inner.get_model_deploy_config()
    }
    /// <p>This structure specifies how to split the data into train and validation datasets.</p>
    /// <p>The validation and training datasets must contain the same headers. For jobs created by calling <code>CreateAutoMLJob</code>, the validation dataset must be less than 2 GB in size.</p><note>
    /// <p>This attribute must not be set for the time-series forecasting problem type, as Autopilot automatically splits the input dataset into training and validation sets.</p>
    /// </note>
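    ///
    /// For example, a minimal sketch reserving 20% of the data for validation (the 0.2 fraction
    /// is a placeholder value):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_sagemaker::Client) {
    /// use aws_sdk_sagemaker::types::AutoMlDataSplitConfig;
    ///
    /// let builder = client
    ///     .create_auto_ml_job_v2()
    ///     .data_split_config(AutoMlDataSplitConfig::builder().validation_fraction(0.2).build());
    /// # let _ = builder;
    /// # }
    /// ```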
    pub fn data_split_config(mut self, input: crate::types::AutoMlDataSplitConfig) -> Self {
        self.inner = self.inner.data_split_config(input);
        self
    }
    /// <p>This structure specifies how to split the data into train and validation datasets.</p>
    /// <p>The validation and training datasets must contain the same headers. For jobs created by calling <code>CreateAutoMLJob</code>, the validation dataset must be less than 2 GB in size.</p><note>
    /// <p>This attribute must not be set for the time-series forecasting problem type, as Autopilot automatically splits the input dataset into training and validation sets.</p>
    /// </note>
    pub fn set_data_split_config(mut self, input: ::std::option::Option<crate::types::AutoMlDataSplitConfig>) -> Self {
        self.inner = self.inner.set_data_split_config(input);
        self
    }
    /// <p>This structure specifies how to split the data into train and validation datasets.</p>
    /// <p>The validation and training datasets must contain the same headers. For jobs created by calling <code>CreateAutoMLJob</code>, the validation dataset must be less than 2 GB in size.</p><note>
    /// <p>This attribute must not be set for the time-series forecasting problem type, as Autopilot automatically splits the input dataset into training and validation sets.</p>
    /// </note>
    pub fn get_data_split_config(&self) -> &::std::option::Option<crate::types::AutoMlDataSplitConfig> {
        self.inner.get_data_split_config()
    }
    /// <p>Specifies the compute configuration for the AutoML job V2.</p>
    pub fn auto_ml_compute_config(mut self, input: crate::types::AutoMlComputeConfig) -> Self {
        self.inner = self.inner.auto_ml_compute_config(input);
        self
    }
    /// <p>Specifies the compute configuration for the AutoML job V2.</p>
    pub fn set_auto_ml_compute_config(mut self, input: ::std::option::Option<crate::types::AutoMlComputeConfig>) -> Self {
        self.inner = self.inner.set_auto_ml_compute_config(input);
        self
    }
    /// <p>Specifies the compute configuration for the AutoML job V2.</p>
    pub fn get_auto_ml_compute_config(&self) -> &::std::option::Option<crate::types::AutoMlComputeConfig> {
        self.inner.get_auto_ml_compute_config()
    }
}