aws_sdk_sagemaker/types/_auto_ml_job_config.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.

3/// <p>A collection of settings used for an AutoML job.</p>
4#[non_exhaustive]
5#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
6pub struct AutoMlJobConfig {
7 /// <p>How long an AutoML job is allowed to run, or how many candidates a job is allowed to generate.</p>
8 pub completion_criteria: ::std::option::Option<crate::types::AutoMlJobCompletionCriteria>,
9 /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
10 pub security_config: ::std::option::Option<crate::types::AutoMlSecurityConfig>,
11 /// <p>The configuration for generating a candidate for an AutoML job (optional).</p>
12 pub candidate_generation_config: ::std::option::Option<crate::types::AutoMlCandidateGenerationConfig>,
13 /// <p>The configuration for splitting the input training dataset.</p>
14 /// <p>Type: AutoMLDataSplitConfig</p>
15 pub data_split_config: ::std::option::Option<crate::types::AutoMlDataSplitConfig>,
16 /// <p>The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting <code>AUTO</code>. In <code>AUTO</code> mode, Autopilot chooses <code>ENSEMBLING</code> for datasets smaller than 100 MB, and <code>HYPERPARAMETER_TUNING</code> for larger ones.</p>
17 /// <p>The <code>ENSEMBLING</code> mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>ENSEMBLING</code> mode.</p>
18 /// <p>The <code>HYPERPARAMETER_TUNING</code> (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>HYPERPARAMETER_TUNING</code> mode.</p>
19 pub mode: ::std::option::Option<crate::types::AutoMlMode>,
20}
21impl AutoMlJobConfig {
22 /// <p>How long an AutoML job is allowed to run, or how many candidates a job is allowed to generate.</p>
23 pub fn completion_criteria(&self) -> ::std::option::Option<&crate::types::AutoMlJobCompletionCriteria> {
24 self.completion_criteria.as_ref()
25 }
26 /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
27 pub fn security_config(&self) -> ::std::option::Option<&crate::types::AutoMlSecurityConfig> {
28 self.security_config.as_ref()
29 }
30 /// <p>The configuration for generating a candidate for an AutoML job (optional).</p>
31 pub fn candidate_generation_config(&self) -> ::std::option::Option<&crate::types::AutoMlCandidateGenerationConfig> {
32 self.candidate_generation_config.as_ref()
33 }
34 /// <p>The configuration for splitting the input training dataset.</p>
35 /// <p>Type: AutoMLDataSplitConfig</p>
36 pub fn data_split_config(&self) -> ::std::option::Option<&crate::types::AutoMlDataSplitConfig> {
37 self.data_split_config.as_ref()
38 }
39 /// <p>The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting <code>AUTO</code>. In <code>AUTO</code> mode, Autopilot chooses <code>ENSEMBLING</code> for datasets smaller than 100 MB, and <code>HYPERPARAMETER_TUNING</code> for larger ones.</p>
40 /// <p>The <code>ENSEMBLING</code> mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>ENSEMBLING</code> mode.</p>
41 /// <p>The <code>HYPERPARAMETER_TUNING</code> (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>HYPERPARAMETER_TUNING</code> mode.</p>
42 pub fn mode(&self) -> ::std::option::Option<&crate::types::AutoMlMode> {
43 self.mode.as_ref()
44 }
45}
46impl AutoMlJobConfig {
47 /// Creates a new builder-style object to manufacture [`AutoMlJobConfig`](crate::types::AutoMlJobConfig).
48 pub fn builder() -> crate::types::builders::AutoMlJobConfigBuilder {
49 crate::types::builders::AutoMlJobConfigBuilder::default()
50 }
51}
52
53/// A builder for [`AutoMlJobConfig`](crate::types::AutoMlJobConfig).
54#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
55#[non_exhaustive]
56pub struct AutoMlJobConfigBuilder {
57 pub(crate) completion_criteria: ::std::option::Option<crate::types::AutoMlJobCompletionCriteria>,
58 pub(crate) security_config: ::std::option::Option<crate::types::AutoMlSecurityConfig>,
59 pub(crate) candidate_generation_config: ::std::option::Option<crate::types::AutoMlCandidateGenerationConfig>,
60 pub(crate) data_split_config: ::std::option::Option<crate::types::AutoMlDataSplitConfig>,
61 pub(crate) mode: ::std::option::Option<crate::types::AutoMlMode>,
62}
63impl AutoMlJobConfigBuilder {
64 /// <p>How long an AutoML job is allowed to run, or how many candidates a job is allowed to generate.</p>
65 pub fn completion_criteria(mut self, input: crate::types::AutoMlJobCompletionCriteria) -> Self {
66 self.completion_criteria = ::std::option::Option::Some(input);
67 self
68 }
69 /// <p>How long an AutoML job is allowed to run, or how many candidates a job is allowed to generate.</p>
70 pub fn set_completion_criteria(mut self, input: ::std::option::Option<crate::types::AutoMlJobCompletionCriteria>) -> Self {
71 self.completion_criteria = input;
72 self
73 }
74 /// <p>How long an AutoML job is allowed to run, or how many candidates a job is allowed to generate.</p>
75 pub fn get_completion_criteria(&self) -> &::std::option::Option<crate::types::AutoMlJobCompletionCriteria> {
76 &self.completion_criteria
77 }
78 /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
79 pub fn security_config(mut self, input: crate::types::AutoMlSecurityConfig) -> Self {
80 self.security_config = ::std::option::Option::Some(input);
81 self
82 }
83 /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
84 pub fn set_security_config(mut self, input: ::std::option::Option<crate::types::AutoMlSecurityConfig>) -> Self {
85 self.security_config = input;
86 self
87 }
88 /// <p>The security configuration for traffic encryption or Amazon VPC settings.</p>
89 pub fn get_security_config(&self) -> &::std::option::Option<crate::types::AutoMlSecurityConfig> {
90 &self.security_config
91 }
92 /// <p>The configuration for generating a candidate for an AutoML job (optional).</p>
93 pub fn candidate_generation_config(mut self, input: crate::types::AutoMlCandidateGenerationConfig) -> Self {
94 self.candidate_generation_config = ::std::option::Option::Some(input);
95 self
96 }
97 /// <p>The configuration for generating a candidate for an AutoML job (optional).</p>
98 pub fn set_candidate_generation_config(mut self, input: ::std::option::Option<crate::types::AutoMlCandidateGenerationConfig>) -> Self {
99 self.candidate_generation_config = input;
100 self
101 }
102 /// <p>The configuration for generating a candidate for an AutoML job (optional).</p>
103 pub fn get_candidate_generation_config(&self) -> &::std::option::Option<crate::types::AutoMlCandidateGenerationConfig> {
104 &self.candidate_generation_config
105 }
106 /// <p>The configuration for splitting the input training dataset.</p>
107 /// <p>Type: AutoMLDataSplitConfig</p>
108 pub fn data_split_config(mut self, input: crate::types::AutoMlDataSplitConfig) -> Self {
109 self.data_split_config = ::std::option::Option::Some(input);
110 self
111 }
112 /// <p>The configuration for splitting the input training dataset.</p>
113 /// <p>Type: AutoMLDataSplitConfig</p>
114 pub fn set_data_split_config(mut self, input: ::std::option::Option<crate::types::AutoMlDataSplitConfig>) -> Self {
115 self.data_split_config = input;
116 self
117 }
118 /// <p>The configuration for splitting the input training dataset.</p>
119 /// <p>Type: AutoMLDataSplitConfig</p>
120 pub fn get_data_split_config(&self) -> &::std::option::Option<crate::types::AutoMlDataSplitConfig> {
121 &self.data_split_config
122 }
123 /// <p>The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting <code>AUTO</code>. In <code>AUTO</code> mode, Autopilot chooses <code>ENSEMBLING</code> for datasets smaller than 100 MB, and <code>HYPERPARAMETER_TUNING</code> for larger ones.</p>
124 /// <p>The <code>ENSEMBLING</code> mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>ENSEMBLING</code> mode.</p>
125 /// <p>The <code>HYPERPARAMETER_TUNING</code> (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>HYPERPARAMETER_TUNING</code> mode.</p>
126 pub fn mode(mut self, input: crate::types::AutoMlMode) -> Self {
127 self.mode = ::std::option::Option::Some(input);
128 self
129 }
130 /// <p>The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting <code>AUTO</code>. In <code>AUTO</code> mode, Autopilot chooses <code>ENSEMBLING</code> for datasets smaller than 100 MB, and <code>HYPERPARAMETER_TUNING</code> for larger ones.</p>
131 /// <p>The <code>ENSEMBLING</code> mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>ENSEMBLING</code> mode.</p>
132 /// <p>The <code>HYPERPARAMETER_TUNING</code> (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>HYPERPARAMETER_TUNING</code> mode.</p>
133 pub fn set_mode(mut self, input: ::std::option::Option<crate::types::AutoMlMode>) -> Self {
134 self.mode = input;
135 self
136 }
137 /// <p>The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting <code>AUTO</code>. In <code>AUTO</code> mode, Autopilot chooses <code>ENSEMBLING</code> for datasets smaller than 100 MB, and <code>HYPERPARAMETER_TUNING</code> for larger ones.</p>
138 /// <p>The <code>ENSEMBLING</code> mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>ENSEMBLING</code> mode.</p>
139 /// <p>The <code>HYPERPARAMETER_TUNING</code> (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See <a href="https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-model-support-validation.html#autopilot-algorithm-support">Autopilot algorithm support</a> for a list of algorithms supported by <code>HYPERPARAMETER_TUNING</code> mode.</p>
140 pub fn get_mode(&self) -> &::std::option::Option<crate::types::AutoMlMode> {
141 &self.mode
142 }
143 /// Consumes the builder and constructs a [`AutoMlJobConfig`](crate::types::AutoMlJobConfig).
144 pub fn build(self) -> crate::types::AutoMlJobConfig {
145 crate::types::AutoMlJobConfig {
146 completion_criteria: self.completion_criteria,
147 security_config: self.security_config,
148 candidate_generation_config: self.candidate_generation_config,
149 data_split_config: self.data_split_config,
150 mode: self.mode,
151 }
152 }
153}