gcp_bigquery_client/model/training_options.rs
1//! Options used in model training.
2use crate::model::arima_order::ArimaOrder;
3use std::collections::HashMap;
4
/// Options used when training a BigQuery ML model.
///
/// Every field is optional; unset fields take the BigQuery service defaults.
/// Field names are serialized in camelCase on the wire (see the `serde` attribute).
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TrainingOptions {
    /// Whether to train a model from the last checkpoint.
    pub warm_start: Option<bool>,
    /// L1 regularization coefficient.
    pub l_1_regularization: Option<f64>,
    /// Name of input label columns in training data.
    pub input_label_columns: Option<Vec<String>>,
    /// Feedback type that specifies which algorithm to run for matrix factorization.
    pub feedback_type: Option<FeedbackType>,
    /// Distance type for clustering models.
    pub distance_type: Option<DistanceType>,
    /// Learning rate in training. Used only for iterative training algorithms.
    pub learn_rate: Option<f64>,
    /// Optimization strategy for training linear regression models.
    pub optimization_strategy: Option<OptimizationStrategy>,
    /// The data split type for training and evaluation, e.g. RANDOM.
    pub data_split_method: Option<DataSplitMethod>,
    /// Item column specified for matrix factorization models.
    pub item_column: Option<String>,
    /// The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
    pub data_split_eval_fraction: Option<f64>,
    /// Hidden units for dnn models.
    pub hidden_units: Option<Vec<i64>>,
    /// Number of clusters for clustering models.
    pub num_clusters: Option<i64>,
    /// Num factors specified for matrix factorization models.
    pub num_factors: Option<i64>,
    /// Specifies the initial learning rate for the line search learn rate strategy.
    pub initial_learn_rate: Option<f64>,
    /// Type of loss function used during training run.
    pub loss_type: Option<LossType>,
    /// When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms.
    pub min_relative_progress: Option<f64>,
    /// Dropout probability for dnn models.
    pub dropout: Option<f64>,
    /// The number of periods ahead that need to be forecasted.
    pub horizon: Option<i64>,
    /// Google Cloud Storage URI from which the model was imported. Only applicable for imported models.
    pub model_uri: Option<String>,
    /// Minimum split loss for boosted tree models.
    pub min_split_loss: Option<f64>,
    /// Batch size for dnn models.
    pub batch_size: Option<i64>,
    /// Column to be designated as time series timestamp for ARIMA model.
    pub time_series_timestamp_column: Option<String>,
    /// Whether to enable auto ARIMA or not.
    pub auto_arima: Option<bool>,
    /// Hyperparameter for matrix factorization when implicit feedback type is specified.
    pub wals_alpha: Option<f64>,
    /// The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM.
    pub kmeans_initialization_column: Option<String>,
    /// The maximum number of iterations in training. Used only for iterative training algorithms.
    pub max_iterations: Option<i64>,
    /// Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.
    pub preserve_input_structs: Option<bool>,
    /// Weights associated with each label class, for rebalancing the training data. Only applicable for classification models.
    pub label_class_weights: Option<HashMap<String, f64>>,
    /// The strategy to determine learn rate for the current iteration.
    pub learn_rate_strategy: Option<LearnRateStrategy>,
    /// The method used to initialize the centroids for kmeans algorithm.
    pub kmeans_initialization_method: Option<KmeansInitializationMethod>,
    /// User column specified for matrix factorization models.
    pub user_column: Option<String>,
    /// Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
    pub subsample: Option<f64>,
    /// L2 regularization coefficient.
    pub l_2_regularization: Option<f64>,
    /// The max value of non-seasonal p and q.
    pub auto_arima_max_order: Option<i64>,
    /// Maximum depth of a tree for boosted tree models.
    pub max_tree_depth: Option<i64>,
    /// Column to be designated as time series data for ARIMA model.
    pub time_series_data_column: Option<String>,
    /// The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
    pub data_split_column: Option<String>,
    /// A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.
    pub non_seasonal_order: Option<ArimaOrder>,
    /// Include drift when fitting an ARIMA model.
    pub include_drift: Option<bool>,
    /// Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms.
    pub early_stop: Option<bool>,
    /// The geographical region based on which the holidays are considered in time series modeling. If a valid value is specified, then holiday effects modeling is enabled.
    pub holiday_region: Option<HolidayRegion>,
    /// The data frequency of a time series.
    pub data_frequency: Option<DataFrequency>,
    /// The time series id column that was used during ARIMA model training.
    pub time_series_id_column: Option<String>,
}
95
/// Feedback type that specifies which algorithm to run for matrix factorization.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"IMPLICIT"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum FeedbackType {
    /// Unspecified feedback type.
    FeedbackTypeUnspecified,
    /// Use weighted-als for implicit feedback problems.
    Implicit,
    /// Use nonweighted-als for explicit feedback problems.
    Explicit,
}
107
/// Distance type for clustering models.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"EUCLIDEAN"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum DistanceType {
    /// Unspecified distance type.
    DistanceTypeUnspecified,
    /// Euclidean distance.
    Euclidean,
    /// Cosine distance.
    Cosine,
}
119
/// Optimization strategy for training linear regression models.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"BATCH_GRADIENT_DESCENT"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum OptimizationStrategy {
    /// Unspecified optimization strategy.
    OptimizationStrategyUnspecified,
    /// Uses an iterative batch gradient descent algorithm.
    BatchGradientDescent,
    /// Uses a normal equation to solve linear regression problem.
    NormalEquation,
}
131
/// The data split type for training and evaluation, e.g. RANDOM.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"AUTO_SPLIT"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum DataSplitMethod {
    /// Unspecified data split method.
    DataSplitMethodUnspecified,
    /// Splits data randomly.
    Random,
    /// Splits data with the user provided tags.
    Custom,
    /// Splits data sequentially.
    Sequential,
    /// Data split will be skipped.
    NoSplit,
    /// Splits data automatically: Uses NO_SPLIT if the data size is small. Otherwise uses RANDOM.
    AutoSplit,
}
149
/// Type of loss function used during training run.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"MEAN_SQUARED_LOSS"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum LossType {
    /// Unspecified loss type.
    LossTypeUnspecified,
    /// Mean squared loss, used for linear regression.
    MeanSquaredLoss,
    /// Mean log loss, used for logistic regression.
    MeanLogLoss,
}
161
/// The strategy to determine learn rate for the current iteration.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"LINE_SEARCH"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum LearnRateStrategy {
    /// Unspecified learn rate strategy.
    LearnRateStrategyUnspecified,
    /// Use line search to determine learning rate.
    LineSearch,
    /// Use a constant learning rate.
    Constant,
}
173
174/// The method used to initialize the centroids for kmeans algorithm.
175#[derive(Debug, Clone, Serialize, Deserialize)]
176#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
177pub enum KmeansInitializationMethod {
178 /// Unspecified initialization method.
179 KmeansInitializationMethodUnspecified,
180 /// Initializes the centroids randomly.
181 Random,
182 /// Initializes the centroids using data specified in kmeans_initialization_column.
183 Custom,
184 /// Initializes with kmeans++.
185 KmeansPlusPlu,
186}
187
188/// The geographical region based on which the holidays are considered in time series modeling. If a valid value is specified, then holiday effects modeling is enabled.
189#[derive(Debug, Clone, Serialize, Deserialize)]
190#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
191pub enum HolidayRegion {
192 /// Holiday region unspecified.
193 HolidayRegionUnspecified,
194 /// Global.
195 Global,
196 /// North America.
197 Na,
198 /// Japan and Asia Pacific: Korea, Greater China, India, Australia, and New Zealand.
199 Japac,
200 /// Europe, the Middle East and Africa.
201 Emea,
202 /// Latin America and the Caribbean.
203 Lac,
204 /// United Arab Emirates
205 Ae,
206 /// Argentina
207 Ar,
208 /// Austria
209 At,
210 /// Australia
211 Au,
212 /// Belgium
213 Be,
214 /// Brazil
215 Br,
216 /// Canada
217 Ca,
218 /// Switzerland
219 Ch,
220 /// Chile
221 Cl,
222 /// China
223 Cn,
224 /// Colombia
225 Co,
226 /// Czechoslovakia
227 C,
228 /// Czech Republic
229 Cz,
230 /// Germany
231 De,
232 /// Denmark
233 Dk,
234 /// Algeria
235 Dz,
236 /// Ecuador
237 Ec,
238 /// Estonia
239 Ee,
240 /// Egypt
241 Eg,
242 /// Spain
243 E,
244 /// Finland
245 Fi,
246 /// France
247 Fr,
248 /// Great Britain (United Kingdom)
249 Gb,
250 /// Greece
251 Gr,
252 /// Hong Kong
253 Hk,
254 /// Hungary
255 Hu,
256 /// Indonesia
257 Id,
258 /// Ireland
259 Ie,
260 /// Israel
261 Il,
262 /// India
263 In,
264 /// Iran
265 Ir,
266 /// Italy
267 It,
268 /// Japan
269 Jp,
270 /// Korea (South)
271 Kr,
272 /// Latvia
273 Lv,
274 /// Morocco
275 Ma,
276 /// Mexico
277 Mx,
278 /// Malaysia
279 My,
280 /// Nigeria
281 Ng,
282 /// Netherlands
283 Nl,
284 /// Norway
285 No,
286 /// New Zealand
287 Nz,
288 /// Peru
289 Pe,
290 /// Philippines
291 Ph,
292 /// Pakistan
293 Pk,
294 /// Poland
295 Pl,
296 /// Portugal
297 Pt,
298 /// Romania
299 Ro,
300 /// Serbia
301 R,
302 /// Russian Federation
303 Ru,
304 /// Saudi Arabia
305 Sa,
306 /// Sweden
307 Se,
308 /// Singapore
309 Sg,
310 /// Slovenia
311 Si,
312 /// Slovakia
313 Sk,
314 /// Thailand
315 Th,
316 /// Turkey
317 Tr,
318 /// Taiwan
319 Tw,
320 /// Ukraine
321 Ua,
322 /// United States
323 U,
324 /// Venezuela
325 Ve,
326 /// Viet Nam
327 Vn,
328 /// South Africa
329 Za,
330}
331
/// The data frequency of a time series.
///
/// Variants serialize as SCREAMING_SNAKE_CASE strings (e.g. `"AUTO_FREQUENCY"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum DataFrequency {
    /// Unspecified data frequency.
    DataFrequencyUnspecified,
    /// Automatically inferred from timestamps.
    AutoFrequency,
    /// Yearly data.
    Yearly,
    /// Quarterly data.
    Quarterly,
    /// Monthly data.
    Monthly,
    /// Weekly data.
    Weekly,
    /// Daily data.
    Daily,
    /// Hourly data.
    Hourly,
    /// Per-minute data.
    PerMinute,
}