Struct google_cloud_bigquery::http::model::TrainingOptions
pub struct TrainingOptions {
pub max_iterations: Option<i64>,
pub loss_type: Option<LossType>,
pub learn_rate: Option<f64>,
pub l1_regularization: Option<f64>,
pub l2_regularization: Option<f64>,
pub min_relative_progress: Option<f64>,
pub warm_start: Option<bool>,
pub early_stop: Option<bool>,
pub input_label_columns: Option<Vec<String>>,
pub data_split_method: Option<DataSplitMethod>,
pub data_split_eval_fraction: Option<f64>,
pub data_split_column: Option<String>,
pub learn_rate_strategy: Option<LearnRateStrategy>,
pub initial_learn_rate: Option<f64>,
pub label_class_weights: Option<HashMap<String, f64>>,
pub user_column: Option<String>,
pub item_column: Option<String>,
pub distance_type: Option<DistanceType>,
pub num_clusters: Option<i64>,
pub model_uri: Option<String>,
pub optimization_strategy: Option<OptimizationStrategy>,
pub hidden_units: Option<Vec<i64>>,
pub batch_size: Option<i64>,
pub dropout: Option<f64>,
pub max_tree_depth: Option<i64>,
pub subsample: Option<f64>,
pub min_split_loss: Option<f64>,
pub booster_type: Option<BoosterType>,
pub num_parallel_tree: Option<i64>,
pub dart_normalize_type: Option<DartNormalizeType>,
pub tree_method: Option<TreeMethod>,
pub min_tree_child_weight: Option<i64>,
pub colsample_bytree: Option<f64>,
pub colsample_bylevel: Option<f64>,
pub colsample_bynode: Option<f64>,
pub num_factors: Option<i64>,
pub feedback_type: Option<FeedbackType>,
pub wals_alpha: Option<f64>,
pub kmeans_initialization_method: Option<KmeansInitializationMethod>,
pub kmeans_initialization_column: Option<String>,
pub time_series_timestamp_column: Option<String>,
pub time_series_data_column: Option<String>,
pub auto_arima: Option<bool>,
pub non_seasonal_order: Option<ArimaOrder>,
pub data_frequency: Option<DataFrequency>,
pub calculate_p_values: Option<bool>,
pub include_drift: Option<bool>,
pub holiday_region: Option<HolidayRegion>,
pub time_series_id_column: Option<String>,
pub time_series_id_columns: Option<Vec<String>>,
pub horizon: Option<i64>,
pub preserve_input_structs: Option<bool>,
pub auto_arima_max_order: Option<i64>,
pub auto_arima_min_order: Option<i64>,
pub num_trials: Option<i64>,
pub max_parallel_trials: Option<i64>,
pub hparam_tuning_objectives: Option<Vec<HparamTuningObjective>>,
pub decompose_time_series: Option<bool>,
pub clean_spikes_and_dips: Option<bool>,
pub adjust_step_changes: Option<bool>,
pub enable_global_explain: Option<bool>,
pub sampled_shapley_num_paths: Option<i64>,
pub integrated_gradients_num_steps: Option<i64>,
}
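All fields are public and optional, and the struct implements Default, so a value can be built with struct-update syntax. A minimal construction sketch with illustrative values:

use google_cloud_bigquery::http::model::TrainingOptions;

fn main() {
    // Unset fields remain None and fall back to the BigQuery ML defaults.
    let options = TrainingOptions {
        max_iterations: Some(20),
        learn_rate: Some(0.1),
        early_stop: Some(true),
        min_relative_progress: Some(0.01),
        input_label_columns: Some(vec!["label".to_string()]),
        ..Default::default()
    };
    println!("{:?}", options);
}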
Fields
max_iterations: Option<i64> - The maximum number of iterations in training. Used only for iterative training algorithms.
loss_type: Option<LossType> - Type of loss function used during training run.
learn_rate: Option<f64> - Learning rate in training. Used only for iterative training algorithms.
l1_regularization: Option<f64> - L1 regularization coefficient.
l2_regularization: Option<f64> - L2 regularization coefficient.
min_relative_progress: Option<f64> - When earlyStop is true, stops training when accuracy improvement is less than ‘minRelativeProgress’. Used only for iterative training algorithms.
warm_start: Option<bool> - Whether to train a model from the last checkpoint.
early_stop: Option<bool> - Whether to stop early when the loss doesn’t improve significantly any more (compared to minRelativeProgress). Used only for iterative training algorithms.
input_label_columns: Option<Vec<String>> - Name of input label columns in training data.
data_split_method: Option<DataSplitMethod> - The data split type for training and evaluation, e.g. RANDOM.
data_split_eval_fraction: Option<f64> - The fraction of evaluation data over the whole input data. The rest of the data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2.
data_split_column: Option<String> - The column to split data with. This column won’t be used as a feature.
- When dataSplitMethod is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data.
- When dataSplitMethod is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
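A sketch of the custom split described above, following the construction pattern of the first example (the DataSplitMethod variant name is an assumption; check the generated enum for the exact spelling):

// Rows where the boolean column `is_eval` is true become evaluation data.
let split = TrainingOptions {
    data_split_method: Some(DataSplitMethod::Custom), // assumed variant name
    data_split_column: Some("is_eval".to_string()),
    ..Default::default()
};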
learn_rate_strategy: Option<LearnRateStrategy> - The strategy to determine learn rate for the current iteration.
initial_learn_rate: Option<f64> - Specifies the initial learning rate for the line search learn rate strategy.
label_class_weights: Option<HashMap<String, f64>> - Weights associated with each label class, for rebalancing the training data. Only applicable for classification models. An object containing a list of “key”: value pairs. Example: { “name”: “wrench”, “mass”: “1.3kg”, “count”: “3” }.
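For class rebalancing, the map keys are label values and the map values are weights; a sketch with illustrative labels:

use std::collections::HashMap;

// Up-weight the rare positive class relative to the common negative class.
let mut weights = HashMap::new();
weights.insert("fraud".to_string(), 0.9);
weights.insert("not_fraud".to_string(), 0.1);
let rebalanced = TrainingOptions {
    label_class_weights: Some(weights),
    ..Default::default()
};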
user_column: Option<String> - User column specified for matrix factorization models.
item_column: Option<String> - Item column specified for matrix factorization models.
distance_type: Option<DistanceType> - Distance type for clustering models.
num_clusters: Option<i64> - Number of clusters for clustering models.
model_uri: Option<String> - Google Cloud Storage URI from which the model was imported. Only applicable for imported models.
optimization_strategy: Option<OptimizationStrategy> - Optimization strategy for training linear regression models.
hidden_units: Option<Vec<i64>> - Hidden units for dnn models.
batch_size: Option<i64> - Batch size for dnn models.
dropout: Option<f64> - Dropout probability for dnn models.
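A sketch of the DNN-specific fields, with illustrative values:

// Two hidden layers of 128 and 64 units, mini-batches of 32, 20% dropout.
let dnn = TrainingOptions {
    hidden_units: Some(vec![128, 64]),
    batch_size: Some(32),
    dropout: Some(0.2),
    ..Default::default()
};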
max_tree_depth: Option<i64> - Maximum depth of a tree for boosted tree models.
subsample: Option<f64> - Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models.
min_split_loss: Option<f64> - Minimum split loss for boosted tree models.
booster_type: Option<BoosterType> - Booster type for boosted tree models.
num_parallel_tree: Option<i64> - Number of parallel trees constructed during each iteration for boosted tree models.
dart_normalize_type: Option<DartNormalizeType> - Type of normalization algorithm for boosted tree models using dart booster.
tree_method: Option<TreeMethod> - Tree construction algorithm for boosted tree models.
min_tree_child_weight: Option<i64> - Minimum sum of instance weight needed in a child for boosted tree models.
colsample_bytree: Option<f64> - Subsample ratio of columns when constructing each tree for boosted tree models.
colsample_bylevel: Option<f64> - Subsample ratio of columns for each level for boosted tree models.
colsample_bynode: Option<f64> - Subsample ratio of columns for each node (split) for boosted tree models.
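A sketch combining several of the boosted tree fields above, with illustrative values:

// Shallow trees with row and column subsampling to limit overfitting.
let boosted = TrainingOptions {
    max_tree_depth: Some(6),
    subsample: Some(0.8),
    colsample_bytree: Some(0.8),
    min_split_loss: Some(0.0),
    ..Default::default()
};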
num_factors: Option<i64> - Num factors specified for matrix factorization models.
feedback_type: Option<FeedbackType> - Feedback type that specifies which algorithm to run for matrix factorization.
wals_alpha: Option<f64> - Hyperparameter for matrix factorization when implicit feedback type is specified.
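A sketch of an implicit-feedback matrix factorization configuration (the FeedbackType variant name is an assumption; column names are illustrative):

let mf = TrainingOptions {
    user_column: Some("user_id".to_string()),
    item_column: Some("item_id".to_string()),
    num_factors: Some(16),
    feedback_type: Some(FeedbackType::Implicit), // assumed variant name
    wals_alpha: Some(40.0), // only read for implicit feedback
    ..Default::default()
};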
kmeans_initialization_method: Option<KmeansInitializationMethod> - The method used to initialize the centroids for kmeans algorithm.
kmeans_initialization_column: Option<String> - The column used to provide the initial centroids for kmeans algorithm when kmeansInitializationMethod is CUSTOM.
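A sketch of k-means with caller-provided centroids (the KmeansInitializationMethod variant name is an assumption):

let kmeans = TrainingOptions {
    num_clusters: Some(5),
    kmeans_initialization_method: Some(KmeansInitializationMethod::Custom), // assumed variant name
    kmeans_initialization_column: Some("is_seed_centroid".to_string()),
    ..Default::default()
};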
time_series_timestamp_column: Option<String> - Column to be designated as time series timestamp for ARIMA model.
time_series_data_column: Option<String> - Column to be designated as time series data for ARIMA model.
auto_arima: Option<bool> - Whether to enable auto ARIMA or not.
non_seasonal_order: Option<ArimaOrder> - A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.
data_frequency: Option<DataFrequency> - The data frequency of a time series.
calculate_p_values: Option<bool> - Whether or not p-value test should be computed for this model. Only available for linear and logistic regression models.
include_drift: Option<bool> - Include drift when fitting an ARIMA model.
holiday_region: Option<HolidayRegion> - The geographical region based on which the holidays are considered in time series modeling. If a valid value is specified, then holiday effects modeling is enabled.
time_series_id_column: Option<String> - The time series id column that was used during ARIMA model training.
time_series_id_columns: Option<Vec<String>> - The time series id columns that were used during ARIMA model training.
horizon: Option<i64> - The number of periods ahead that need to be forecasted.
preserve_input_structs: Option<bool> - Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.
auto_arima_max_order: Option<i64> - The max value of the sum of non-seasonal p and q.
auto_arima_min_order: Option<i64> - The min value of the sum of non-seasonal p and q.
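A sketch of an auto-ARIMA forecasting configuration; column names and limits are illustrative:

// Let BigQuery ML search (p, d, q) automatically and forecast 30 periods ahead.
let arima = TrainingOptions {
    time_series_timestamp_column: Some("ts".to_string()),
    time_series_data_column: Some("sales".to_string()),
    auto_arima: Some(true),
    auto_arima_max_order: Some(5),
    horizon: Some(30),
    ..Default::default()
};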
num_trials: Option<i64> - Number of trials to run this hyperparameter tuning job.
max_parallel_trials: Option<i64> - Maximum number of trials to run in parallel.
hparam_tuning_objectives: Option<Vec<HparamTuningObjective>> - The target evaluation metrics to optimize the hyperparameters for.
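A sketch of the hyperparameter tuning fields; the objectives vector is left unset here because valid HparamTuningObjective values depend on the model type:

// Run 20 trials with at most 4 executing in parallel.
let tuning = TrainingOptions {
    num_trials: Some(20),
    max_parallel_trials: Some(4),
    ..Default::default()
};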
decompose_time_series: Option<bool> - If true, perform decompose time series and save the results.
clean_spikes_and_dips: Option<bool> - If true, clean spikes and dips in the input time series.
adjust_step_changes: Option<bool> - If true, detect step changes and make data adjustment in the input time series.
enable_global_explain: Option<bool> - If true, enable global explanation during training.
sampled_shapley_num_paths: Option<i64> - Number of paths for the sampled Shapley explain method.
integrated_gradients_num_steps: Option<i64> - Number of integral steps for the integrated gradients explain method.
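A sketch enabling global explanations together with the two explainer settings above, with illustrative values:

let explain = TrainingOptions {
    enable_global_explain: Some(true),
    sampled_shapley_num_paths: Some(10),
    integrated_gradients_num_steps: Some(50),
    ..Default::default()
};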
Trait Implementations
impl Clone for TrainingOptions
fn clone(&self) -> TrainingOptions
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
impl Debug for TrainingOptions
impl Default for TrainingOptions
fn default() -> TrainingOptions
impl<'de> Deserialize<'de> for TrainingOptions
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error> where __D: Deserializer<'de>
impl PartialEq for TrainingOptions
fn eq(&self, other: &TrainingOptions) -> bool
Tests for self and other values to be equal, and is used by ==.
impl Serialize for TrainingOptions
impl StructuralPartialEq for TrainingOptions
Auto Trait Implementations
impl Freeze for TrainingOptions
impl RefUnwindSafe for TrainingOptions
impl Send for TrainingOptions
impl Sync for TrainingOptions
impl Unpin for TrainingOptions
impl UnwindSafe for TrainingOptions
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoRequest<T> for T
fn into_request(self) -> Request<T>
Wrap the input message T in a tonic::Request.