gbrt_rs/boosting/gradient_booster.rs

//! Core Gradient Boosting Implementation
//!
//! This module provides the main [`GradientBooster`] algorithm implementation with support for:
//!
//! - **Multiple loss functions**: MSE, MAE, Huber, and LogLoss
//! - **Stochastic gradient boosting**: Subsampling for improved generalization
//! - **Early stopping**: With configurable patience and tolerance
//! - **Validation monitoring**: Optional validation dataset for loss tracking
//! - **Feature importance**: Computed from tree gain contributions
//! - **Robust training**: Handles edge cases like NaN loss values
//!
//! # Training Process
//!
//! The booster trains an ensemble of decision trees iteratively:
//! 1. Initialize predictions with the optimal constant value
//! 2. For each iteration:
//!    - Compute gradients and hessians of the loss function
//!    - Sample data (if subsampling < 1.0)
//!    - Fit a new tree to the negative gradients
//!    - Update predictions with shrinkage (learning rate)
//!    - Monitor validation loss for early stopping
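//!
//! # Example
//!
//! A minimal sketch of the intended workflow. The dataset helpers and
//! `GBRTConfig::default()` shown here are illustrative assumptions, not
//! guaranteed APIs of this crate:
//!
//! ```ignore
//! use gbrt_rs::boosting::GradientBooster;
//! use gbrt_rs::core::GBRTConfig;
//!
//! // Hypothetical helpers that produce `Dataset` values.
//! let train = load_train_dataset()?;
//! let valid = load_validation_dataset()?;
//!
//! // Build a booster, train with validation monitoring, then predict.
//! let mut booster = GradientBooster::new(GBRTConfig::default())?;
//! booster.fit(&train, Some(&valid))?;
//! let predictions = booster.predict(train.features())?;
//! println!("trained {} trees", booster.n_trees());
//! ```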
use crate::core::{create_loss, GBRTConfig, GradientLoss, LossFunction};
use crate::data::{Dataset, FeatureMatrix};
use crate::tree::{DecisionTree, Tree};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use rand::seq::SliceRandom;

/// Errors that can occur during gradient boosting operations.
///
/// This error type covers all failure modes: configuration issues,
/// training errors, prediction failures, and data validation problems.
#[derive(Error, Debug)]
pub enum BoostingError {
    #[error("Invalid input data: {0}")]
    InvalidInput(String),

    #[error("Training error: {0}")]
    TrainingError(String),

    #[error("Prediction error: {0}")]
    PredictionError(String),

    #[error("Configuration error: {0}")]
    ConfigError(String),

    #[error("Tree building error: {0}")]
    TreeError(String),

    #[error("Loss function error: {0}")]
    LossError(String),

    #[error("Serialization error: {0}")]
    SerializationError(String),
}

/// Result type for all gradient boosting operations.
pub type BoostingResult<T> = std::result::Result<T, BoostingError>;

/// State of the boosting process at a specific training iteration.
///
/// Captures performance metrics and model statistics for monitoring
/// training progress and implementing early stopping.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IterationState {
    /// Current iteration number (0-indexed).
    pub iteration: usize,
    /// Training loss at this iteration.
    pub train_loss: f64,
    /// Validation loss if validation data is provided.
    pub validation_loss: Option<f64>,
    /// Number of trees in the ensemble after this iteration.
    pub n_trees: usize,
    /// Number of leaves in the tree added this iteration.
    pub n_leaves: usize,
}

/// Complete training history and state.
///
/// Stores all iteration states and tracks the best performing iteration
/// for early stopping and model selection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrainingState {
    /// Per-iteration training state history.
    pub iterations: Vec<IterationState>,
    /// Iteration with the best validation loss (if early stopping is used).
    pub best_iteration: Option<usize>,
    /// Best validation loss achieved during training.
    pub best_validation_loss: Option<f64>,
}

/// Main gradient boosting implementation.
///
/// [`GradientBooster`] is the core ensemble model that trains a sequence
/// of decision trees to minimize a given loss function. It supports
/// various training strategies and provides comprehensive monitoring capabilities.
///
/// # Key Features
///
/// - **Loss Functions**: MSE, MAE, Huber (robust regression), LogLoss (classification)
/// - **Regularization**: Learning rate shrinkage, L2 regularization (`lambda`)
/// - **Stochastic Boosting**: Row subsampling for variance reduction
/// - **Early Stopping**: Configurable patience and improvement tolerance
/// - **Feature Importance**: Gain-based importance scores
///
/// # Serialization
///
/// This struct derives `Serialize` and `Deserialize`, allowing trained models
/// to be saved and loaded using serde-compatible formats.
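///
/// A sketch of saving and loading a trained model, assuming the `serde_json`
/// crate is available and `booster` is a trained `GradientBooster`:
///
/// ```ignore
/// let json = serde_json::to_string(&booster)?;
/// let restored: GradientBooster = serde_json::from_str(&json)?;
/// assert_eq!(restored.n_trees(), booster.n_trees());
/// ```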
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GradientBooster {
    /// Model configuration and hyperparameters.
    config: GBRTConfig,
    /// Sequence of trained decision trees.
    trees: Vec<Tree>,
    /// Initial prediction (optimal constant model before boosting iterations).
    initial_prediction: f64,
    /// Training history and validation monitoring state.
    training_state: Option<TrainingState>,
    /// Feature importance scores (normalized gain).
    feature_importance: Vec<f64>,
    /// Whether the model has been trained.
    is_trained: bool,
}

impl GradientBooster {
    /// Creates a new gradient booster with the given configuration.
    ///
    /// Validates the configuration before returning. The booster is untrained
    /// and ready for [`fit`](Self::fit) to be called.
    ///
    /// # Parameters
    /// - `config`: [`GBRTConfig`] with hyperparameters and training options.
    ///
    /// # Errors
    /// Returns `BoostingError::ConfigError` if configuration validation fails.
    pub fn new(config: GBRTConfig) -> BoostingResult<Self> {
        config.validate()
            .map_err(BoostingError::ConfigError)?;

        Ok(Self {
            config,
            trees: Vec::new(),
            initial_prediction: 0.0,
            training_state: None,
            feature_importance: Vec::new(),
            is_trained: false,
        })
    }

    /// Trains the model on a dataset with optional validation data.
    ///
    /// This is the main training entry point. It performs the complete
    /// gradient boosting algorithm including:
    /// - Data validation and initialization
    /// - Iterative tree building with gradient optimization
    /// - Optional stochastic subsampling
    /// - Validation loss monitoring and early stopping
    /// - Feature importance computation
    ///
    /// # Parameters
    /// - `train_data`: Training dataset with features and targets.
    /// - `validation_data`: Optional validation dataset for early stopping and monitoring.
    ///
    /// # Errors
    /// Returns errors for:
    /// - Invalid training data (empty, mismatched dimensions)
    /// - Configuration issues detected during training
    /// - Tree building failures
    /// - Loss computation problems (e.g., NaN values)
    ///
    /// # Early Stopping
    /// If `config.early_stopping_rounds` is set and `validation_data` is provided,
    /// training stops when validation loss doesn't improve significantly
    /// (by more than `config.early_stopping_tolerance`) for the specified number of rounds.
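    ///
    /// # Example
    ///
    /// A sketch of enabling early stopping; the field names follow the usage in
    /// this module, and public fields plus `GBRTConfig::default()` are assumed:
    ///
    /// ```ignore
    /// let config = GBRTConfig {
    ///     n_estimators: 500,
    ///     early_stopping_rounds: Some(20), // stop after 20 rounds without improvement
    ///     ..GBRTConfig::default()
    /// };
    /// let mut booster = GradientBooster::new(config)?;
    /// booster.fit(&train, Some(&valid))?; // validation data enables early stopping
    /// ```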
    pub fn fit(
        &mut self,
        train_data: &Dataset,
        validation_data: Option<&Dataset>,
    ) -> BoostingResult<()> {
        self.validate_training_data(train_data, validation_data)?;

        // Reset state
        self.trees.clear();
        self.initial_prediction = 0.0;
        self.is_trained = false;
        self.training_state = Some(TrainingState {
            iterations: Vec::new(),
            best_iteration: None,
            best_validation_loss: None,
        });

        // Initialize predictions
        self.initial_prediction = self.compute_initial_prediction(train_data.targets().as_slice().unwrap())?;
        let mut predictions = vec![self.initial_prediction; train_data.n_samples()];

        // Create loss function
        let loss_fn = create_loss(&self.config.loss);

        // Prepare validation data (only the running predictions are used here; the
        // validation features and targets are read again in `update_training_state`)
        let (_, _, mut val_predictions) =
            self.prepare_validation_data(validation_data, train_data.n_samples())?;

        // Early stopping state
        let mut best_val_loss = f64::INFINITY;
        let mut no_improvement_count = 0;
        let mut best_iteration = 0;

        // Main boosting loop
        for iteration in 0..self.config.n_estimators {
            // Compute gradients and hessians
            let (gradients, hessians) = loss_fn.gradient_hessian(
                train_data.targets().as_slice().unwrap(),
                &predictions
            );

            // Convert ndarray results to slices
            let gradients_slice = gradients.as_slice().unwrap();
            let hessians_slice = hessians.as_slice().unwrap();

            // Sample data for stochastic gradient boosting
            let (sampled_features, sampled_gradients, sampled_hessians, sample_indices) =
                self.sample_data(train_data.features(), gradients_slice, hessians_slice)?;

            // Fit a new tree to the negative gradients
            let tree = self.fit_tree(&sampled_features, &sampled_gradients, &sampled_hessians)?;

            // Update predictions with learning rate
            self.update_predictions(train_data.features(), &tree, &mut predictions, sample_indices.as_ref())?;

            // Update training state and check early stopping
            let should_stop = self.update_training_state(
                iteration,
                train_data,
                validation_data,
                &predictions,
                &mut val_predictions,
                &tree,
                &mut best_val_loss,
                &mut no_improvement_count,
                &mut best_iteration,
                &loss_fn,
            )?;

            self.trees.push(tree);

            if should_stop {
                println!("Early stopping at iteration {}. Best iteration: {}", iteration, best_iteration);
                break;
            }
        }

        // Finalize training
        self.compute_feature_importance(train_data.n_features());
        self.is_trained = true;

        Ok(())
    }

    /// Makes predictions for a batch of samples.
    ///
    /// Calculates predictions by summing contributions from all trees
    /// with shrinkage applied, then applies the loss-specific transformation
    /// (e.g., sigmoid for LogLoss).
    ///
    /// # Parameters
    /// - `features`: Feature matrix with shape `(n_samples, n_features)`.
    ///
    /// # Returns
    /// Vector of predictions of length `n_samples`.
    ///
    /// # Errors
    /// Returns `BoostingError::PredictionError` if the model has not been trained
    /// yet (call [`fit`](Self::fit) first) or if `features.n_features()` doesn't
    /// match the number of features seen during training.
    pub fn predict(&self, features: &FeatureMatrix) -> BoostingResult<Vec<f64>> {
        if !self.is_trained {
            return Err(BoostingError::PredictionError("Model not trained".to_string()));
        }

        if features.n_features() != self.feature_importance.len() {
            return Err(BoostingError::PredictionError(
                format!("Expected {} features, got {}", self.feature_importance.len(), features.n_features())
            ));
        }

        let mut predictions = vec![self.initial_prediction; features.n_samples()];

        for (i, pred) in predictions.iter_mut().enumerate() {
            // Fetch each sample once and accumulate contributions from every tree.
            let sample = features.get_sample(i)
                .map_err(|e| BoostingError::PredictionError(e.to_string()))?
                .to_vec();
            for tree in &self.trees {
                *pred += self.config.learning_rate * tree.predict(&sample);
            }
        }

        // Apply loss-specific transformation
        let transformed_predictions = self.apply_prediction_transform(&predictions);

        Ok(transformed_predictions)
    }

    /// Makes a prediction for a single sample.
    ///
    /// More efficient than [`predict`](Self::predict) for single-sample inference.
    ///
    /// # Parameters
    /// - `features`: Slice of feature values of length `n_features`.
    ///
    /// # Returns
    /// Single prediction value.
    ///
    /// # Errors
    /// Returns `BoostingError::PredictionError` if the model has not been trained
    /// yet or if `features.len()` doesn't match the number of features seen during
    /// training.
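    ///
    /// # Example
    ///
    /// A sketch of scoring one sample; the feature values are placeholders and
    /// their count must match the training data:
    ///
    /// ```ignore
    /// let y_hat = booster.predict_single(&[0.5, 1.2, -3.0])?;
    /// ```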
    pub fn predict_single(&self, features: &[f64]) -> BoostingResult<f64> {
        if !self.is_trained {
            return Err(BoostingError::PredictionError("Model not trained".to_string()));
        }

        if features.len() != self.feature_importance.len() {
            return Err(BoostingError::PredictionError(
                format!("Expected {} features, got {}", self.feature_importance.len(), features.len())
            ));
        }

        let mut prediction = self.initial_prediction;

        for tree in &self.trees {
            prediction += self.config.learning_rate * tree.predict(features);
        }

        let transformed_prediction = self.apply_single_prediction_transform(prediction);

        Ok(transformed_prediction)
    }

    /// Returns feature importance scores from the trained model.
    ///
    /// Importance is computed as the normalized total gain contributed
    /// by splits on each feature across all trees. Scores sum to 1.0.
    ///
    /// # Returns
    /// Slice of importance scores of length `n_features`.
    /// Returns zeros if `config.compute_feature_importance` is `false`, and an
    /// empty slice if the model has not been trained yet.
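    ///
    /// # Example
    ///
    /// A sketch of ranking features by gain after training:
    ///
    /// ```ignore
    /// for (feature, score) in booster.feature_importance().iter().enumerate() {
    ///     println!("feature {}: {:.4}", feature, score);
    /// }
    /// ```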
    pub fn feature_importance(&self) -> &[f64] {
        &self.feature_importance
    }

    /// Returns the training history and validation state.
    ///
    /// Contains per-iteration metrics and the best iteration if
    /// early stopping was used. This is useful for plotting learning curves
    /// or analyzing training dynamics.
    ///
    /// # Returns
    /// `Some(TrainingState)` after training, `None` before training.
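    ///
    /// # Example
    ///
    /// A sketch of inspecting the learning curve after training (field names as
    /// defined on [`IterationState`]):
    ///
    /// ```ignore
    /// if let Some(state) = booster.training_state() {
    ///     for it in &state.iterations {
    ///         println!("iter {}: train loss {:.6}", it.iteration, it.train_loss);
    ///     }
    /// }
    /// ```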
    pub fn training_state(&self) -> Option<&TrainingState> {
        self.training_state.as_ref()
    }

    /// Returns the number of trees in the ensemble.
    ///
    /// Note: This may be fewer than `config.n_estimators` if early stopping triggered.
    ///
    /// # Returns
    /// Number of trees after training.
    pub fn n_trees(&self) -> usize {
        self.trees.len()
    }

    /// Returns the configuration used to create this booster.
    pub fn config(&self) -> &GBRTConfig {
        &self.config
    }

    /// Checks whether the model has been trained.
    pub fn is_trained(&self) -> bool {
        self.is_trained
    }

    // Private helper methods

    /// Validates training and validation datasets before fitting.
    fn validate_training_data(
        &self,
        train_data: &Dataset,
        validation_data: Option<&Dataset>,
    ) -> BoostingResult<()> {
        if train_data.n_samples() == 0 {
            return Err(BoostingError::InvalidInput("Training dataset is empty".to_string()));
        }

        if let Some(val_data) = validation_data {
            if val_data.n_features() != train_data.n_features() {
                return Err(BoostingError::InvalidInput(
                    "Validation data has different number of features".to_string()
                ));
            }
        }

        Ok(())
    }

    /// Computes the optimal initial prediction before boosting iterations.
    ///
    /// For MSE/Huber: mean of targets. For MAE: median. For LogLoss: log-odds.
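    ///
    /// For example, with targets that are 30% positive, the LogLoss initialization
    /// is `ln(0.3 / 0.7) ≈ -0.847`, i.e. the log-odds of the base rate.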
    fn compute_initial_prediction(&self, targets: &[f64]) -> BoostingResult<f64> {
        match self.config.loss {
            LossFunction::MSE | LossFunction::Huber(_) => {
                Ok(targets.iter().sum::<f64>() / targets.len() as f64)
            }
            LossFunction::MAE => {
                // Median of the targets (total_cmp avoids panicking on NaN).
                let mut sorted = targets.to_vec();
                sorted.sort_by(|a, b| a.total_cmp(b));
                let mid = sorted.len() / 2;
                if sorted.len() % 2 == 0 {
                    Ok((sorted[mid - 1] + sorted[mid]) / 2.0)
                } else {
                    Ok(sorted[mid])
                }
            }
            LossFunction::LogLoss => {
                // For classification, use log-odds of positive class proportion
                let mean = targets.iter().sum::<f64>() / targets.len() as f64;
                let mean_clamped = mean.clamp(1e-15, 1.0 - 1e-15);
                Ok((mean_clamped / (1.0 - mean_clamped)).ln())
            }
        }
    }

    /// Prepares validation data structures for loss monitoring.
    fn prepare_validation_data(
        &self,
        validation_data: Option<&Dataset>,
        n_train_samples: usize,
    ) -> BoostingResult<(Option<FeatureMatrix>, Option<Vec<f64>>, Vec<f64>)> {
        match validation_data {
            Some(val_data) => {
                let val_predictions = vec![self.initial_prediction; val_data.n_samples()];
                Ok((
                    Some(val_data.features().clone()),
                    Some(val_data.targets().as_slice().unwrap().to_vec()),
                    val_predictions,
                ))
            }
            None => {
                Ok((None, None, Vec::new()))
            }
        }
    }

    /// Samples data for stochastic gradient boosting.
    ///
    /// Returns sampled features, gradients, hessians, and original indices.
    fn sample_data(
        &self,
        features: &FeatureMatrix,
        gradients: &[f64],
        hessians: &[f64],
    ) -> BoostingResult<(FeatureMatrix, Vec<f64>, Vec<f64>, Option<Vec<usize>>)> {
        if self.config.subsample >= 1.0 {
            // No sampling
            return Ok((
                features.clone(),
                gradients.to_vec(),
                hessians.to_vec(),
                None,
            ));
        }

        // Stochastic gradient boosting: sample a subset of data
        let n_samples = (features.n_samples() as f64 * self.config.subsample) as usize;
        let n_samples = n_samples.max(1).min(features.n_samples());

        let mut rng = rand::thread_rng();
        let mut indices: Vec<usize> = (0..features.n_samples()).collect();
        indices.shuffle(&mut rng);
        indices.truncate(n_samples);

        let sampled_features = features.select_samples(&indices)
            .map_err(|e| BoostingError::TrainingError(e.to_string()))?;
        let sampled_gradients: Vec<f64> = indices.iter().map(|&i| gradients[i]).collect();
        let sampled_hessians: Vec<f64> = indices.iter().map(|&i| hessians[i]).collect();

        Ok((sampled_features, sampled_gradients, sampled_hessians, Some(indices)))
    }

    /// Trains a single decision tree on the gradient data.
    fn fit_tree(
        &self,
        features: &FeatureMatrix,
        gradients: &[f64],
        hessians: &[f64],
    ) -> BoostingResult<Tree> {
        let tree_config = &self.config.tree_config;

        let tree_builder = DecisionTree::new(
            tree_config.max_depth,
            tree_config.min_samples_split,
            tree_config.min_samples_leaf,
            tree_config.min_impurity_decrease,
            tree_config.max_features,
            tree_config.lambda,
        );

        tree_builder.fit(features, gradients, hessians)
            .map_err(|e| BoostingError::TreeError(format!("Tree fitting failed: {}", e)))
    }

    /// Updates predictions after fitting a new tree.
    fn update_predictions(
        &self,
        features: &FeatureMatrix,
        tree: &Tree,
        predictions: &mut Vec<f64>,
        sample_indices: Option<&Vec<usize>>,
    ) -> BoostingResult<()> {
        match sample_indices {
            Some(indices) => {
                // Update only sampled indices for stochastic gradient boosting
                for &idx in indices {
                    let sample = features.get_sample(idx)
                        .map_err(|e| BoostingError::TrainingError(e.to_string()))?;
                    predictions[idx] += self.config.learning_rate * tree.predict(&sample.to_vec());
                }
            }
            None => {
                // Update all predictions
                for (i, pred) in predictions.iter_mut().enumerate() {
                    let sample = features.get_sample(i)
                        .map_err(|e| BoostingError::TrainingError(e.to_string()))?;
                    *pred += self.config.learning_rate * tree.predict(&sample.to_vec());
                }
            }
        }

        Ok(())
    }

    /// Updates training state and implements early stopping logic.
    ///
    /// Returns `true` if training should stop, `false` otherwise.
    fn update_training_state(
        &mut self,
        iteration: usize,
        train_data: &Dataset,
        validation_data: Option<&Dataset>,
        predictions: &[f64],
        val_predictions: &mut Vec<f64>,
        tree: &Tree,
        best_val_loss: &mut f64,
        no_improvement_count: &mut usize,
        best_iteration: &mut usize,
        loss_fn: &dyn GradientLoss,
    ) -> BoostingResult<bool> {
        let mut iteration_state = IterationState {
            iteration,
            train_loss: 0.0,
            validation_loss: None,
            n_trees: self.trees.len() + 1,
            n_leaves: tree.n_leaves(),
        };

        // Compute training loss
        iteration_state.train_loss = loss_fn.loss(
            train_data.targets().as_slice().unwrap(),
            predictions
        );

        let should_stop = if let Some(val_data) = validation_data {
            let val_features = val_data.features();
            let val_targets = val_data.targets();

            // Update validation predictions
            for (i, pred) in val_predictions.iter_mut().enumerate() {
                let sample = val_features.get_sample(i)
                    .map_err(|e| BoostingError::TrainingError(e.to_string()))?;
                *pred += self.config.learning_rate * tree.predict(&sample.to_vec());
            }

            let current_val_loss = loss_fn.loss(
                val_targets.as_slice().unwrap(),
                val_predictions
            );

            // Protect against NaN/Inf
            if !current_val_loss.is_finite() {
                eprintln!("Warning: Validation loss became NaN/Inf at iteration {}", iteration);
                return Ok(true); // Stop immediately
            }

            iteration_state.validation_loss = Some(current_val_loss);

            // Early stopping with tolerance: an iteration counts as an improvement
            // only if the validation loss drops below the best loss by more than
            // the relative tolerance.
            let improvement_threshold = *best_val_loss * (1.0 - self.config.early_stopping_tolerance);

            if current_val_loss < improvement_threshold {
                // Significant improvement
                *best_val_loss = current_val_loss;
                *no_improvement_count = 0;
                *best_iteration = iteration;
            } else {
                // No significant improvement
                *no_improvement_count += 1;
            }

            // Check patience
            if let Some(patience) = self.config.early_stopping_rounds {
                if *no_improvement_count >= patience {
                    println!("Early stopping triggered at iteration {}. Best validation loss: {:.6} at iteration {}",
                             iteration, *best_val_loss, *best_iteration);
                    if let Some(state) = &mut self.training_state {
                        state.best_iteration = Some(*best_iteration);
                        state.best_validation_loss = Some(*best_val_loss);
                    }
                    return Ok(true); // Signal to stop
                }
            }

            false
        } else {
            false
        };

        // Update training state
        if let Some(state) = &mut self.training_state {
            state.iterations.push(iteration_state);
        }

        Ok(should_stop)
    }

    /// Computes normalized feature importance from all trees.
    fn compute_feature_importance(&mut self, n_features: usize) {
        if !self.config.compute_feature_importance {
            self.feature_importance = vec![0.0; n_features];
            return;
        }

        let mut importance = vec![0.0; n_features];
        let mut total_gain = 0.0;
        for tree in &self.trees {
            for &(feature, gain) in tree.feature_importance().iter() {
                importance[feature] += gain;
                total_gain += gain;
            }
        }

        // Normalize
        if total_gain > 0.0 {
            for imp in &mut importance {
                *imp /= total_gain;
            }
        }

        self.feature_importance = importance;
    }

    /// Applies loss-specific transformations to predictions.
    fn apply_prediction_transform(&self, predictions: &[f64]) -> Vec<f64> {
        if matches!(self.config.loss, LossFunction::LogLoss) {
            let loss_fn = create_loss(&self.config.loss);
            loss_fn.transform(predictions)
        } else {
            predictions.to_vec()
        }
    }

    /// Applies loss-specific transformation to a single prediction.
    fn apply_single_prediction_transform(&self, prediction: f64) -> f64 {
        if matches!(self.config.loss, LossFunction::LogLoss) {
            let loss_fn = create_loss(&self.config.loss);
            loss_fn.transform(&[prediction])[0]
        } else {
            prediction
        }
    }
}

impl std::fmt::Display for GradientBooster {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "GradientBooster")?;
        writeln!(f, "  Trained: {}", self.is_trained)?;
        writeln!(f, "  Trees: {}", self.trees.len())?;
        writeln!(f, "  Loss: {}", self.config.loss)?;
        writeln!(f, "  Learning Rate: {:.4}", self.config.learning_rate)?;
        writeln!(f, "  Subsampling: {:.2}", self.config.subsample)?;

        if let Some(state) = &self.training_state {
            if let Some(iter_state) = state.iterations.last() {
                writeln!(f, "  Final Training Loss: {:.6}", iter_state.train_loss)?;
                if let Some(val_loss) = iter_state.validation_loss {
                    writeln!(f, "  Final Validation Loss: {:.6}", val_loss)?;
                }
            }
        }

        Ok(())
    }
}