Expand description
Ensemble methods for sklears
This crate provides implementations of ensemble machine learning algorithms including:
- Bagging (Bootstrap Aggregating)
- Gradient Boosting
- AdaBoost (Adaptive Boosting)
- Voting Classifiers/Regressors
- Stacking and Blending
Re-exports§
pub use adaboost::AdaBoostAlgorithm;pub use adaboost::AdaBoostClassifier;pub use adaboost::AdaBoostConfig;pub use adversarial::AdversarialEnsembleClassifier;pub use adversarial::AdversarialEnsembleConfig;pub use adversarial::AdversarialPredictionResults;pub use adversarial::AdversarialStrategy;pub use adversarial::AttackMethod;pub use adversarial::DefensiveStrategy;pub use adversarial::InputPreprocessing;pub use adversarial::RobustnessMetrics;pub use analysis::CalibrationMetrics;pub use analysis::ConfidenceMetrics;pub use analysis::EnsembleAnalyzer;pub use analysis::FeatureImportanceAnalysis;pub use analysis::ImportanceAggregationMethod;pub use analysis::ReliabilityDiagram;pub use analysis::UncertaintyDecomposition;pub use analysis::UncertaintyQuantification;pub use bagging::BaggingClassifier;pub use bagging::BaggingConfig;pub use bagging::BaggingRegressor;pub use compression::AcquisitionFunction;pub use compression::BayesianEnsembleOptimizer;pub use compression::CompressedEnsemble;pub use compression::CompressionConfig;pub use compression::CompressionMetadata;pub use compression::CompressionStats;pub use compression::CompressionStrategy;pub use compression::EnsembleCompressor;pub use compression::EnsemblePruner;pub use compression::KnowledgeDistillationTrainer;pub use compression::QuantizationParams;pub use compression::SparsityInfo;pub use cpu_optimization::CacheOptimizedMatrixOps;pub use cpu_optimization::CpuOptimizationConfig;pub use cpu_optimization::CpuOptimizer;pub use cpu_optimization::LoopOptimizedAlgorithms;pub use cpu_optimization::PerformanceCounters;pub use cpu_optimization::VectorizedEnsembleOps;pub use gpu_acceleration::detect_available_backends;pub use gpu_acceleration::GpuBackend;pub use gpu_acceleration::GpuConfig;pub use gpu_acceleration::GpuContext;pub use gpu_acceleration::GpuDeviceInfo;pub use gpu_acceleration::GpuEnsembleTrainer;pub use gpu_acceleration::GpuTensorOps;pub use gpu_acceleration::ProfilingResults;pub use 
gradient_boosting::FeatureImportanceMetrics;pub use gradient_boosting::GradientBoostingClassifier;pub use gradient_boosting::GradientBoostingConfig;pub use gradient_boosting::GradientBoostingRegressor;pub use gradient_boosting::GradientBoostingTree;pub use gradient_boosting::LossFunction;pub use imbalanced::CombinationStrategy;pub use imbalanced::CostSensitiveConfig;pub use imbalanced::ImbalancedEnsembleClassifier;pub use imbalanced::ImbalancedEnsembleConfig;pub use imbalanced::SMOTEConfig;pub use imbalanced::SMOTESampler;pub use imbalanced::SamplingQualityMetrics;pub use imbalanced::SamplingResult;pub use imbalanced::SamplingStrategy;pub use imbalanced::ThresholdMovingStrategy;pub use memory_efficient::IncrementalLinearRegression;pub use memory_efficient::IncrementalModel;pub use memory_efficient::MemoryEfficientConfig;pub use memory_efficient::MemoryEfficientEnsemble;pub use mixed_precision::AMPContext;pub use mixed_precision::GradientScaler;pub use mixed_precision::Half;pub use mixed_precision::MixedPrecisionArray;pub use mixed_precision::MixedPrecisionConfig;pub use mixed_precision::MixedPrecisionGradientAccumulator;pub use mixed_precision::MixedPrecisionTrainer;pub use mixed_precision::ScalerState;pub use monitoring::DegradationIndicators;pub use monitoring::DriftDetectionResult;pub use monitoring::DriftType;pub use monitoring::EnsembleMonitor;pub use monitoring::ModelHealth;pub use monitoring::MonitoringConfig;pub use monitoring::MonitoringResults;pub use monitoring::PerformanceDataPoint;pub use monitoring::PerformanceMetric;pub use monitoring::PerformanceTrend;pub use monitoring::RecommendedAction;pub use multi_label::LabelCorrelationMethod;pub use multi_label::LabelTransformationStrategy;pub use multi_label::MultiLabelAggregationMethod;pub use multi_label::MultiLabelEnsembleClassifier;pub use multi_label::MultiLabelEnsembleConfig;pub use multi_label::MultiLabelPredictionResults;pub use multi_label::MultiLabelTrainingResults;pub use 
multi_task::CrossTaskValidation;pub use multi_task::MultiTaskEnsembleClassifier;pub use multi_task::MultiTaskEnsembleConfig;pub use multi_task::MultiTaskEnsembleRegressor;pub use multi_task::MultiTaskFeatureSelector;pub use multi_task::MultiTaskTrainingResults;pub use multi_task::TaskData;pub use multi_task::TaskHierarchy;pub use multi_task::TaskMetrics;pub use multi_task::TaskSharingStrategy;pub use multi_task::TaskSimilarityMetric;pub use multi_task::TaskWeightingStrategy;pub use parallel::AsyncEnsembleCoordinator;pub use parallel::DataPartition;pub use parallel::FederatedEnsembleCoordinator;pub use parallel::ParallelConfig;pub use parallel::ParallelEnsembleTrainer;pub use parallel::ParallelPerformanceMetrics;pub use parallel::ParallelStrategy;pub use parallel::ParallelTrainable;pub use regularized::DropoutEnsemble;pub use regularized::OptimizerState;pub use regularized::RegularizationStep;pub use regularized::RegularizedEnsembleClassifier;pub use regularized::RegularizedEnsembleConfig;pub use regularized::RegularizedEnsembleRegressor;pub use regularized::WeightOptimizer;pub use simd_ops::SimdOps;pub use simd_stacking::simd_aggregate_predictions;pub use simd_stacking::simd_batch_linear_predictions;pub use simd_stacking::simd_compute_ensemble_diversity;pub use simd_stacking::simd_compute_gradients;pub use simd_stacking::simd_dot_product;pub use simd_stacking::simd_generate_meta_features;pub use simd_stacking::simd_linear_prediction;pub use simd_stacking::simd_train_stacking_ensemble;pub use simd_stacking::StackingEnsembleModel;pub use stacking::BaseEstimator;pub use stacking::BlendingClassifier;pub use stacking::MetaEstimator;pub use stacking::MetaFeatureStrategy;pub use stacking::MetaLearningStrategy;pub use stacking::MultiLayerStackingClassifier;pub use stacking::MultiLayerStackingConfig;pub use stacking::SimpleStackingClassifier;pub use stacking::StackingClassifier;pub use stacking::StackingConfig;pub use stacking::StackingLayerConfig;pub use 
streaming::AdaptiveStreamingEnsemble;pub use streaming::ConceptDriftDetector;pub use streaming::StreamingConfig;pub use streaming::StreamingEnsemble;pub use tensor_ops::ActivationType;pub use tensor_ops::AggregationType;pub use tensor_ops::ComputationGraph;pub use tensor_ops::EnsembleTensorOps;pub use tensor_ops::GraphNode;pub use tensor_ops::MemoryLayout;pub use tensor_ops::ReductionType;pub use tensor_ops::Tensor;pub use tensor_ops::TensorConfig;pub use tensor_ops::TensorDevice;pub use tensor_ops::TensorOperation;pub use tensor_ops::TensorOpsContext;pub use tensor_ops::TensorShape;pub use time_series::AdwinDriftDetector;pub use time_series::DriftAdaptationStrategy;pub use time_series::DriftStatistics;pub use time_series::SeasonalComponents;pub use time_series::TemporalAggregationMethod;pub use time_series::TimeSeriesCVStrategy;pub use time_series::TimeSeriesEnsembleClassifier;pub use time_series::TimeSeriesEnsembleConfig;pub use time_series::TimeSeriesEnsembleRegressor;pub use voting::EnsembleMember;pub use voting::EnsembleSizeAnalysis;pub use voting::EnsembleSizeRecommendations;pub use voting::VotingClassifier;pub use voting::VotingClassifierConfig;pub use voting::VotingStrategy;
Modules§
- adaboost
- AdaBoost ensemble methods
- adversarial
- Adversarial Training for Ensemble Methods
- analysis
- Advanced ensemble analysis and interpretation tools
- bagging
- Bagging ensemble methods
- compression
- Model compression techniques for large ensembles
- cpu_optimization
- Specialized CPU optimizations for ensemble methods
- gpu_acceleration
- GPU acceleration for ensemble methods
- gradient_boosting
- Gradient Boosting implementation
- imbalanced
- Imbalanced Learning Ensemble Methods
- memory_efficient
- Memory-efficient ensemble methods for large-scale machine learning
- mixed_precision
- Mixed-precision training support for ensemble methods
- monitoring
- Performance monitoring and tracking system for ensemble methods
- multi_label
- Multi-Label Ensemble Methods
- multi_task
- Multi-Task Ensemble Methods
- parallel
- Data-parallel ensemble training framework
- prelude
- Prelude module for convenient imports
- regularized
- Regularized Ensemble Methods
- simd_ops
- SIMD optimizations for ensemble operations
- simd_stacking
- SIMD-accelerated stacking ensemble operations (scalar implementations)
- stacking
- Stacking ensemble methods
- streaming
- Streaming ensemble methods for online machine learning
- tensor_ops
- Tensor operations for ensemble methods
- time_series
- Time Series Ensemble Methods
- voting
- Modular voting ensemble methods with high-performance SIMD implementations