tensorlogic_train/lib.rs

//! Training scaffolds: loss wiring, schedules, callbacks.
//!
//! **Version**: 0.1.0-beta.1 | **Status**: Production Ready
//!
//! This crate provides comprehensive training infrastructure for Tensorlogic models:
//! - Loss functions (standard and logical constraint-based)
//! - Optimizer wrappers around SciRS2
//! - Training loops with callbacks
//! - Batch management
//! - Validation and metrics
//! - Regularization techniques
//! - Data augmentation
//! - Logging and monitoring
//! - Curriculum learning strategies
//! - Transfer learning utilities
//! - Hyperparameter optimization (grid search, random search)
//! - Cross-validation utilities
//! - Model ensembling
//! - Model pruning and compression
//! - Model quantization (int8, int4, int2)
//! - Mixed precision training (FP16, BF16)
//! - Advanced sampling strategies
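//!
//! # Example
//!
//! A minimal sketch of wiring a loss, an optimizer, and a [`Trainer`] together.
//! The constructors and methods shown below are illustrative assumptions, not
//! the exact API; consult the individual module docs for the real signatures.
//!
//! ```ignore
//! use tensorlogic_train::{AdamOptimizer, MseLoss, Trainer, TrainerConfig};
//!
//! // Assumed constructors; the concrete builders may differ.
//! let loss = MseLoss::default();
//! let optimizer = AdamOptimizer::default();
//! let config = TrainerConfig::default();
//!
//! // `model`, `train_data`, and `val_data` are placeholders for user-provided
//! // values built from the crate's `Model` and `Dataset` types.
//! let mut trainer = Trainer::new(model, loss, optimizer, config);
//! let history = trainer.fit(&train_data, &val_data)?;
//! println!("training history: {:?}", history);
//! ```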

mod augmentation;
mod batch;
mod callbacks;
mod crossval;
mod curriculum;
mod data;
mod distillation;
mod dropblock;
mod ensemble;
mod error;
mod few_shot;
mod gradient_centralization;
mod hyperparameter;
mod label_smoothing;
mod logging;
mod loss;
mod memory;
mod meta_learning;
mod metrics;
mod mixed_precision;
mod model;
mod multitask;
mod optimizer;
mod optimizers;
mod pruning;
mod quantization;
mod regularization;
mod sampling;
mod scheduler;
mod stochastic_depth;
mod trainer;
mod transfer;
mod utils;

#[cfg(feature = "structured-logging")]
pub mod structured_logging;

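// Core training API: data handling, losses, metrics, models, optimizers,
// schedulers, callbacks, and the trainer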
pub use augmentation::{
    CompositeAugmenter, CutMixAugmenter, CutOutAugmenter, DataAugmenter, MixupAugmenter,
    NoAugmentation, NoiseAugmenter, RandomErasingAugmenter, RotationAugmenter, ScaleAugmenter,
};
pub use batch::{extract_batch, BatchConfig, BatchIterator, DataShuffler};
pub use callbacks::{
    BatchCallback, Callback, CallbackList, CheckpointCallback, CheckpointCompression,
    EarlyStoppingCallback, EpochCallback, GradientAccumulationCallback, GradientAccumulationStats,
    GradientMonitor, GradientScalingStrategy, GradientSummary, HistogramCallback, HistogramStats,
    LearningRateFinder, ModelEMACallback, ProfilingCallback, ProfilingStats,
    ReduceLrOnPlateauCallback, SWACallback, TrainingCheckpoint, ValidationCallback,
};
pub use error::{TrainError, TrainResult};
pub use logging::{
    ConsoleLogger, CsvLogger, FileLogger, JsonlLogger, LoggingBackend, MetricsLogger,
    TensorBoardLogger,
};
pub use loss::{
    BCEWithLogitsLoss, ConstraintViolationLoss, ContrastiveLoss, CrossEntropyLoss, DiceLoss,
    FocalLoss, HingeLoss, HuberLoss, KLDivergenceLoss, LogicalLoss, Loss, LossConfig, MseLoss,
    PolyLoss, RuleSatisfactionLoss, TripletLoss, TverskyLoss,
};
pub use metrics::{
    Accuracy, BalancedAccuracy, CohensKappa, ConfusionMatrix, DiceCoefficient,
    ExpectedCalibrationError, F1Score, IoU, MatthewsCorrelationCoefficient,
    MaximumCalibrationError, MeanAveragePrecision, MeanIoU, Metric, MetricTracker,
    NormalizedDiscountedCumulativeGain, PerClassMetrics, Precision, Recall, RocCurve, TopKAccuracy,
};
pub use model::{AutodiffModel, DynamicModel, LinearModel, Model};
pub use optimizer::{
    AdaBeliefOptimizer, AdaMaxOptimizer, AdagradOptimizer, AdamOptimizer, AdamPOptimizer,
    AdamWOptimizer, GradClipMode, LambOptimizer, LarsOptimizer, LionConfig, LionOptimizer,
    LookaheadOptimizer, NAdamOptimizer, Optimizer, OptimizerConfig, ProdigyConfig,
    ProdigyOptimizer, RAdamOptimizer, RMSpropOptimizer, SamOptimizer, ScheduleFreeAdamW,
    ScheduleFreeConfig, SgdOptimizer, SophiaConfig, SophiaOptimizer, SophiaVariant,
};
pub use regularization::{
    CompositeRegularization, ElasticNetRegularization, GroupLassoRegularization, L1Regularization,
    L2Regularization, MaxNormRegularization, OrthogonalRegularization, Regularizer,
    SpectralNormalization,
};
pub use scheduler::{
    CosineAnnealingLrScheduler, CyclicLrMode, CyclicLrScheduler, ExponentialLrScheduler,
    LrScheduler, MultiStepLrScheduler, NoamScheduler, OneCycleLrScheduler, PlateauMode,
    PolynomialDecayLrScheduler, ReduceLROnPlateauScheduler, SgdrScheduler, StepLrScheduler,
    WarmupCosineLrScheduler,
};
pub use trainer::{Trainer, TrainerConfig, TrainingHistory, TrainingState};

// Curriculum learning
pub use curriculum::{
    CompetenceCurriculum, CurriculumManager, CurriculumStrategy, ExponentialCurriculum,
    LinearCurriculum, SelfPacedCurriculum, TaskCurriculum,
};

// Transfer learning
pub use transfer::{
    DiscriminativeFineTuning, FeatureExtractorMode, LayerFreezingConfig, ProgressiveUnfreezing,
    TransferLearningManager,
};

// Hyperparameter optimization
pub use hyperparameter::{
    AcquisitionFunction, BayesianOptimization, GaussianProcess, GpKernel, GridSearch,
    HyperparamConfig, HyperparamResult, HyperparamSpace, HyperparamValue, RandomSearch,
};

// Cross-validation
pub use crossval::{
    CrossValidationResults, CrossValidationSplit, KFold, LeaveOneOut, StratifiedKFold,
    TimeSeriesSplit,
};

// Ensembling
pub use ensemble::{
    AveragingEnsemble, BaggingHelper, Ensemble, ModelSoup, SoupRecipe, StackingEnsemble,
    VotingEnsemble, VotingMode,
};

// Multi-task learning
pub use multitask::{MultiTaskLoss, PCGrad, TaskWeightingStrategy};

// Knowledge distillation
pub use distillation::{AttentionTransferLoss, DistillationLoss, FeatureDistillationLoss};

// Label smoothing
pub use label_smoothing::{LabelSmoothingLoss, MixupLoss};

// Memory management and profiling
pub use memory::{
    CheckpointStrategy, GradientCheckpointConfig, MemoryBudgetManager, MemoryEfficientTraining,
    MemoryProfilerCallback, MemorySettings, MemoryStats,
};

// Data loading and preprocessing
pub use data::{
    CsvLoader, DataPreprocessor, Dataset, LabelEncoder, OneHotEncoder, PreprocessingMethod,
};

// Utilities for model introspection and analysis
pub use utils::{
    compare_models, compute_gradient_stats, format_duration, print_gradient_report, GradientStats,
    LrRangeTestAnalyzer, ModelSummary, ParameterDifference, ParameterStats, TimeEstimator,
};

// Model pruning and compression
pub use pruning::{
    GlobalPruner, GradientPruner, LayerPruningStats, MagnitudePruner, Pruner, PruningConfig,
    PruningMask, PruningStats, StructuredPruner, StructuredPruningAxis,
};

// Advanced sampling strategies
pub use sampling::{
    BatchReweighter, ClassBalancedSampler, CurriculumSampler, FocalSampler, HardNegativeMiner,
    ImportanceSampler, MiningStrategy, OnlineHardExampleMiner, ReweightingStrategy,
};

// Model quantization and compression
pub use quantization::{
    BitWidth, DynamicRangeCalibrator, Granularity, QuantizationAwareTraining, QuantizationConfig,
    QuantizationMode, QuantizationParams, QuantizedTensor, Quantizer,
};

// Mixed precision training
pub use mixed_precision::{
    AutocastContext, GradientScaler, LossScaler, MixedPrecisionStats, MixedPrecisionTrainer,
    PrecisionMode,
};

// Few-shot learning
pub use few_shot::{
    DistanceMetric, EpisodeSampler, FewShotAccuracy, MatchingNetwork, PrototypicalDistance,
    ShotType, SupportSet,
};

// Meta-learning
pub use meta_learning::{
    MAMLConfig, MetaLearner, MetaStats, MetaTask, Reptile, ReptileConfig, MAML,
};

// Gradient centralization
pub use gradient_centralization::{GcConfig, GcStats, GcStrategy, GradientCentralization};

// Stochastic Depth (DropPath)
pub use stochastic_depth::{DropPath, ExponentialStochasticDepth, LinearStochasticDepth};

// DropBlock regularization
pub use dropblock::{DropBlock, LinearDropBlockScheduler};