//! Mixed-precision training utilities
//!
//! Provides support for training with reduced precision (fp16/bf16) while
//! maintaining numerical stability through loss scaling and master weights.
//!
//! ## Overview
//!
//! Mixed-precision training uses lower precision (fp16/bf16) for:
//! - Forward pass activations (memory savings)
//! - Gradient computation (compute speedup)
//!
//! While maintaining full precision (fp32) for:
//! - Master weights (numerical stability; sketched below)
//! - Loss-scaling arithmetic (gradient underflow prevention; sketched below)
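//!
//! ## Master weights
//!
//! The optimizer state and weight updates stay in fp32; a reduced-precision
//! working copy is refreshed after each step. A minimal sketch of that
//! pattern (the `MixedParam` struct and the `half` crate's `f16` are
//! illustrative assumptions, not this crate's actual internals):
//!
//! ```ignore
//! use half::f16; // assumed: the `half` crate's 16-bit float
//!
//! struct MixedParam {
//!     master: Vec<f32>, // fp32 master copy, owned by the optimizer
//!     work: Vec<f16>,   // reduced-precision copy used in forward/backward
//! }
//!
//! fn sgd_step(p: &mut MixedParam, grad_fp32: &[f32], lr: f32) {
//!     for (m, g) in p.master.iter_mut().zip(grad_fp32) {
//!         *m -= lr * g; // update in full precision
//!     }
//!     for (w, m) in p.work.iter_mut().zip(&p.master) {
//!         *w = f16::from_f32(*m); // refresh the reduced-precision copy
//!     }
//! }
//! ```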
//!
//! ## Example
//!
//! ```ignore
//! use entrenar::autograd::precision::{MixedPrecisionConfig, Precision, GradScaler};
//!
//! let config = MixedPrecisionConfig::bf16();
//! let mut scaler = GradScaler::new(config.initial_scale);
//!
//! // Forward pass in reduced precision
//! let loss = model.forward(&input);
//!
//! // Scale loss before backward
//! let mut scaled_loss = scaler.scale(loss);
//! backward(&mut scaled_loss, None);
//!
//! // Unscale and update
//! scaler.unscale_grads(&mut params);
//! optimizer.step(&mut params);
//! scaler.update();
//! ```
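//!
//! ## Dynamic loss scaling
//!
//! Gradient values below roughly 6e-8 (fp16's smallest subnormal) flush to
//! zero. Multiplying the loss by a large scale shifts gradients back into
//! fp16's representable range, and the scale is divided out again before the
//! optimizer step. `scaler.update()` then adapts the scale: shrink after an
//! overflow, grow after a long stable run. (bf16 shares fp32's exponent
//! range, so scaling mainly matters for fp16.) A minimal sketch of that
//! update rule, with illustrative field names rather than this crate's:
//!
//! ```ignore
//! struct Scaler {
//!     scale: f32,
//!     growth_factor: f32,   // e.g. 2.0
//!     backoff_factor: f32,  // e.g. 0.5
//!     growth_interval: u32, // e.g. 2000 stable steps
//!     good_steps: u32,
//! }
//!
//! impl Scaler {
//!     fn update(&mut self, found_inf: bool) {
//!         if found_inf {
//!             // Overflow: this step's gradients are unusable; back off.
//!             self.scale *= self.backoff_factor;
//!             self.good_steps = 0;
//!         } else {
//!             self.good_steps += 1;
//!             if self.good_steps >= self.growth_interval {
//!                 // Long stable run: probe a larger scale for headroom.
//!                 self.scale *= self.growth_factor;
//!                 self.good_steps = 0;
//!             }
//!         }
//!     }
//! }
//! ```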
// Re-export the public API so callers can import from this module directly.
// (The submodule paths below are assumed; adjust to the actual layout.)
pub use self::config::{MixedPrecisionConfig, Precision};
pub use self::scaler::GradScaler;