tensorlogic_train/optimizers/radam.rs

//! RAdam optimizer (Rectified Adam) with variance warmup (ICLR 2020).
//!
//! RAdam addresses Adam's poor convergence in the early stages of training
//! by rectifying the variance of the adaptive learning rate, providing a
//! variance warmup mechanism that stabilizes training.
//!
//! Reference: Liu et al., "On the Variance of the Adaptive Learning Rate and Beyond" (ICLR 2020)
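//!
//! While the rectification term is not yet applicable during the first few
//! steps, the update falls back to a plain momentum step; once it is, the
//! rectified adaptive step is used. See `compute_rectification` for the
//! exact formulas.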

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// RAdam optimizer (Rectified Adam) with variance warmup (ICLR 2020).
///
/// RAdam addresses Adam's poor convergence in the early stages of training
/// by rectifying the variance of the adaptive learning rate, providing a
/// variance warmup mechanism that stabilizes training.
///
/// Reference: Liu et al., "On the Variance of the Adaptive Learning Rate and Beyond" (ICLR 2020)
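///
/// # Example
///
/// A minimal usage sketch mirroring the unit test at the bottom of this file;
/// it is marked `ignore` because the exact import paths depend on how this
/// module is re-exported.
///
/// ```ignore
/// use std::collections::HashMap;
/// use scirs2_core::ndarray::array;
///
/// let config = OptimizerConfig {
///     learning_rate: 0.001,
///     ..Default::default()
/// };
/// let mut optimizer = RAdamOptimizer::new(config);
///
/// let mut params = HashMap::new();
/// params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
/// let mut grads = HashMap::new();
/// grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
///
/// // One optimization step; call repeatedly inside the training loop.
/// optimizer.step(&mut params, &grads).unwrap();
/// ```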
#[derive(Debug)]
pub struct RAdamOptimizer {
    /// Optimizer hyperparameters (learning rate, betas, epsilon, clipping).
    config: OptimizerConfig,
    /// First moment estimates.
    m: HashMap<String, Array<f64, Ix2>>,
    /// Second moment estimates.
    v: HashMap<String, Array<f64, Ix2>>,
    /// Timestep counter.
    t: usize,
}

impl RAdamOptimizer {
    /// Create a new RAdam optimizer.
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            v: HashMap::new(),
            t: 0,
        }
    }

    /// Apply gradient clipping if configured.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }

    /// Compute the variance rectification term.
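    ///
    /// Following Liu et al. (ICLR 2020), with `t` the current timestep and
    /// `beta2` the second-moment decay rate:
    ///
    /// - `rho_inf = 2 / (1 - beta2) - 1`
    /// - `rho_t = rho_inf - 2 * t * beta2^t / (1 - beta2^t)`
    ///
    /// When `rho_t > 5`, returns `(true, r_t)` with
    /// `r_t = sqrt(((rho_t - 4)(rho_t - 2) * rho_inf) / ((rho_inf - 4)(rho_inf - 2) * rho_t))`;
    /// otherwise returns `(false, 0.0)` and the caller falls back to a
    /// momentum-only update.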
    fn compute_rectification(&self) -> (bool, f64) {
        let beta2 = self.config.beta2;
        let t = self.t as f64;
        let rho_inf = 2.0 / (1.0 - beta2) - 1.0;
        let rho_t = rho_inf - 2.0 * t * beta2.powf(t) / (1.0 - beta2.powf(t));
        if rho_t > 5.0 {
            let rect = ((rho_t - 4.0) * (rho_t - 2.0) * rho_inf)
                / ((rho_inf - 4.0) * (rho_inf - 2.0) * rho_t);
            (true, rect.sqrt())
        } else {
            (false, 0.0)
        }
    }
}

impl Optimizer for RAdamOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        let eps = self.config.epsilon;
        let bias_correction1 = 1.0 - beta1.powi(self.t as i32);
        let (use_adaptive, rect) = self.compute_rectification();
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Lazily initialize the moment buffers the first time a parameter is seen.
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.v.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self.m.get_mut(name).unwrap();
            let v = self.v.get_mut(name).unwrap();
            // Exponential moving averages of the gradient and the squared gradient.
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            let grad_squared = grad.mapv(|g| g * g);
            *v = &*v * beta2 + &(grad_squared * (1.0 - beta2));
            let m_hat = &*m / bias_correction1;
            if use_adaptive {
                // Rectified adaptive step once the variance estimate is reliable.
                let bias_correction2 = 1.0 - beta2.powi(self.t as i32);
                let v_hat = &*v / bias_correction2;
                let update = m_hat / (v_hat.mapv(|val| val.sqrt()) + eps);
                *param = &*param - &(update * (lr * rect));
            } else {
                // Warmup fallback: momentum-only update without the adaptive denominator.
                *param = &*param - &(m_hat * lr);
            }
        }
        Ok(())
    }

    fn zero_grad(&mut self) {
        // No-op: gradients are owned by the caller rather than stored in the optimizer.
    }

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

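    // Optimizer state is flattened into `Vec<f64>` buffers: the timestep is
    // stored under the key "t", and each moment tensor under "m_{param}" /
    // "v_{param}" in logical (row-major) iteration order.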
    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, v_val) in &self.v {
            state.insert(format!("v_{}", name), v_val.iter().copied().collect());
        }
        state
    }

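    // Note: moments are only restored for parameters whose buffers already
    // exist in `self.m` / `self.v`, since their shapes are needed to rebuild
    // the arrays from the flat vectors.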
    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_val) = state.get("t") {
            self.t = t_val[0] as usize;
        }
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m_array) = self.m.get(name) {
                    let shape = m_array.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("v_") {
                if let Some(v_array) = self.v.get(name) {
                    let shape = v_array.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.v.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_radam_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer = RAdamOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        for _ in 0..10 {
            optimizer.step(&mut params, &grads).unwrap();
        }
        let w = params.get("w").unwrap();
        assert!(w[[0, 0]] < 1.0);
        assert!(w[[0, 1]] < 2.0);
        let state = optimizer.state_dict();
        assert!(state.contains_key("t"));
        assert!(state.contains_key("m_w"));
        assert!(state.contains_key("v_w"));
    }
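
    // A minimal state round-trip sketch: the target optimizer takes one step
    // first so that `load_state_dict` knows the moment shapes before
    // restoring them.
    #[test]
    fn test_radam_state_roundtrip() {
        let mut opt_a = RAdamOptimizer::new(OptimizerConfig::default());
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        for _ in 0..3 {
            opt_a.step(&mut params, &grads).unwrap();
        }

        let mut opt_b = RAdamOptimizer::new(OptimizerConfig::default());
        let mut params_b = HashMap::new();
        params_b.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        opt_b.step(&mut params_b, &grads).unwrap();

        opt_b.load_state_dict(opt_a.state_dict());
        assert_eq!(opt_b.t, 3);
        assert_eq!(opt_b.m.get("w"), opt_a.m.get("w"));
    }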
}