tensorlogic_train/optimizers/nadam.rs

//! NAdam optimizer (Nesterov-accelerated Adam).
//!
//! NAdam combines Adam with Nesterov momentum, which can improve convergence
//! in some scenarios by applying the momentum look-ahead directly to the
//! update instead of re-evaluating the gradient at a look-ahead point.
//!
//! Reference: Dozat, "Incorporating Nesterov Momentum into Adam", ICLR 2016
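//!
//! # Example
//!
//! A minimal usage sketch: it mirrors the unit test at the bottom of this file
//! and assumes `OptimizerConfig`'s `Default` impl supplies the usual Adam-style
//! `beta1`/`beta2`/`epsilon` values. Marked `ignore` because the crate's public
//! re-export path for these types is not shown here.
//!
//! ```ignore
//! use std::collections::HashMap;
//! use scirs2_core::ndarray::array;
//!
//! let config = OptimizerConfig {
//!     learning_rate: 0.002,
//!     ..Default::default()
//! };
//! let mut optimizer = NAdamOptimizer::new(config);
//!
//! let mut params = HashMap::from([("w".to_string(), array![[1.0_f64, 2.0]])]);
//! let grads = HashMap::from([("w".to_string(), array![[0.1_f64, 0.1]])]);
//!
//! // One optimization step moves `w` against the gradient direction.
//! optimizer.step(&mut params, &grads).unwrap();
//! ```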

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// NAdam optimizer (Nesterov-accelerated Adam).
#[derive(Debug)]
pub struct NAdamOptimizer {
    config: OptimizerConfig,
    /// First moment estimates.
    m: HashMap<String, Array<f64, Ix2>>,
    /// Second moment estimates.
    v: HashMap<String, Array<f64, Ix2>>,
    /// Timestep counter.
    t: usize,
}

impl NAdamOptimizer {
    /// Create a new NAdam optimizer.
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            v: HashMap::new(),
            t: 0,
        }
    }

    /// Apply gradient clipping if configured: `Value` clamps each element to
    /// `[-clip_value, clip_value]`, while `Norm` rescales all gradients so the
    /// total norm returned by `compute_gradient_norm` does not exceed
    /// `clip_value`.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.clamp(-clip_value, clip_value));
                    }
                }
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

impl Optimizer for NAdamOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
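        // Update rule implemented below. Note this is a simplified variant of
        // Dozat's NAdam: the paper schedules mu with 0.96^(t * 0.004) and
        // bias-corrects the momentum with a running product of the mu_i,
        // whereas this code uses 0.96^t and beta1^t.
        //
        //   mu_t     = beta1 * (1 - 0.5 * 0.96^t)
        //   mu_{t+1} = beta1 * (1 - 0.5 * 0.96^(t+1))
        //   m_t      = beta1 * m_{t-1} + (1 - beta1) * g_t
        //   v_t      = beta2 * v_{t-1} + (1 - beta2) * g_t^2
        //   m_hat    = m_t / (1 - beta1^t)
        //   v_hat    = v_t / (1 - beta2^t)
        //   m_bar    = mu_{t+1} * m_hat / (1 - mu_{t+1})
        //              + (1 - mu_t) * g_t / (1 - mu_{t+1})
        //   theta    = theta - lr * m_bar / (sqrt(v_hat) + eps)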
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        let eps = self.config.epsilon;
        // Nesterov momentum coefficients for the current and next timestep.
        let mu_t = beta1 * (1.0 - 0.5 * 0.96_f64.powi(self.t as i32));
        let mu_t_next = beta1 * (1.0 - 0.5 * 0.96_f64.powi((self.t + 1) as i32));
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Lazily initialize moment buffers the first time a parameter is seen.
            let m = self
                .m
                .entry(name.clone())
                .or_insert_with(|| Array::zeros(param.raw_dim()));
            let v = self
                .v
                .entry(name.clone())
                .or_insert_with(|| Array::zeros(param.raw_dim()));
            // Exponential moving averages of the gradient and its square.
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            let grad_squared = grad.mapv(|g| g * g);
            *v = &*v * beta2 + &(grad_squared * (1.0 - beta2));
            // Bias-corrected moment estimates.
            let m_hat = &*m / (1.0 - beta1.powi(self.t as i32));
            let v_hat = &*v / (1.0 - beta2.powi(self.t as i32));
            // Nesterov look-ahead: blend the bias-corrected momentum with the
            // current gradient using the next step's momentum coefficient.
            let m_bar =
                &m_hat * mu_t_next / (1.0 - mu_t_next) + &(grad * (1.0 - mu_t) / (1.0 - mu_t_next));
            let update = m_bar / &v_hat.mapv(|v_val| v_val.sqrt() + eps);
            *param = &*param - &(update * lr);
        }
        Ok(())
    }

    fn zero_grad(&mut self) {
        // Gradients are supplied externally on each `step` call; there are no
        // internal gradient buffers to reset.
    }

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        // Flatten each moment matrix into a Vec; shapes are recovered from the
        // live buffers in `load_state_dict`.
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, v_val) in &self.v {
            state.insert(format!("v_{}", name), v_val.iter().copied().collect());
        }
        state
    }

    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
        // Moment buffers are only restored for parameters whose shapes are
        // already known (an entry exists in `m`/`v`); unknown keys and shape
        // mismatches are skipped silently.
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("v_") {
                if let Some(v) = self.v.get(name) {
                    let shape = v.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.v.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_nadam_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.002,
            ..Default::default()
        };
        let mut optimizer = NAdamOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1]]);
        optimizer.step(&mut params, &grads).unwrap();
        let w = params.get("w").unwrap();
        assert!(w[[0, 0]] < 1.0);
    }
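
    // A sketch of a state save/restore round trip; it assumes that
    // `OptimizerConfig::default()` alone yields a usable config, as the test
    // above implies. Since `load_state_dict` only restores buffers whose
    // shapes are already known, the target optimizer takes one step first to
    // initialize its moment buffers.
    #[test]
    fn test_nadam_state_dict_roundtrip() {
        let mut source = NAdamOptimizer::new(OptimizerConfig::default());
        let mut target = NAdamOptimizer::new(OptimizerConfig::default());

        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1]]);

        // Two steps on the source so its state differs from a single step.
        source.step(&mut params, &grads).unwrap();
        source.step(&mut params, &grads).unwrap();
        let state = source.state_dict();

        // One step on the target to create its moment buffers for "w".
        let mut target_params = HashMap::new();
        target_params.insert("w".to_string(), array![[1.0, 2.0]]);
        target.step(&mut target_params, &grads).unwrap();

        target.load_state_dict(state);
        assert_eq!(target.t, source.t);
        assert_eq!(target.m.get("w"), source.m.get("w"));
        assert_eq!(target.v.get("w"), source.v.get("w"));
    }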
}