tensorlogic_train/optimizers/adam.rs

//! Adam optimizer (Adaptive Moment Estimation).
//!
//! Adam combines the benefits of AdaGrad and RMSProp by maintaining both
//! first-order (momentum) and second-order moment estimates of gradients.
//!
//! Reference: Kingma & Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015
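//!
//! With gradient `g_t` at step `t`, the implementation below applies the update
//! in the bias-corrected "efficient" form from the paper, where both bias
//! corrections are folded into the step size:
//!
//! ```text
//! m_t     = beta1 * m_{t-1} + (1 - beta1) * g_t
//! v_t     = beta2 * v_{t-1} + (1 - beta2) * g_t^2
//! lr_t    = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
//! theta_t = theta_{t-1} - lr_t * m_t / (sqrt(v_t) + eps)
//! ```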

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// Adam optimizer.
#[derive(Debug)]
pub struct AdamOptimizer {
    config: OptimizerConfig,
    /// First moment estimates (exponential moving average of gradients).
    m: HashMap<String, Array<f64, Ix2>>,
    /// Second moment estimates (exponential moving average of squared gradients).
    v: HashMap<String, Array<f64, Ix2>>,
    /// Timestep counter.
    t: usize,
}

impl AdamOptimizer {
    /// Create a new Adam optimizer.
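    ///
    /// A minimal usage sketch (mirrors the unit test at the bottom of this file;
    /// `params` and `grads` are `HashMap<String, Array<f64, Ix2>>` supplied by the
    /// caller, and the doctest is ignored because import paths are omitted):
    ///
    /// ```ignore
    /// let config = OptimizerConfig {
    ///     learning_rate: 0.001,
    ///     ..Default::default()
    /// };
    /// let mut optimizer = AdamOptimizer::new(config);
    /// optimizer.step(&mut params, &grads)?;
    /// ```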
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            v: HashMap::new(),
            t: 0,
        }
    }

    /// Apply gradient clipping if configured.
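    ///
    /// `GradClipMode::Value` clamps each gradient element to `[-clip, clip]`;
    /// `GradClipMode::Norm` rescales all gradients by `clip / total_norm` whenever
    /// the global L2 norm exceeds `clip`.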
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

impl Optimizer for AdamOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        let eps = self.config.epsilon;
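        // Fold both bias corrections into the step size:
        //   lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
        // which is equivalent to dividing m and v by their correction factors.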
        let lr_t =
            lr * ((1.0 - beta2.powi(self.t as i32)).sqrt()) / (1.0 - beta1.powi(self.t as i32));
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
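            // Lazily allocate zeroed moment buffers the first time a parameter is seen.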
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.v.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self
                .m
                .get_mut(name)
                .expect("m initialized for all parameters");
            let v = self
                .v
                .get_mut(name)
                .expect("v initialized for all parameters");
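            // Exponential moving averages of the gradient and its elementwise square:
            //   m <- beta1 * m + (1 - beta1) * g
            //   v <- beta2 * v + (1 - beta2) * g^2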
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            let grad_squared = grad.mapv(|g| g * g);
            *v = &*v * beta2 + &(grad_squared * (1.0 - beta2));
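            // Parameter update: theta <- theta - lr_t * m / (sqrt(v) + eps).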
            let update = m.mapv(|m_val| m_val * lr_t) / &v.mapv(|v_val| v_val.sqrt() + eps);
            *param = &*param - &update;
        }
        Ok(())
    }

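    // No-op: this optimizer does not own gradient buffers; fresh gradients are
    // passed to `step` on every call, so there is nothing to reset here.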
    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
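        // Flatten each moment buffer under a prefixed key ("m_<param>" / "v_<param>")
        // so the state can be stored as plain `Vec<f64>` values.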
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, v_val) in &self.v {
            state.insert(format!("v_{}", name), v_val.iter().copied().collect());
        }
        state
    }

    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
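        // Restore "m_<param>" / "v_<param>" entries; only parameters this optimizer
        // already tracks (i.e. seen in a previous `step`) get their buffers replaced,
        // and entries whose length does not match the buffer shape are skipped.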
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("v_") {
                if let Some(v) = self.v.get(name) {
                    let shape = v.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.v.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_adam_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer = AdamOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        optimizer.step(&mut params, &grads).expect("unwrap");
        let w = params.get("w").expect("unwrap");
        assert!(w[[0, 0]] < 1.0);
    }
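
    // Additional sketch of a test: checks that state_dict exposes the timestep and
    // per-parameter moment buffers after a step, and that loading the same state
    // back is accepted. Field defaults and key names follow the code above.
    #[test]
    fn test_adam_state_dict_roundtrip() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            ..Default::default()
        };
        let mut optimizer = AdamOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        optimizer.step(&mut params, &grads).expect("step succeeds");

        let state = optimizer.state_dict();
        assert_eq!(state.get("t"), Some(&vec![1.0]));
        assert!(state.contains_key("m_w"));
        assert!(state.contains_key("v_w"));

        // load_state_dict only restores buffers for parameters already tracked by
        // this optimizer instance, so loading into the same instance is supported.
        optimizer.load_state_dict(state);
        assert!((optimizer.get_lr() - 0.001).abs() < 1e-12);
    }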
}