tensorlogic_train/optimizers/adamax.rs

//! AdaMax optimizer (variant of Adam with infinity norm).
//!
//! AdaMax uses the infinity norm of gradients instead of the L2 norm, making it
//! more robust to large gradients and outliers.
//!
//! Reference: Kingma & Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015
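//!
//! In pseudocode, the per-parameter update performed by `step` below is:
//!
//! ```text
//! m_t     = beta1 * m_{t-1} + (1 - beta1) * g_t
//! u_t     = max(beta2 * u_{t-1}, |g_t|)
//! theta_t = theta_{t-1} - lr / (1 - beta1^t) * m_t / (u_t + epsilon)
//! ```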

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// AdaMax optimizer (variant of Adam with infinity norm).
///
/// Uses the infinity norm of gradients instead of the L2 norm, making it more robust
/// to large gradients and outliers.
///
/// Reference: Kingma & Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015
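///
/// # Examples
///
/// A minimal usage sketch, mirroring the unit test at the bottom of this file
/// (imports omitted, so the block is not compiled as a doctest):
///
/// ```ignore
/// let config = OptimizerConfig {
///     learning_rate: 0.002,
///     ..Default::default()
/// };
/// let mut optimizer = AdaMaxOptimizer::new(config);
///
/// let mut params = HashMap::new();
/// params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
/// let mut grads = HashMap::new();
/// grads.insert("w".to_string(), array![[0.1, 0.2], [0.3, 0.4]]);
///
/// optimizer.step(&mut params, &grads).unwrap();
/// ```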
#[derive(Debug)]
pub struct AdaMaxOptimizer {
    config: OptimizerConfig,
    /// First moment estimates (exponential moving average of gradients).
    m: HashMap<String, Array<f64, Ix2>>,
    /// Exponentially weighted infinity norm.
    u: HashMap<String, Array<f64, Ix2>>,
    /// Timestep counter.
    t: usize,
}

impl AdaMaxOptimizer {
    /// Create a new AdaMax optimizer.
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            u: HashMap::new(),
            t: 0,
        }
    }

    /// Apply gradient clipping if configured.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

impl Optimizer for AdaMaxOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Lazily initialize per-parameter state on the first update.
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.u.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self.m.get_mut(name).unwrap();
            let u = self.u.get_mut(name).unwrap();
            // Biased first moment estimate: m_t = beta1 * m_{t-1} + (1 - beta1) * g_t.
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            // Exponentially weighted infinity norm: u_t = max(beta2 * u_{t-1}, |g_t|).
            for i in 0..u.nrows() {
                for j in 0..u.ncols() {
                    u[[i, j]] = (beta2 * u[[i, j]]).max(grad[[i, j]].abs());
                }
            }
            // Only the first moment needs bias correction; the max-based u does not.
            let bias_correction = 1.0 - beta1.powi(self.t as i32);
            let lr_t = lr / bias_correction;
            // Parameter update: theta -= lr_t * m / (u + epsilon).
            for i in 0..param.nrows() {
                for j in 0..param.ncols() {
                    let update = lr_t * m[[i, j]] / (u[[i, j]] + self.config.epsilon);
                    param[[i, j]] -= update;
                }
            }
        }
        Ok(())
    }

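    /// No-op: this optimizer does not accumulate gradients between steps,
    /// so there is no per-parameter gradient buffer to reset.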
    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, u_val) in &self.u {
            state.insert(format!("u_{}", name), u_val.iter().copied().collect());
        }
        state
    }

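    // Note: moment buffers are restored only for parameters whose entries already
    // exist in `self.m` / `self.u` (the stored vectors carry no shape information
    // of their own), so unrecognized keys are silently skipped.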
    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("u_") {
                if let Some(u) = self.u.get(name) {
                    let shape = u.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.u.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_adamax_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.002,
            ..Default::default()
        };
        let mut optimizer = AdaMaxOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.2], [0.3, 0.4]]);
        for _ in 0..3 {
            optimizer.step(&mut params, &grads).unwrap();
        }
        let w = params.get("w").unwrap();
        assert!(w[[0, 0]] < 1.0);
        assert!(w[[0, 1]] < 2.0);
        assert!(w[[1, 0]] < 3.0);
        assert!(w[[1, 1]] < 4.0);
        let state = optimizer.state_dict();
        assert!(state.contains_key("t"));
        assert!(state.contains_key("m_w"));
        assert!(state.contains_key("u_w"));
    }
}
179}