tensorlogic_train/optimizers/adamw.rs

//! AdamW optimizer (Adam with decoupled weight decay).
//!
//! AdamW modifies Adam by decoupling weight decay from the gradient-based update,
//! which has been shown to improve generalization in many cases.
//!
//! Reference: Loshchilov & Hutter, "Decoupled Weight Decay Regularization", ICLR 2019
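//!
//! As a summary of the update implemented below, per parameter `theta` with
//! gradient `g` at step `t`:
//!
//! - `m_t = beta1 * m_{t-1} + (1 - beta1) * g_t`
//! - `v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2`
//! - `lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)` (bias-corrected step size)
//! - `theta_t = theta_{t-1} - lr_t * m_t / (sqrt(v_t) + eps) - lr * weight_decay * theta_{t-1}`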

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// AdamW optimizer (Adam with decoupled weight decay).
#[derive(Debug)]
pub struct AdamWOptimizer {
    config: OptimizerConfig,
    /// First moment estimates.
    m: HashMap<String, Array<f64, Ix2>>,
    /// Second moment estimates.
    v: HashMap<String, Array<f64, Ix2>>,
    /// Timestep counter.
    t: usize,
}

impl AdamWOptimizer {
    /// Create a new AdamW optimizer.
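    ///
    /// A minimal usage sketch, mirroring the unit test at the bottom of this file
    /// (it assumes `OptimizerConfig` provides the `Default` impl that test relies on):
    ///
    /// ```ignore
    /// use std::collections::HashMap;
    /// use scirs2_core::ndarray::array;
    ///
    /// let config = OptimizerConfig {
    ///     learning_rate: 1e-3,
    ///     weight_decay: 0.01,
    ///     ..Default::default()
    /// };
    /// let mut optimizer = AdamWOptimizer::new(config);
    ///
    /// let mut params = HashMap::from([("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]])]);
    /// let grads = HashMap::from([("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]])]);
    /// optimizer.step(&mut params, &grads).expect("step should succeed");
    /// ```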
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            v: HashMap::new(),
            t: 0,
        }
    }

    /// Apply gradient clipping if configured.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                // Clip each gradient element to [-clip_value, clip_value].
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.clamp(-clip_value, clip_value));
                    }
                }
                // Rescale all gradients so the total gradient norm is at most clip_value.
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

impl Optimizer for AdamWOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        // Clip a copy of the gradients so the caller's gradients are left untouched.
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        let eps = self.config.epsilon;
        let weight_decay = self.config.weight_decay;
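        // Bias-corrected step size: lr * sqrt(1 - beta2^t) / (1 - beta1^t). This folds
        // Adam's bias corrections for m and v into the learning rate rather than
        // correcting the moment estimates directly.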
        let lr_t =
            lr * ((1.0 - beta2.powi(self.t as i32)).sqrt()) / (1.0 - beta1.powi(self.t as i32));
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Lazily initialize the moment buffers the first time a parameter is seen.
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.v.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self
                .m
                .get_mut(name)
                .expect("m initialized for all parameters");
            let v = self
                .v
                .get_mut(name)
                .expect("v initialized for all parameters");
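            // Exponential moving averages of the gradient (first moment) and of the
            // squared gradient (second moment).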
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            let grad_squared = grad.mapv(|g| g * g);
            *v = &*v * beta2 + &(grad_squared * (1.0 - beta2));
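            // Adam step using the bias-corrected step size, then decoupled weight decay
            // applied directly to the parameter rather than added to the gradient.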
            let update = m.mapv(|m_val| m_val * lr_t) / &v.mapv(|v_val| v_val.sqrt() + eps);
            let decay = param.mapv(|p| p * lr * weight_decay);
            *param = &*param - &update - &decay;
        }
        Ok(())
    }

    // Gradients are supplied by the caller on each step, so there is no stored
    // gradient state to reset here.
    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

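    // Serialize the optimizer state: the timestep under "t", and each moment matrix
    // flattened into a Vec<f64> under "m_<param name>" / "v_<param name>".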
    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, v_val) in &self.v {
            state.insert(format!("v_{}", name), v_val.iter().copied().collect());
        }
        state
    }

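    // Restore state produced by `state_dict`. Moment entries are only restored for
    // parameters whose buffers already exist (shapes are taken from the current
    // buffers); unknown keys and mismatched lengths are silently ignored.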
    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("v_") {
                if let Some(v) = self.v.get(name) {
                    let shape = v.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.v.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_adamw_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            weight_decay: 0.01,
            ..Default::default()
        };
        let mut optimizer = AdamWOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        optimizer.step(&mut params, &grads).expect("step should succeed");
        let w = params.get("w").expect("parameter should exist");
        assert!(w[[0, 0]] < 1.0);
    }
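
    // A sketch of an additional check on the decoupled-decay path: with zero
    // gradients the adaptive update vanishes, so one step should shrink the
    // parameter by the factor (1 - learning_rate * weight_decay), up to rounding.
    #[test]
    fn test_adamw_decoupled_weight_decay() {
        let config = OptimizerConfig {
            learning_rate: 0.1,
            weight_decay: 0.5,
            ..Default::default()
        };
        let mut optimizer = AdamWOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, -1.0], [2.0, -2.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.0, 0.0], [0.0, 0.0]]);
        optimizer.step(&mut params, &grads).expect("step should succeed");
        let w = params.get("w").expect("parameter should exist");
        // Expected: 1.0 - 1.0 * 0.1 * 0.5 = 0.95.
        assert!((w[[0, 0]] - 0.95).abs() < 1e-12);
    }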
}