// tensorlogic_train/optimizers/adamw.rs

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// AdamW optimizer: Adam with decoupled weight decay.
#[derive(Debug)]
pub struct AdamWOptimizer {
    config: OptimizerConfig,
    /// First-moment (mean) estimates, keyed by parameter name.
    m: HashMap<String, Array<f64, Ix2>>,
    /// Second-moment (uncentered variance) estimates, keyed by parameter name.
    v: HashMap<String, Array<f64, Ix2>>,
    /// Number of update steps taken so far.
    t: usize,
}

impl AdamWOptimizer {
    /// Creates a new AdamW optimizer from the given configuration.
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            v: HashMap::new(),
            t: 0,
        }
    }

    /// Clips gradients in place according to the configured clipping mode, if any.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    // Element-wise clipping to [-clip_value, clip_value].
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    // Rescale all gradients so their global L2 norm does not exceed clip_value.
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

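// `step` below implements the standard AdamW update, with bias correction
// folded into the step size:
//
//     m_t  = beta1 * m_{t-1} + (1 - beta1) * g_t
//     v_t  = beta2 * v_{t-1} + (1 - beta2) * g_t^2
//     lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
//     theta_t = theta_{t-1} - lr_t * m_t / (sqrt(v_t) + eps)
//                           - lr * weight_decay * theta_{t-1}
//
// Weight decay is applied directly to the parameters rather than folded into
// the gradient, which is what distinguishes AdamW from Adam with L2 regularization.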
impl Optimizer for AdamWOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        // Clip a copy of the gradients so the caller's values are left untouched.
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);

        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        let eps = self.config.epsilon;
        let weight_decay = self.config.weight_decay;

        // Bias-corrected step size: lr * sqrt(1 - beta2^t) / (1 - beta1^t).
        let lr_t =
            lr * ((1.0 - beta2.powi(self.t as i32)).sqrt()) / (1.0 - beta1.powi(self.t as i32));
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;

            // Lazily initialize the moment buffers with the parameter's shape.
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.v.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self.m.get_mut(name).unwrap();
            let v = self.v.get_mut(name).unwrap();

            // Update the biased first and second moment estimates.
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            let grad_squared = grad.mapv(|g| g * g);
            *v = &*v * beta2 + &(grad_squared * (1.0 - beta2));

            // Adam step plus decoupled weight decay applied directly to the parameter.
            let update = m.mapv(|m_val| m_val * lr_t) / &v.mapv(|v_val| v_val.sqrt() + eps);
            let decay = param.mapv(|p| p * lr * weight_decay);
            *param = &*param - &update - &decay;
        }
        Ok(())
    }

    /// Gradients are supplied by the caller on each `step`, so there is nothing to reset here.
    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        // Moment estimates are stored as flat vectors; shapes are recovered from
        // the existing buffers when the state is loaded back.
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, v_val) in &self.v {
            state.insert(format!("v_{}", name), v_val.iter().copied().collect());
        }
        state
    }

    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
        // Moment buffers can only be restored for parameters that already have
        // buffers (and therefore known shapes), since the state stores flat vectors.
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("v_") {
                if let Some(v) = self.v.get(name) {
                    let shape = v.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.v.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

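// Minimal usage sketch (illustrative only): the training-loop wiring and the
// `compute_gradients` helper are hypothetical placeholders, not part of this crate.
//
//     let mut optimizer = AdamWOptimizer::new(OptimizerConfig::default());
//     for _epoch in 0..num_epochs {
//         let grads = compute_gradients(&params); // hypothetical gradient computation
//         optimizer.step(&mut params, &grads)?;
//     }
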
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_adamw_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            weight_decay: 0.01,
            ..Default::default()
        };
        let mut optimizer = AdamWOptimizer::new(config);

        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);

        optimizer.step(&mut params, &grads).unwrap();

        let w = params.get("w").unwrap();
        assert!(w[[0, 0]] < 1.0);
    }
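
    // Additional sketch of a check for the decoupled weight-decay path, assuming
    // OptimizerConfig::default() fills in beta1/beta2/epsilon as in the test above:
    // with all-zero gradients the Adam update is zero, so only the weight-decay term
    // moves the parameter, shrinking it slightly toward zero.
    #[test]
    fn test_adamw_weight_decay_only() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            weight_decay: 0.01,
            ..Default::default()
        };
        let mut optimizer = AdamWOptimizer::new(config);

        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 1.0], [1.0, 1.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.0, 0.0], [0.0, 0.0]]);

        optimizer.step(&mut params, &grads).unwrap();

        let w = params.get("w").unwrap();
        // Expected value: 1.0 - lr * weight_decay = 1.0 - 1e-5.
        assert!(w[[0, 0]] < 1.0);
        assert!(w[[0, 0]] > 0.999);
    }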
}