tensorlogic_train/optimizers/adamax.rs

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

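/// AdaMax optimizer: the infinity-norm variant of Adam (Kingma & Ba, 2014).
///
/// For each parameter theta with gradient g_t at step t, the standard AdaMax
/// update (restated here for reference) is:
///
/// ```text
/// m_t     = beta1 * m_{t-1} + (1 - beta1) * g_t
/// u_t     = max(beta2 * u_{t-1}, |g_t|)
/// theta_t = theta_{t-1} - (lr / (1 - beta1^t)) * m_t / u_t
/// ```
///
/// Only the first moment `m` requires bias correction; the max-based `u` does
/// not decay toward zero, so no correction is applied to it.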
#[derive(Debug)]
pub struct AdaMaxOptimizer {
    /// Optimizer hyperparameters (learning rate, betas, epsilon, clipping).
    config: OptimizerConfig,
    /// First-moment (mean) estimates, one per named parameter.
    m: HashMap<String, Array<f64, Ix2>>,
    /// Exponentially weighted infinity norms, one per named parameter.
    u: HashMap<String, Array<f64, Ix2>>,
    /// Number of optimization steps taken so far.
    t: usize,
}

impl AdaMaxOptimizer {
    /// Creates a new AdaMax optimizer from the given configuration.
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            u: HashMap::new(),
            t: 0,
        }
    }

    /// Clips gradients in place according to the configured clipping mode, if any.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    // Clamp each gradient element to [-clip_value, clip_value].
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    // Rescale all gradients so the global norm does not exceed clip_value.
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

impl Optimizer for AdaMaxOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        // Clip a local copy of the gradients so the caller's gradients stay untouched.
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Lazily initialize the moment buffers the first time a parameter is seen.
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.u.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self
                .m
                .get_mut(name)
                .expect("m initialized for all parameters");
            let u = self
                .u
                .get_mut(name)
                .expect("u initialized for all parameters");
            // First-moment estimate: m_t = beta1 * m_{t-1} + (1 - beta1) * g_t.
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            // Infinity-norm estimate: u_t = max(beta2 * u_{t-1}, |g_t|).
            for i in 0..u.nrows() {
                for j in 0..u.ncols() {
                    u[[i, j]] = (beta2 * u[[i, j]]).max(grad[[i, j]].abs());
                }
            }
            // Only the first moment needs bias correction; u is not decayed toward zero.
            let bias_correction = 1.0 - beta1.powi(self.t as i32);
            let lr_t = lr / bias_correction;
            for i in 0..param.nrows() {
                for j in 0..param.ncols() {
                    let update = lr_t * m[[i, j]] / (u[[i, j]] + self.config.epsilon);
                    param[[i, j]] -= update;
                }
            }
        }
        Ok(())
    }

    /// Gradients are supplied externally at each step, so there is no internal
    /// gradient buffer to reset.
    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        // Flatten each moment array into a Vec<f64> keyed by parameter name.
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, u_val) in &self.u {
            state.insert(format!("u_{}", name), u_val.iter().copied().collect());
        }
        state
    }

    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
        // Moment buffers are restored only for parameters that have already been
        // seen, since their shapes are needed to rebuild the arrays.
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("u_") {
                if let Some(u) = self.u.get(name) {
                    let shape = u.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.u.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}
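
// Minimal usage sketch (hypothetical parameter name "w"; `OptimizerConfig::default()`
// is assumed to supply reasonable beta1/beta2/epsilon values, as the test below does):
//
//     let mut opt = AdaMaxOptimizer::new(OptimizerConfig::default());
//     let mut params = HashMap::from([("w".to_string(), Array::<f64, _>::zeros((2, 2)))]);
//     let grads = HashMap::from([("w".to_string(), Array::<f64, _>::ones((2, 2)))]);
//     opt.step(&mut params, &grads).expect("step failed");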

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_adamax_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.002,
            ..Default::default()
        };
        let mut optimizer = AdaMaxOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.2], [0.3, 0.4]]);
        for _ in 0..3 {
            optimizer.step(&mut params, &grads).expect("unwrap");
        }
        let w = params.get("w").expect("unwrap");
        // Positive gradients should have decreased every weight after a few steps.
        assert!(w[[0, 0]] < 1.0);
        assert!(w[[0, 1]] < 2.0);
        assert!(w[[1, 0]] < 3.0);
        assert!(w[[1, 1]] < 4.0);
        let state = optimizer.state_dict();
        assert!(state.contains_key("t"));
        assert!(state.contains_key("m_w"));
        assert!(state.contains_key("u_w"));
    }
}