tensorlogic_train/optimizers/lamb.rs

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

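/// LAMB (Layer-wise Adaptive Moments) optimizer.
///
/// Combines Adam-style first/second moment estimates with a per-parameter
/// trust ratio `||param|| / ||update||`, following You et al., "Large Batch
/// Optimization for Deep Learning: Training BERT in 76 Minutes" (2019).
///
/// ```ignore
/// // Illustrative sketch; the exact import path is an assumption.
/// use tensorlogic_train::optimizers::{LambOptimizer, OptimizerConfig};
///
/// let mut opt = LambOptimizer::new(OptimizerConfig {
///     learning_rate: 1e-3,
///     ..Default::default()
/// });
/// // opt.step(&mut params, &grads)?;
/// ```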
#[derive(Debug)]
pub struct LambOptimizer {
    config: OptimizerConfig,
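    /// First-moment (mean) estimate per parameter tensor.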
    m: HashMap<String, Array<f64, Ix2>>,
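    /// Second-moment (uncentered variance) estimate per parameter tensor.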
    v: HashMap<String, Array<f64, Ix2>>,
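    /// Step count shared by all parameters, used for bias correction.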
    t: usize,
}

impl LambOptimizer {
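    /// Creates a new LAMB optimizer from the given configuration.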
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            m: HashMap::new(),
            v: HashMap::new(),
            t: 0,
        }
    }

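    /// Clips gradients in place, either element-wise by value or by
    /// rescaling so the global norm does not exceed `grad_clip`.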
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }

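    /// Frobenius (element-wise L2) norm of a 2-D array.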
    fn compute_norm(arr: &Array<f64, Ix2>) -> f64 {
        arr.iter().map(|&x| x * x).sum::<f64>().sqrt()
    }
}

impl Optimizer for LambOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
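        // Clip a copy so the caller's gradients stay untouched.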
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
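        // One global timestep drives bias correction for every parameter.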
        self.t += 1;
        let lr = self.config.learning_rate;
        let beta1 = self.config.beta1;
        let beta2 = self.config.beta2;
        let eps = self.config.epsilon;
        let weight_decay = self.config.weight_decay;
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
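            // Lazily allocate moment buffers the first time a parameter is seen.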
            if !self.m.contains_key(name) {
                self.m.insert(name.clone(), Array::zeros(param.raw_dim()));
                self.v.insert(name.clone(), Array::zeros(param.raw_dim()));
            }
            let m = self.m.get_mut(name).unwrap();
            let v = self.v.get_mut(name).unwrap();
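            // Adam-style exponential moving averages of the gradient and its square.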
            *m = &*m * beta1 + &(grad * (1.0 - beta1));
            let grad_squared = grad.mapv(|g| g * g);
            *v = &*v * beta2 + &(grad_squared * (1.0 - beta2));
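            // Bias-correct the moments for the early steps.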
            let m_hat = &*m / (1.0 - beta1.powi(self.t as i32));
            let v_hat = &*v / (1.0 - beta2.powi(self.t as i32));
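            // Adam update direction, with weight decay added to the update
            // before the trust ratio is applied (as in LAMB).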
            let adam_step = &m_hat / &v_hat.mapv(|v_val| v_val.sqrt() + eps);
            let update = &adam_step + &param.mapv(|p| p * weight_decay);
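            // Layer-wise trust ratio: scale the step by ||param|| / ||update||,
            // falling back to 1.0 when either norm is zero.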
            let param_norm = Self::compute_norm(param);
            let update_norm = Self::compute_norm(&update);
            let trust_ratio = if param_norm > 0.0 && update_norm > 0.0 {
                param_norm / update_norm
            } else {
                1.0
            };
            *param = &*param - &(update * (lr * trust_ratio));
        }
        Ok(())
    }

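    /// No-op: gradients are produced by the training loop and passed into
    /// `step` directly, so the optimizer holds nothing to reset.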
    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

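    /// Serializes the step count plus the moment buffers, flattened to
    /// vectors under "m_"- and "v_"-prefixed keys.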
    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("t".to_string(), vec![self.t as f64]);
        for (name, m_val) in &self.m {
            state.insert(format!("m_{}", name), m_val.iter().copied().collect());
        }
        for (name, v_val) in &self.v {
            state.insert(format!("v_{}", name), v_val.iter().copied().collect());
        }
        state
    }

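    /// Restores state produced by `state_dict`. Moment buffers are only
    /// restored for parameters already present in `self.m` / `self.v`
    /// (their shapes are needed to rebuild the arrays), so this should be
    /// called after the buffers have been seeded by at least one `step`.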
    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(t_vals) = state.get("t") {
            self.t = t_vals[0] as usize;
        }
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("m_") {
                if let Some(m) = self.m.get(name) {
                    let shape = m.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.m.insert(name.to_string(), arr);
                    }
                }
            } else if let Some(name) = key.strip_prefix("v_") {
                if let Some(v) = self.v.get(name) {
                    let shape = v.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.v.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_lamb_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.001,
            weight_decay: 0.01,
            ..Default::default()
        };
        let mut optimizer = LambOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        optimizer.step(&mut params, &grads).unwrap();
        let w = params.get("w").unwrap();
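        // A positive gradient plus weight decay should move w[[0, 0]] downward.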
        assert!(w[[0, 0]] < 1.0);
    }
}