
tensorlogic_train/optimizers/lars.rs

//! LARS optimizer (Layer-wise Adaptive Rate Scaling).
//!
//! LARS scales the learning rate for each layer based on the ratio of the parameter norm
//! to the gradient norm. This is particularly effective for large batch training.
//!
//! Reference: You et al. "Large Batch Training of Convolutional Networks" (2017)
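//!
//! For each parameter tensor `w` with gradient `g`, the update implemented below is:
//!
//! ```text
//! local_lr = trust_coef * ||w|| / ||g||
//! v        = momentum * v + lr * local_lr * (g + weight_decay * w)
//! w        = w - v
//! ```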

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// LARS optimizer (Layer-wise Adaptive Rate Scaling).
///
/// LARS scales the learning rate for each layer based on the ratio of the parameter norm
/// to the gradient norm. This is particularly effective for large batch training.
///
/// Reference: You et al. "Large Batch Training of Convolutional Networks" (2017)
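///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest; it assumes the
/// `OptimizerConfig` fields and parameter layout used in the tests below):
///
/// ```ignore
/// use std::collections::HashMap;
/// use scirs2_core::ndarray::array;
///
/// let config = OptimizerConfig {
///     learning_rate: 0.1,
///     momentum: 0.9,
///     weight_decay: 1e-4,
///     ..Default::default()
/// };
/// let mut optimizer = LarsOptimizer::new(config, 0.001, true);
///
/// let mut params = HashMap::new();
/// params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
/// let mut grads = HashMap::new();
/// grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
///
/// optimizer.step(&mut params, &grads).unwrap();
/// ```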
#[derive(Debug)]
pub struct LarsOptimizer {
    config: OptimizerConfig,
    /// Momentum buffers for each parameter.
    velocity: HashMap<String, Array<f64, Ix2>>,
    /// Trust coefficient for layer-wise LR adaptation (typically 0.001).
    trust_coef: f64,
    /// Whether to exclude bias parameters from LARS adaptation.
    exclude_bias: bool,
}

impl LarsOptimizer {
    /// Create a new LARS optimizer.
    ///
    /// # Arguments
    /// * `config` - Optimizer configuration
    /// * `trust_coef` - Trust coefficient for adaptive LR (default: 0.001)
    /// * `exclude_bias` - Whether to exclude bias from LARS adaptation (default: true)
    pub fn new(config: OptimizerConfig, trust_coef: f64, exclude_bias: bool) -> Self {
        Self {
            config,
            velocity: HashMap::new(),
            trust_coef,
            exclude_bias,
        }
    }

    /// Apply gradient clipping if configured.
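    ///
    /// In `GradClipMode::Value`, each gradient element is clamped to
    /// `[-clip, clip]`; in `GradClipMode::Norm`, all gradients are rescaled so
    /// that the global L2 norm does not exceed `clip`.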
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }

    /// Compute the layer-wise adaptive learning rate.
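    ///
    /// Returns `learning_rate * trust_coef * ||param|| / ||grad||`, falling back to
    /// the base learning rate for excluded bias parameters or when either norm is zero.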
    fn compute_adaptive_lr(
        &self,
        param: &Array<f64, Ix2>,
        grad: &Array<f64, Ix2>,
        name: &str,
    ) -> f64 {
        // Bias parameters keep the base learning rate (no LARS scaling).
        if self.exclude_bias && (name.contains("bias") || name == "b") {
            return self.config.learning_rate;
        }
        let param_norm: f64 = param.iter().map(|&p| p * p).sum::<f64>().sqrt();
        let grad_norm: f64 = grad.iter().map(|&g| g * g).sum::<f64>().sqrt();
        if param_norm == 0.0 || grad_norm == 0.0 {
            return self.config.learning_rate;
        }
        let local_lr = self.trust_coef * param_norm / grad_norm;
        self.config.learning_rate * local_lr
    }
}

impl Optimizer for LarsOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Layer-wise learning rate: base LR scaled by trust_coef * ||w|| / ||g||.
            let adaptive_lr = self.compute_adaptive_lr(param, grad, name);
            // Fold L2 weight decay into the gradient: g + weight_decay * w.
            let mut effective_grad = grad.clone();
            if self.config.weight_decay > 0.0 {
                effective_grad += &(&*param * self.config.weight_decay);
            }
            // Momentum update: v = momentum * v + adaptive_lr * effective_grad, then w = w - v.
            let velocity = self
                .velocity
                .entry(name.clone())
                .or_insert_with(|| Array::zeros(param.raw_dim()));
            *velocity = &*velocity * self.config.momentum + &(effective_grad * adaptive_lr);
            *param = &*param - &*velocity;
        }
        Ok(())
    }

    fn zero_grad(&mut self) {}

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        state.insert("trust_coef".to_string(), vec![self.trust_coef]);
        state.insert(
            "exclude_bias".to_string(),
            vec![if self.exclude_bias { 1.0 } else { 0.0 }],
        );
        for (name, velocity) in &self.velocity {
            state.insert(
                format!("velocity_{}", name),
                velocity.iter().copied().collect(),
            );
        }
        state
    }

    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        if let Some(trust) = state.get("trust_coef") {
            self.trust_coef = trust[0];
        }
        if let Some(exclude) = state.get("exclude_bias") {
            self.exclude_bias = exclude[0] > 0.5;
        }
        // Velocity buffers are restored only for parameters whose shape is already
        // known (an existing buffer of the same name), since the flat `Vec<f64>`
        // carries no shape information of its own.
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("velocity_") {
                if let Some(velocity) = self.velocity.get(name) {
                    let shape = velocity.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.velocity.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_lars_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.1,
            momentum: 0.9,
            weight_decay: 0.0001,
            ..Default::default()
        };
        let mut optimizer = LarsOptimizer::new(config, 0.001, true);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.1], [0.1, 0.1]]);
        optimizer.step(&mut params, &grads).unwrap();
        let w = params.get("w").unwrap();
        assert!(w[[0, 0]] < 1.0);
        assert!(w[[1, 1]] < 4.0);
        let state = optimizer.state_dict();
        assert!(state.contains_key("trust_coef"));
        assert!(state.contains_key("exclude_bias"));
        assert!(state.contains_key("velocity_w"));
    }

    #[test]
    fn test_lars_bias_exclusion() {
        let config = OptimizerConfig {
            learning_rate: 0.1,
            momentum: 0.9,
            ..Default::default()
        };
        let mut optimizer = LarsOptimizer::new(config.clone(), 0.001, true);
        let mut params = HashMap::new();
        params.insert("weights".to_string(), array![[1.0, 2.0]]);
        params.insert("bias".to_string(), array![[1.0, 2.0]]);
        let mut grads = HashMap::new();
        grads.insert("weights".to_string(), array![[0.1, 0.1]]);
        grads.insert("bias".to_string(), array![[0.1, 0.1]]);
        optimizer.step(&mut params, &grads).unwrap();
        let weights = params.get("weights").unwrap();
        let bias = params.get("bias").unwrap();
        assert!(weights[[0, 0]] < 1.0);
        assert!(bias[[0, 0]] < 1.0);
    }
}