tensorlogic_train/optimizers/adagrad.rs

//! Adagrad optimizer (Adaptive Gradient).
//!
//! Adagrad adapts the learning rate for each parameter based on the historical
//! sum of squared gradients, giving frequently occurring features lower learning rates.
//!
//! Reference: Duchi et al., "Adaptive Subgradient Methods for Online Learning
//! and Stochastic Optimization", JMLR 2011
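//!
//! The per-element update implemented in `step` below is:
//!
//! ```text
//! G_t     = G_{t-1} + g_t^2
//! theta_t = theta_{t-1} - lr * g_t / (sqrt(G_t) + eps)
//! ```
//!
//! where `G` is the accumulated sum of squared gradients, `lr` the learning
//! rate, and `eps` the numerical-stability term from `OptimizerConfig`.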

use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;

/// Adagrad optimizer (Adaptive Gradient).
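///
/// # Examples
///
/// A minimal usage sketch mirroring the unit test at the bottom of this file;
/// the import paths are assumed from this file's module layout and may need
/// adjusting to the crate's actual re-exports.
///
/// ```ignore
/// use std::collections::HashMap;
/// use scirs2_core::ndarray::array;
/// use tensorlogic_train::optimizers::{AdagradOptimizer, Optimizer, OptimizerConfig};
///
/// let config = OptimizerConfig {
///     learning_rate: 0.1,
///     ..Default::default()
/// };
/// let mut optimizer = AdagradOptimizer::new(config);
///
/// let mut params = HashMap::new();
/// params.insert("w".to_string(), array![[1.0, 2.0]]);
/// let mut grads = HashMap::new();
/// grads.insert("w".to_string(), array![[0.1, 0.2]]);
///
/// // One step moves each parameter against its gradient.
/// optimizer.step(&mut params, &grads).unwrap();
/// assert!(params["w"][[0, 0]] < 1.0);
/// ```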
#[derive(Debug)]
pub struct AdagradOptimizer {
    config: OptimizerConfig,
    /// Accumulated sum of squared gradients.
    sum_squared_grads: HashMap<String, Array<f64, Ix2>>,
}

impl AdagradOptimizer {
    /// Create a new Adagrad optimizer.
    pub fn new(config: OptimizerConfig) -> Self {
        Self {
            config,
            sum_squared_grads: HashMap::new(),
        }
    }

    /// Apply gradient clipping if configured.
    fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
        if let Some(clip_value) = self.config.grad_clip {
            match self.config.grad_clip_mode {
                GradClipMode::Value => {
                    // Clamp each gradient element to [-clip_value, clip_value].
                    for grad in gradients.values_mut() {
                        grad.mapv_inplace(|g| g.max(-clip_value).min(clip_value));
                    }
                }
                GradClipMode::Norm => {
                    // Rescale all gradients so their global norm is at most clip_value.
                    let total_norm = compute_gradient_norm(gradients);
                    if total_norm > clip_value {
                        let scale = clip_value / total_norm;
                        for grad in gradients.values_mut() {
                            grad.mapv_inplace(|g| g * scale);
                        }
                    }
                }
            }
        }
    }
}

impl Optimizer for AdagradOptimizer {
    fn step(
        &mut self,
        parameters: &mut HashMap<String, Array<f64, Ix2>>,
        gradients: &HashMap<String, Array<f64, Ix2>>,
    ) -> TrainResult<()> {
        let mut clipped_gradients = gradients.clone();
        self.clip_gradients(&mut clipped_gradients);
        let lr = self.config.learning_rate;
        let eps = self.config.epsilon;
        for (name, param) in parameters.iter_mut() {
            let grad = clipped_gradients.get(name).ok_or_else(|| {
                TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
            })?;
            // Lazily initialize the accumulator with the parameter's shape.
            let sum_sq = self
                .sum_squared_grads
                .entry(name.clone())
                .or_insert_with(|| Array::zeros(param.raw_dim()));
            // Accumulate the element-wise sum of squared gradients: G += g^2.
            let grad_squared = grad.mapv(|g| g * g);
            *sum_sq = &*sum_sq + &grad_squared;
            // Adagrad update: theta -= lr * g / (sqrt(G) + eps).
            let update = grad / &sum_sq.mapv(|s| s.sqrt() + eps);
            *param = &*param - &(update * lr);
        }
        Ok(())
    }

    fn zero_grad(&mut self) {
        // Gradients are supplied externally on each call to `step`;
        // this optimizer keeps no gradient buffers to reset.
    }

    fn get_lr(&self) -> f64 {
        self.config.learning_rate
    }

    fn set_lr(&mut self, lr: f64) {
        self.config.learning_rate = lr;
    }

    fn state_dict(&self) -> HashMap<String, Vec<f64>> {
        let mut state = HashMap::new();
        for (name, sum_sq) in &self.sum_squared_grads {
            // Flatten each accumulator under a "sum_squared_grads_<name>" key.
            state.insert(
                format!("sum_squared_grads_{}", name),
                sum_sq.iter().copied().collect(),
            );
        }
        state
    }

    fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
        for (key, values) in state {
            if let Some(name) = key.strip_prefix("sum_squared_grads_") {
                // The target shape comes from the existing accumulator, so state is
                // restored only for parameters that already have an entry (i.e. after
                // at least one `step`); unknown or mismatched entries are skipped.
                if let Some(sum_sq) = self.sum_squared_grads.get(name) {
                    let shape = sum_sq.raw_dim();
                    if let Ok(arr) = Array::from_shape_vec(shape, values) {
                        self.sum_squared_grads.insert(name.to_string(), arr);
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_adagrad_optimizer() {
        let config = OptimizerConfig {
            learning_rate: 0.1,
            ..Default::default()
        };
        let mut optimizer = AdagradOptimizer::new(config);
        let mut params = HashMap::new();
        params.insert("w".to_string(), array![[1.0, 2.0]]);
        let mut grads = HashMap::new();
        grads.insert("w".to_string(), array![[0.1, 0.2]]);
        optimizer.step(&mut params, &grads).unwrap();
        let w = params.get("w").unwrap();
        // Each weight should move in the negative gradient direction.
        assert!(w[[0, 0]] < 1.0);
        assert!(w[[0, 1]] < 2.0);
    }
}