use candle_core::{Result, Var};
use candle_nn::optim::Optimizer;
use crate::{Decay, OptimParams};
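/// AdaGrad optimiser: per-parameter adaptive learning rates derived from
/// accumulated squared gradients.
///
/// Each step computes `gamma_tilde = lr / (1 + t * lr_decay)`, accumulates
/// `sum += grad^2`, and updates `theta -= gamma_tilde * grad / (sqrt(sum) + eps)`.
///
/// A minimal usage sketch. The variable names (`w`, `loss`) are hypothetical and
/// the import path for this type depends on how the crate exports it:
///
/// ```ignore
/// use candle_nn::Optimizer;
///
/// let params = ParamsAdaGrad { lr: 0.01, ..Default::default() };
/// let mut optim = Adagrad::new(vec![w.clone()], params)?;
/// // `backward_step` runs backprop on `loss` and applies one AdaGrad update to `w`.
/// optim.backward_step(&loss)?;
/// ```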
#[derive(Debug)]
pub struct Adagrad {
vars: Vec<VarAdaGrad>,
params: ParamsAdaGrad,
t: f64,
}
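/// Per-variable state: the parameter itself and its accumulated squared gradients.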
#[derive(Debug)]
struct VarAdaGrad {
theta: Var,
sum: Var,
}
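/// Hyperparameters for the [`Adagrad`] optimiser; the defaults match
/// `torch.optim.Adagrad`.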
#[derive(Clone, Debug, PartialEq, PartialOrd)]
pub struct ParamsAdaGrad {
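    /// Learning rate.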
pub lr: f64,
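    /// Learning-rate decay; the effective rate at step `t` is `lr / (1 + t * lr_decay)`.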
pub lr_decay: f64,
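    /// Initial value for the squared-gradient accumulator (not currently applied;
    /// the accumulator below starts at zero).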
pub initial_acc: f64,
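    /// Optional weight decay, either coupled (L2) or decoupled.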
pub weight_decay: Option<Decay>,
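    /// Term added to the denominator for numerical stability.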
pub eps: f64,
}
impl Default for ParamsAdaGrad {
fn default() -> Self {
Self {
lr: 0.01,
lr_decay: 0.0,
initial_acc: 0.0,
weight_decay: None,
eps: 1e-10,
}
}
}
impl Optimizer for Adagrad {
type Config = ParamsAdaGrad;
fn new(vars: Vec<Var>, params: ParamsAdaGrad) -> Result<Self> {
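        // Track only floating-point variables, pairing each with a zero-initialised
        // squared-gradient accumulator.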
let vars = vars
.into_iter()
.filter(|var| var.dtype().is_float())
.map(|var| {
let dtype = var.dtype();
let shape = var.shape();
let device = var.device();
let sum = Var::zeros(shape, dtype, device)?;
Ok(VarAdaGrad { theta: var, sum })
})
.collect::<Result<Vec<VarAdaGrad>>>()?;
Ok(Self {
vars,
t: 0.,
params,
})
}
fn learning_rate(&self) -> f64 {
self.params.lr
}
fn step(&mut self, grads: &candle_core::backprop::GradStore) -> Result<()> {
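        // One AdaGrad step: gamma_tilde = lr / (1 + t * lr_decay), sum += grad^2,
        // theta -= gamma_tilde * grad / (sqrt(sum) + eps). Weight decay, when set,
        // is either folded into the gradient (coupled) or applied to theta directly
        // (decoupled).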
if let Some(decay) = self.params.weight_decay {
match decay {
Decay::WeightDecay(decay) => {
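                    // Coupled (L2) weight decay: add decay * theta to the gradient
                    // before the update.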
for var in &self.vars {
let theta = &var.theta;
let sum = &var.sum;
if let Some(grad) = grads.get(theta) {
let gamma_tilde =
self.params.lr / self.t.mul_add(self.params.lr_decay, 1.);
let grad = &(grad + (decay * theta.as_tensor())?)?;
let current_sum = (sum.as_tensor() + grad.powf(2.)?)?;
let change = (gamma_tilde
* (grad.div(&(current_sum.powf(0.5)? + self.params.eps)?))?)?;
                            sum.set(&current_sum)?;
theta.set(&theta.sub(&change)?)?;
}
}
}
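                // Decoupled weight decay: shrink theta by (1 - lr * decay), then
                // apply the plain AdaGrad update.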
Decay::DecoupledWeightDecay(decay) => {
for var in &self.vars {
let theta = &var.theta;
let sum = &var.sum;
if let Some(grad) = grads.get(theta) {
theta
.set(&(theta.as_tensor() * self.params.lr.mul_add(-decay, 1.))?)?;
let gamma_tilde =
self.params.lr / self.t.mul_add(self.params.lr_decay, 1.);
let current_sum = (sum.as_tensor() + grad.powf(2.)?)?;
let change = (gamma_tilde
* (grad.div(&(current_sum.powf(0.5)? + self.params.eps)?))?)?;
                            sum.set(&current_sum)?;
theta.set(&theta.sub(&change)?)?;
}
}
}
}
} else {
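            // No weight decay: plain AdaGrad update.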
for var in &self.vars {
let theta = &var.theta;
let sum = &var.sum;
if let Some(grad) = grads.get(theta) {
let gamma_tilde = self.params.lr / self.t.mul_add(self.params.lr_decay, 1.);
let current_sum = (sum.as_tensor() + grad.powf(2.)?)?;
let change =
(gamma_tilde * (grad.div(&(current_sum.powf(0.5)? + self.params.eps)?))?)?;
                    sum.set(&current_sum)?;
theta.set(&theta.sub(&change)?)?;
}
}
}
self.t += 1.;
Ok(())
}
fn set_learning_rate(&mut self, lr: f64) {
self.params.lr = lr;
}
}
impl OptimParams for Adagrad {
fn params(&self) -> &Self::Config {
&self.params
}
fn set_params(&mut self, config: Self::Config) {
self.params = config;
}
}
impl Adagrad {
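    /// Consumes the optimiser and returns the parameter `Var`s it was tracking.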
#[must_use]
pub fn into_inner(self) -> Vec<Var> {
self.vars.into_iter().map(|v| v.theta).collect()
}
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use assert_approx_eq::assert_approx_eq;
use candle_core::{Device, Var};
use candle_nn::Optimizer;
use super::*;
#[test]
fn lr_test() -> Result<()> {
let params = ParamsAdaGrad {
lr: 0.004,
..Default::default()
};
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
let b = Var::new(0f32, &Device::Cpu)?;
let mut optim = Adagrad::new(vec![w.clone(), b.clone()], params)?;
assert_approx_eq!(0.004, optim.learning_rate());
optim.set_learning_rate(0.002);
assert_approx_eq!(0.002, optim.learning_rate());
Ok(())
}
#[test]
fn into_inner_test() -> Result<()> {
let params = ParamsAdaGrad::default();
let w = Var::new(&[[3f32, 1.]], &Device::Cpu)?;
let b = Var::new(-2f32, &Device::Cpu)?;
let optim = Adagrad::new(vec![w.clone(), b.clone()], params)?;
let inner = optim.into_inner();
assert_eq!(inner[0].as_tensor().to_vec2::<f32>()?, &[[3f32, 1.]]);
assert_approx_eq!(inner[1].as_tensor().to_vec0::<f32>()?, -2_f32);
Ok(())
}
#[test]
fn params_test() -> Result<()> {
let params = ParamsAdaGrad {
lr: 0.004,
..Default::default()
};
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
let b = Var::new(0f32, &Device::Cpu)?;
let mut optim = Adagrad::new(vec![w.clone(), b.clone()], params.clone())?;
assert_eq!(params, optim.params().clone());
let new_params = ParamsAdaGrad {
lr: 0.002,
..Default::default()
};
optim.set_params(new_params.clone());
assert_eq!(new_params, optim.params().clone());
Ok(())
}
}