use candle_core::{Result, Var};
use candle_nn::optim::Optimizer;
use crate::{Decay, OptimParams};
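/// RAdam (Rectified Adam) optimiser, following Liu et al.,
/// "On the Variance of the Adaptive Learning Rate and Beyond" (2019).
///
/// Minimal usage sketch; the `candle_optimisers::radam` import path is an
/// assumption and may need adjusting to this crate's actual module layout:
///
/// ```ignore
/// use candle_core::{Device, Var};
/// use candle_nn::Optimizer;
/// use candle_optimisers::radam::{ParamsRAdam, RAdam};
///
/// let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
/// let mut optim = RAdam::new(vec![w.clone()], ParamsRAdam::default())?;
/// // Build a scalar loss from the tracked variables, then take one step.
/// let loss = w.as_tensor().sqr()?.sum_all()?;
/// let grads = loss.backward()?;
/// optim.step(&grads)?;
/// ```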
#[derive(Debug)]
pub struct RAdam {
/// Tracked parameters together with their moment buffers.
vars: Vec<VarRAdam>,
/// Optimiser hyper-parameters.
params: ParamsRAdam,
/// Maximum length of the approximated SMA: 2 / (1 - beta_2) - 1.
rho_inf: f64,
/// Current time step, starting at 1.
t: f64,
}
/// Per-parameter optimiser state.
#[derive(Debug)]
struct VarRAdam {
/// Parameter being optimised.
theta: Var,
/// First moment estimate (EMA of the gradient).
m: Var,
/// Second moment estimate (EMA of the squared gradient).
v: Var,
}
/// Hyper-parameters for the RAdam optimiser.
#[derive(Clone, Debug, PartialEq, PartialOrd)]
pub struct ParamsRAdam {
/// Learning rate.
pub lr: f64,
/// Exponential decay rate for the first moment estimate.
pub beta_1: f64,
/// Exponential decay rate for the second moment estimate.
pub beta_2: f64,
/// Optional weight decay, either coupled (L2) or decoupled (AdamW-style).
pub weight_decay: Option<Decay>,
/// Term added to the denominator for numerical stability.
pub eps: f64,
}
impl Default for ParamsRAdam {
fn default() -> Self {
Self {
lr: 0.001,
beta_1: 0.9,
beta_2: 0.999,
eps: 1e-8,
weight_decay: None,
}
}
}
impl Optimizer for RAdam {
type Config = ParamsRAdam;
fn new(vars: Vec<Var>, params: ParamsRAdam) -> Result<Self> {
let vars = vars
.into_iter()
.filter(|var| var.dtype().is_float())
.map(|var| {
let dtype = var.dtype();
let shape = var.shape();
let device = var.device();
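// Zero-initialised first and second moment buffers for this parameter.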
let m = Var::zeros(shape, dtype, device)?;
let v = Var::zeros(shape, dtype, device)?;
Ok(VarRAdam { theta: var, m, v })
})
.collect::<Result<Vec<VarRAdam>>>()?;
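// Maximum length of the approximated simple moving average: rho_inf = 2 / (1 - beta_2) - 1.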
let rho_inf = 2. / (1. - params.beta_2) - 1.;
Ok(Self {
vars,
params,
rho_inf,
t: 1.,
})
}
fn learning_rate(&self) -> f64 {
self.params.lr
}
fn step(&mut self, grads: &candle_core::backprop::GradStore) -> Result<()> {
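// Length of the approximated SMA at step t: rho_t = rho_inf - 2 t beta_2^t / (1 - beta_2^t).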
let rho_t = self.rho_inf
- 2. * self.t * self.params.beta_2.powf(self.t)
/ (1. - self.params.beta_2.powf(self.t));
if let Some(wd) = self.params.weight_decay {
match wd {
Decay::WeightDecay(wd) => {
for var in &self.vars {
let theta = &var.theta;
let m = &var.m;
let v = &var.v;
if let Some(grad) = grads.get(theta) {
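// Coupled (L2) weight decay: add wd * theta to the gradient before updating the moments.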
let grad = &(grad + (wd * theta.as_tensor())?)?;
let m_next = ((self.params.beta_1 * m.as_tensor())?
+ ((1. - self.params.beta_1) * grad)?)?;
let v_next = ((self.params.beta_2 * v.as_tensor())?
+ ((1. - self.params.beta_2) * grad.powf(2.)?)?)?;
let m_hat = (&m_next / (1. - self.params.beta_1.powf(self.t)))?;
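// When the variance of the adaptive learning rate is tractable (rho_t > 5,
// matching the PyTorch reference implementation), apply the rectified update;
// otherwise fall back to an un-rectified lr * m_hat step without the adaptive denominator.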
let delta = if rho_t > 5. {
let l = ((1. - self.params.beta_2.powf(self.t)).sqrt()
/ (&v_next.sqrt()? + self.params.eps)?)?;
let r = ((rho_t - 4.) * (rho_t - 2.) * self.rho_inf
/ ((self.rho_inf - 4.) * (self.rho_inf - 2.) * rho_t))
.sqrt();
(self.params.lr * r * (l * m_hat)?)?
} else {
(self.params.lr * m_hat)?
};
theta.set(&theta.sub(&delta)?)?;
m.set(&m_next)?;
v.set(&v_next)?;
}
}
}
Decay::DecoupledWeightDecay(decay) => {
for var in &self.vars {
let theta = &var.theta;
let m = &var.m;
let v = &var.v;
if let Some(grad) = grads.get(theta) {
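// Decoupled weight decay (AdamW-style): scale theta by (1 - lr * decay)
// before the gradient-based update.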
theta
.set(&(theta.as_tensor() * self.params.lr.mul_add(-decay, 1.))?)?;
let m_next = ((self.params.beta_1 * m.as_tensor())?
+ ((1. - self.params.beta_1) * grad)?)?;
let v_next = ((self.params.beta_2 * v.as_tensor())?
+ ((1. - self.params.beta_2) * grad.powf(2.)?)?)?;
let m_hat = (&m_next / (1. - self.params.beta_1.powf(self.t)))?;
let delta = if rho_t > 5. {
let l = ((1. - self.params.beta_2.powf(self.t)).sqrt()
/ (&v_next.sqrt()? + self.params.eps)?)?;
let r = ((rho_t - 4.) * (rho_t - 2.) * self.rho_inf
/ ((self.rho_inf - 4.) * (self.rho_inf - 2.) * rho_t))
.sqrt();
(self.params.lr * r * (l * m_hat)?)?
} else {
(self.params.lr * m_hat)?
};
theta.set(&theta.sub(&delta)?)?;
m.set(&m_next)?;
v.set(&v_next)?;
}
}
}
}
} else {
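// No weight decay: standard RAdam update on the raw gradient.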
for var in &self.vars {
let theta = &var.theta;
let m = &var.m;
let v = &var.v;
if let Some(grad) = grads.get(theta) {
let m_next = ((self.params.beta_1 * m.as_tensor())?
+ ((1. - self.params.beta_1) * grad)?)?;
let v_next = ((self.params.beta_2 * v.as_tensor())?
+ ((1. - self.params.beta_2) * grad.powf(2.)?)?)?;
let m_hat = (&m_next / (1. - self.params.beta_1.powf(self.t)))?;
let delta = if rho_t > 5. {
let l = ((1. - self.params.beta_2.powf(self.t)).sqrt()
/ (&v_next.sqrt()? + self.params.eps)?)?;
let r = ((rho_t - 4.) * (rho_t - 2.) * self.rho_inf
/ ((self.rho_inf - 4.) * (self.rho_inf - 2.) * rho_t))
.sqrt();
(self.params.lr * r * (l * m_hat)?)?
} else {
(self.params.lr * m_hat)?
};
theta.set(&theta.sub(&delta)?)?;
m.set(&m_next)?;
v.set(&v_next)?;
}
}
}
self.t += 1.;
Ok(())
}
fn set_learning_rate(&mut self, lr: f64) {
self.params.lr = lr;
}
}
impl OptimParams for RAdam {
fn params(&self) -> &Self::Config {
&self.params
}
fn set_params(&mut self, config: Self::Config) {
self.params = config;
}
}
impl RAdam {
#[must_use]
pub fn into_inner(self) -> Vec<Var> {
self.vars.into_iter().map(|v| v.theta).collect()
}
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use assert_approx_eq::assert_approx_eq;
use candle_core::{Device, Var};
use candle_nn::Optimizer;
use super::*;
#[test]
fn lr_test() -> Result<()> {
let params = ParamsRAdam {
lr: 0.004,
..Default::default()
};
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
let b = Var::new(0f32, &Device::Cpu)?;
let mut optim = RAdam::new(vec![w.clone(), b.clone()], params)?;
assert_approx_eq!(0.004, optim.learning_rate());
optim.set_learning_rate(0.002);
assert_approx_eq!(0.002, optim.learning_rate());
Ok(())
}
#[test]
fn into_inner_test() -> Result<()> {
let params = ParamsRAdam::default();
let w = Var::new(&[[3f32, 1.]], &Device::Cpu)?;
let b = Var::new(-2f32, &Device::Cpu)?;
let optim = RAdam::new(vec![w.clone(), b.clone()], params)?;
let inner = optim.into_inner();
assert_eq!(inner[0].as_tensor().to_vec2::<f32>()?, &[[3f32, 1.]]);
assert_approx_eq!(inner[1].as_tensor().to_vec0::<f32>()?, -2_f32);
Ok(())
}
#[test]
fn params_test() -> Result<()> {
let params = ParamsRAdam {
lr: 0.004,
..Default::default()
};
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?;
let b = Var::new(0f32, &Device::Cpu)?;
let mut optim = RAdam::new(vec![w.clone(), b.clone()], params.clone())?;
assert_eq!(params, optim.params().clone());
let new_params = ParamsRAdam {
lr: 0.002,
..Default::default()
};
optim.set_params(new_params.clone());
assert_eq!(new_params, optim.params().clone());
Ok(())
}
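#[test]
fn step_test() -> Result<()> {
// Minimal smoke-test sketch: it only asserts that a single optimisation step
// runs without error; no specific post-step values are asserted.
let params = ParamsRAdam::default();
let w = Var::new(&[[3f32, 1.]], &Device::Cpu)?;
let mut optim = RAdam::new(vec![w.clone()], params)?;
let loss = w.as_tensor().sqr()?.sum_all()?;
let grads = loss.backward()?;
optim.step(&grads)?;
Ok(())
}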
}