use crate::optimizer::{GradClipMode, Optimizer, OptimizerConfig};
use crate::TrainResult;
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;
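/// AdamP-style optimizer.
///
/// Uses Adam's bias-corrected first/second moment estimates, an optional
/// Nesterov lookahead on the first moment, and a projection step that
/// redirects the weight-decay perturbation away from the gradient direction
/// when the two are strongly aligned (see `projection`).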
#[derive(Clone)]
pub struct AdamPOptimizer {
config: OptimizerConfig,
/// First-moment (mean) estimates, keyed by parameter name.
m: HashMap<String, Array<f64, Ix2>>,
/// Second-moment (uncentered variance) estimates, keyed by parameter name.
v: HashMap<String, Array<f64, Ix2>>,
/// Number of optimization steps taken so far.
t: usize,
/// Nesterov momentum coefficient; values greater than zero enable the lookahead update.
nesterov: f64,
/// Cosine-similarity threshold used by the projection step.
delta: f64,
/// Scale applied to the projected weight-decay perturbation.
wd_ratio: f64,
}
impl AdamPOptimizer {
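/// Creates an AdamP optimizer with default projection settings
/// (`nesterov = 0.9`, `delta = 0.1`, `wd_ratio = 1.0`).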
pub fn new(config: OptimizerConfig) -> Self {
Self {
config,
m: HashMap::new(),
v: HashMap::new(),
t: 0,
nesterov: 0.9,
delta: 0.1,
wd_ratio: 1.0,
}
}
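/// Creates an AdamP optimizer with an explicit Nesterov coefficient,
/// projection threshold `delta`, and weight-decay ratio.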
pub fn with_params(config: OptimizerConfig, nesterov: f64, delta: f64, wd_ratio: f64) -> Self {
Self {
config,
m: HashMap::new(),
v: HashMap::new(),
t: 0,
nesterov,
delta,
wd_ratio,
}
}
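/// Projects the weight-decay perturbation with respect to the gradient.
///
/// If either norm is near zero the perturbation is returned unchanged.
/// When the cosine similarity between `grad` and `perturb` exceeds `delta`
/// in magnitude, the component of `perturb` parallel to `grad` is removed,
/// the remainder is rescaled to the original perturbation norm, and the
/// result is multiplied by `wd_ratio`; otherwise `perturb` is only scaled
/// by `wd_ratio`. `_param` is currently unused.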
fn projection(
&self,
_param: &Array<f64, Ix2>,
grad: &Array<f64, Ix2>,
perturb: &Array<f64, Ix2>,
delta: f64,
wd_ratio: f64,
) -> Array<f64, Ix2> {
let grad_norm = grad.iter().map(|&x| x * x).sum::<f64>().sqrt();
if grad_norm < 1e-12 {
return perturb.clone();
}
let perturb_norm = perturb.iter().map(|&x| x * x).sum::<f64>().sqrt();
if perturb_norm < 1e-12 {
return perturb.clone();
}
let dot_product: f64 = grad.iter().zip(perturb.iter()).map(|(&g, &p)| g * p).sum();
let cosine = dot_product / (grad_norm * perturb_norm + 1e-12);
if cosine.abs() > delta {
let scale = dot_product / (grad_norm * grad_norm + 1e-12);
let projection = grad.mapv(|x| x * scale);
let mut result = perturb - &projection;
let result_norm = result.iter().map(|&x| x * x).sum::<f64>().sqrt();
if result_norm > 1e-12 {
result = result.mapv(|x| x * perturb_norm / result_norm * wd_ratio);
}
result
} else {
perturb.mapv(|x| x * wd_ratio)
}
}
}
impl Optimizer for AdamPOptimizer {
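/// Applies one AdamP update to every parameter that has a matching
/// gradient, returning an error if a gradient is missing.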
fn step(
&mut self,
parameters: &mut HashMap<String, Array<f64, Ix2>>,
gradients: &HashMap<String, Array<f64, Ix2>>,
) -> TrainResult<()> {
self.t += 1;
let beta1 = self.config.beta1;
let beta2 = self.config.beta2;
let epsilon = self.config.epsilon;
let lr = self.config.learning_rate;
let weight_decay = self.config.weight_decay;
for (name, param) in parameters.iter_mut() {
let grad = gradients.get(name).ok_or_else(|| {
crate::TrainError::OptimizerError(format!("No gradient for parameter {}", name))
})?;
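// Optionally clip the gradient, either element-wise by value or by
// rescaling to a maximum global L2 norm.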
let grad = if let Some(clip_value) = self.config.grad_clip {
let mut clipped = grad.clone();
match self.config.grad_clip_mode {
GradClipMode::Value => {
clipped.mapv_inplace(|x| x.clamp(-clip_value, clip_value));
}
GradClipMode::Norm => {
let norm = grad.iter().map(|&x| x * x).sum::<f64>().sqrt();
if norm > clip_value {
let scale = clip_value / norm;
clipped.mapv_inplace(|x| x * scale);
}
}
}
clipped
} else {
grad.clone()
};
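// Lazily create first/second moment buffers matching the parameter shape.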
let m = self
.m
.entry(name.clone())
.or_insert_with(|| Array::zeros(param.raw_dim()));
let v = self
.v
.entry(name.clone())
.or_insert_with(|| Array::zeros(param.raw_dim()));
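// Exponential moving averages of the gradient and the squared gradient.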
*m = m.mapv(|x| x * beta1) + grad.mapv(|x| x * (1.0 - beta1));
*v = v.mapv(|x| x * beta2) + grad.mapv(|x| x * x * (1.0 - beta2));
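// Bias-corrected moment estimates.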
let m_hat = m.mapv(|x| x / (1.0 - beta1.powi(self.t as i32)));
let v_hat = v.mapv(|x| x / (1.0 - beta2.powi(self.t as i32)));
let update = &m_hat / &v_hat.mapv(|x| x.sqrt() + epsilon);
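// With Nesterov enabled, re-apply the momentum recursion with the current
// gradient (a one-step lookahead) before forming the update direction.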
let perturb = if self.nesterov > 0.0 {
let nesterov_m = m.mapv(|x| x * beta1) + grad.mapv(|x| x * (1.0 - beta1));
let nesterov_m_hat =
nesterov_m.mapv(|x| x / (1.0 - beta1.powi((self.t + 1) as i32)));
&nesterov_m_hat / &v_hat.mapv(|x| x.sqrt() + epsilon)
} else {
update.clone()
};
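// Decoupled weight decay: the decay term is passed through the projection
// before being added to the parameter update.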
if weight_decay > 0.0 {
let wd_perturb = param.mapv(|x| -x * weight_decay);
let projected_wd =
self.projection(param, &grad, &wd_perturb, self.delta, self.wd_ratio);
*param = param.clone() - perturb.mapv(|x| x * lr) + projected_wd;
} else {
param.scaled_add(-lr, &perturb);
}
}
Ok(())
}
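/// Serializes the step counter and moment buffers as flat `Vec<f64>`s.
/// Array shapes are not stored; they are recovered from existing buffers
/// when the state is loaded.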
fn state_dict(&self) -> HashMap<String, Vec<f64>> {
let mut state = HashMap::new();
state.insert("t".to_string(), vec![self.t as f64]);
for (name, m_val) in &self.m {
state.insert(format!("m_{}", name), m_val.iter().copied().collect());
}
for (name, v_val) in &self.v {
state.insert(format!("v_{}", name), v_val.iter().copied().collect());
}
state
}
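/// Restores the step counter and any moment buffers whose shapes are
/// already known to this optimizer instance.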
fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
if let Some(t_vec) = state.get("t") {
self.t = t_vec[0] as usize;
}
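// Shapes come from the existing buffers, so only parameters this
// optimizer has already seen can have their moments restored.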
for (key, value) in &state {
if let Some(name) = key.strip_prefix("m_") {
if let Some(m) = self.m.get(name) {
let shape = m.raw_dim();
if let Ok(array) = Array::from_shape_vec(shape, value.clone()) {
self.m.insert(name.to_string(), array);
}
}
} else if let Some(name) = key.strip_prefix("v_") {
if let Some(v) = self.v.get(name) {
let shape = v.raw_dim();
if let Ok(array) = Array::from_shape_vec(shape, value.clone()) {
self.v.insert(name.to_string(), array);
}
}
}
}
}
fn get_lr(&self) -> f64 {
self.config.learning_rate
}
fn set_lr(&mut self, lr: f64) {
self.config.learning_rate = lr;
}
fn zero_grad(&mut self) {
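// This optimizer does not own the gradients, so there is nothing to zero.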
}
}
impl AdamPOptimizer {
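/// Clears the moment buffers and resets the step counter to zero.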
pub fn reset(&mut self) {
self.m.clear();
self.v.clear();
self.t = 0;
}
}
#[cfg(test)]
mod tests {
use super::*;
use scirs2_core::ndarray::array;
#[test]
fn test_adamp_basic() {
let config = OptimizerConfig {
learning_rate: 0.01,
beta1: 0.9,
beta2: 0.999,
..Default::default()
};
let mut optimizer = AdamPOptimizer::new(config);
let mut parameters = HashMap::new();
parameters.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
let mut gradients = HashMap::new();
gradients.insert("w".to_string(), array![[0.1, 0.2], [0.3, 0.4]]);
optimizer.step(&mut parameters, &gradients).expect("unwrap");
assert_ne!(parameters["w"][[0, 0]], 1.0);
assert_ne!(parameters["w"][[1, 1]], 4.0);
assert!(parameters["w"][[0, 0]] < 1.0);
assert!(parameters["w"][[1, 1]] < 4.0);
}
#[test]
fn test_adamp_with_weight_decay() {
let config = OptimizerConfig {
learning_rate: 0.01,
weight_decay: 0.1,
..Default::default()
};
let mut optimizer = AdamPOptimizer::new(config);
let mut parameters = HashMap::new();
parameters.insert("w".to_string(), array![[1.0, 2.0], [3.0, 4.0]]);
let mut gradients = HashMap::new();
gradients.insert("w".to_string(), array![[0.1, 0.2], [0.3, 0.4]]);
let initial_param = parameters["w"].clone();
optimizer.step(&mut parameters, &gradients).expect("unwrap");
assert_ne!(parameters["w"], initial_param);
}
#[test]
fn test_adamp_state_dict() {
let config = OptimizerConfig {
learning_rate: 0.01,
..Default::default()
};
let mut optimizer = AdamPOptimizer::new(config);
let mut parameters = HashMap::new();
parameters.insert("w".to_string(), array![[1.0, 2.0]]);
let mut gradients = HashMap::new();
gradients.insert("w".to_string(), array![[0.1, 0.2]]);
for _ in 0..5 {
optimizer.step(&mut parameters, &gradients).expect("unwrap");
}
let state = optimizer.state_dict();
assert!(state.contains_key("t"));
assert!(state.contains_key("m_w"));
assert!(state.contains_key("v_w"));
let mut new_optimizer = AdamPOptimizer::new(OptimizerConfig {
learning_rate: 0.01,
..Default::default()
});
new_optimizer
.step(&mut parameters, &gradients)
.expect("unwrap");
new_optimizer.load_state_dict(state);
assert_eq!(new_optimizer.t, 5);
}
#[test]
fn test_adamp_convergence() {
let config = OptimizerConfig {
learning_rate: 0.1,
..Default::default()
};
let mut optimizer = AdamPOptimizer::new(config);
let mut parameters = HashMap::new();
parameters.insert("w".to_string(), array![[5.0, 5.0]]);
for _ in 0..100 {
let grad = parameters["w"].mapv(|x| x * 0.1);
let mut gradients = HashMap::new();
gradients.insert("w".to_string(), grad);
optimizer.step(&mut parameters, &gradients).expect("unwrap");
}
assert!(parameters["w"][[0, 0]].abs() < 1.0);
assert!(parameters["w"][[0, 1]].abs() < 1.0);
}
#[test]
fn test_adamp_projection() {
let config = OptimizerConfig {
learning_rate: 0.01,
weight_decay: 0.1,
..Default::default()
};
let optimizer = AdamPOptimizer::with_params(config, 0.9, 0.1, 1.0);
let param = array![[1.0, 2.0], [3.0, 4.0]];
let grad = array![[0.1, 0.2], [0.3, 0.4]];
let perturb = array![[-0.1, -0.2], [-0.3, -0.4]];
let projected = optimizer.projection(&param, &grad, &perturb, 0.1, 1.0);
assert_eq!(projected.shape(), perturb.shape());
}
#[test]
fn test_adamp_nesterov() {
let config = OptimizerConfig {
learning_rate: 0.01,
..Default::default()
};
let mut opt_nesterov = AdamPOptimizer::with_params(config.clone(), 0.9, 0.1, 1.0);
let mut opt_standard = AdamPOptimizer::with_params(config, 0.0, 0.1, 1.0);
let mut params1 = HashMap::new();
params1.insert("w".to_string(), array![[1.0, 2.0]]);
let mut params2 = params1.clone();
let mut gradients = HashMap::new();
gradients.insert("w".to_string(), array![[0.1, 0.2]]);
opt_nesterov.step(&mut params1, &gradients).expect("unwrap");
opt_standard.step(&mut params2, &gradients).expect("unwrap");
// On the very first step the Nesterov lookahead coincides with the plain
// bias-corrected update, so the two parameter values may agree to machine
// precision; accept either a real difference or near-equality.
assert!(
params1["w"][[0, 0]] != params2["w"][[0, 0]]
|| (params1["w"][[0, 0]] - params2["w"][[0, 0]]).abs() < 1e-10
);
}
}