use super::common::{compute_gradient_norm, GradClipMode, Optimizer, OptimizerConfig};
use crate::{TrainError, TrainResult};
use scirs2_core::ndarray::{Array, Ix2};
use std::collections::HashMap;
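/// Strategy for estimating the diagonal Hessian used by Sophia.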
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum SophiaVariant {
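/// Gauss-Newton-Bartlett estimator; approximated here by an EMA of
/// squared mini-batch gradients.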
GaussNewtonBartlett,
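/// Hutchinson trace estimator; a faithful version needs Hessian-vector
/// products, so this implementation reuses the squared-gradient proxy.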
Hutchinson,
}
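/// Configuration for the Sophia optimizer.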
#[derive(Debug, Clone)]
pub struct SophiaConfig {
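/// Shared optimizer settings (learning rate, betas, epsilon, weight decay).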
pub base: OptimizerConfig,
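/// Scale applied to the Hessian estimate in the update denominator.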
pub rho: f64,
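/// Number of steps between refreshes of the Hessian estimate.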
pub hessian_update_freq: usize,
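/// Which Hessian estimator to use.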
pub variant: SophiaVariant,
}
impl Default for SophiaConfig {
fn default() -> Self {
Self {
base: OptimizerConfig {
learning_rate: 2e-4,
beta1: 0.965,
beta2: 0.99,
epsilon: 1e-8,
weight_decay: 0.01,
..Default::default()
},
rho: 0.04,
hessian_update_freq: 10,
variant: SophiaVariant::GaussNewtonBartlett,
}
}
}
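/// Sophia: a second-order optimizer that preconditions the first-moment
/// EMA with a cheap diagonal Hessian estimate and clips the resulting
/// per-element update to [-1, 1].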
pub struct SophiaOptimizer {
config: SophiaConfig,
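/// First-moment (gradient) EMA per parameter.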
m: HashMap<String, Array<f64, Ix2>>,
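/// Diagonal Hessian estimate per parameter.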
h: HashMap<String, Array<f64, Ix2>>,
t: usize,
steps_since_hessian_update: usize,
}
impl SophiaOptimizer {
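/// Build an optimizer from a base config, filling in Sophia-specific
/// defaults for `rho`, the Hessian update frequency, and the variant.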
pub fn new(config: OptimizerConfig) -> Self {
Self::with_sophia_config(SophiaConfig {
base: config,
..Default::default()
})
}
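/// Build an optimizer from a complete [`SophiaConfig`].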
pub fn with_sophia_config(config: SophiaConfig) -> Self {
Self {
config,
m: HashMap::new(),
h: HashMap::new(),
t: 0,
steps_since_hessian_update: 0,
}
}
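/// Clip gradients in place, by value or by global norm, when configured.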
fn clip_gradients(&self, gradients: &mut HashMap<String, Array<f64, Ix2>>) {
if let Some(clip_value) = self.config.base.grad_clip {
match self.config.base.grad_clip_mode {
GradClipMode::Value => {
for grad in gradients.values_mut() {
grad.mapv_inplace(|g| g.clamp(-clip_value, clip_value));
}
}
GradClipMode::Norm => {
let total_norm = compute_gradient_norm(gradients);
if total_norm > clip_value {
let scale = clip_value / total_norm;
for grad in gradients.values_mut() {
grad.mapv_inplace(|g| g * scale);
}
}
}
}
}
}
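/// Refresh the Hessian estimate as an EMA of squared gradients, a cheap
/// stand-in for the full Gauss-Newton-Bartlett estimator (which would
/// need gradients taken against labels sampled from the model).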
fn update_hessian_gnb(&mut self, gradients: &HashMap<String, Array<f64, Ix2>>) {
let beta2 = self.config.base.beta2;
for (name, grad) in gradients {
let grad_squared = grad.mapv(|g| g * g);
if let Some(h_state) = self.h.get_mut(name) {
*h_state = &*h_state * beta2 + &grad_squared * (1.0 - beta2);
} else {
self.h.insert(name.clone(), grad_squared * (1.0 - beta2));
}
}
}
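/// Hutchinson estimation would need Hessian-vector products, which this
/// interface does not expose; fall back to the GNB-style proxy.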
fn update_hessian_hutchinson(&mut self, gradients: &HashMap<String, Array<f64, Ix2>>) {
self.update_hessian_gnb(gradients);
}
}
impl Optimizer for SophiaOptimizer {
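/// One Sophia step: clip gradients, refresh the Hessian estimate when
/// due, then apply the element-wise clipped, preconditioned update.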
fn step(
&mut self,
parameters: &mut HashMap<String, Array<f64, Ix2>>,
gradients: &HashMap<String, Array<f64, Ix2>>,
) -> TrainResult<()> {
let mut clipped_gradients = gradients.clone();
self.clip_gradients(&mut clipped_gradients);
self.t += 1;
self.steps_since_hessian_update += 1;
let lr = self.config.base.learning_rate;
let beta1 = self.config.base.beta1;
let eps = self.config.base.epsilon;
let rho = self.config.rho;
let weight_decay = self.config.base.weight_decay;
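// Refresh the Hessian estimate every `hessian_update_freq` steps.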
if self.steps_since_hessian_update >= self.config.hessian_update_freq {
match self.config.variant {
SophiaVariant::GaussNewtonBartlett => {
self.update_hessian_gnb(&clipped_gradients);
}
SophiaVariant::Hutchinson => {
self.update_hessian_hutchinson(&clipped_gradients);
}
}
self.steps_since_hessian_update = 0;
}
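// Bias-correct the first moment only; the Hessian EMA is used as-is.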
let bias_correction1 = 1.0 - beta1.powi(self.t as i32);
for (name, param) in parameters.iter_mut() {
let grad = clipped_gradients.get(name).ok_or_else(|| {
TrainError::OptimizerError(format!("Missing gradient for parameter: {}", name))
})?;
// Lazily create per-parameter state via entry() so that a Hessian
// estimate already written by the periodic refresh above is never
// overwritten with the epsilon placeholder.
self.m
.entry(name.clone())
.or_insert_with(|| Array::zeros(param.raw_dim()));
self.h
.entry(name.clone())
.or_insert_with(|| Array::ones(param.raw_dim()) * eps);
let m = self
.m
.get_mut(name)
.expect("m initialized for all parameters");
let h = self.h.get(name).expect("h initialized for all parameters");
*m = &*m * beta1 + &(grad * (1.0 - beta1));
let m_hat = &*m / bias_correction1;
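// Precondition by the Hessian estimate and clip each element to
// [-1, 1] (Sophia's update clipping).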
let denominator = h * rho + eps;
let update_direction = &m_hat / &denominator;
let clipped_update = update_direction.mapv(|x| x.clamp(-1.0, 1.0));
*param = &*param - &(&clipped_update * lr);
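// Decoupled (AdamW-style) weight decay.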
if weight_decay > 0.0 {
*param = &*param - &(&*param * (weight_decay * lr));
}
}
Ok(())
}
fn zero_grad(&mut self) {
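// Gradients are owned and passed in by the caller; there is no
// internal gradient buffer to reset.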
}
fn get_lr(&self) -> f64 {
self.config.base.learning_rate
}
fn set_lr(&mut self, lr: f64) {
self.config.base.learning_rate = lr;
}
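/// Serialize step counters and per-parameter moments as flat vectors.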
fn state_dict(&self) -> HashMap<String, Vec<f64>> {
let mut state = HashMap::new();
state.insert("t".to_string(), vec![self.t as f64]);
state.insert(
"steps_since_hessian_update".to_string(),
vec![self.steps_since_hessian_update as f64],
);
for (name, m_val) in &self.m {
state.insert(format!("m_{}", name), m_val.iter().copied().collect());
}
for (name, h_val) in &self.h {
state.insert(format!("h_{}", name), h_val.iter().copied().collect());
}
state
}
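/// Restore state produced by `state_dict`. Per-parameter moments are
/// only restored for parameters whose shapes are already known (i.e.
/// after at least one `step`); otherwise only the counters are loaded.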
fn load_state_dict(&mut self, state: HashMap<String, Vec<f64>>) {
// Guard against empty vectors in hand-built state dicts.
if let Some(t_vals) = state.get("t") {
self.t = t_vals.first().copied().unwrap_or(0.0) as usize;
}
if let Some(steps_vals) = state.get("steps_since_hessian_update") {
self.steps_since_hessian_update = steps_vals.first().copied().unwrap_or(0.0) as usize;
}
for (key, values) in state {
if let Some(name) = key.strip_prefix("m_") {
if let Some(m) = self.m.get(name) {
let shape = m.raw_dim();
if let Ok(arr) = Array::from_shape_vec(shape, values) {
self.m.insert(name.to_string(), arr);
}
}
} else if let Some(name) = key.strip_prefix("h_") {
if let Some(h) = self.h.get(name) {
let shape = h.raw_dim();
if let Ok(arr) = Array::from_shape_vec(shape, values) {
self.h.insert(name.to_string(), arr);
}
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use scirs2_core::ndarray::array;
#[test]
fn test_sophia_initialization() {
let config = OptimizerConfig::default();
let optimizer = SophiaOptimizer::new(config);
assert_eq!(optimizer.t, 0);
assert!(optimizer.m.is_empty());
assert!(optimizer.h.is_empty());
}
#[test]
fn test_sophia_custom_config() {
let config = SophiaConfig {
base: OptimizerConfig {
learning_rate: 1e-4,
beta1: 0.965,
beta2: 0.99,
..Default::default()
},
rho: 0.04,
..Default::default()
};
let optimizer = SophiaOptimizer::with_sophia_config(config);
assert_relative_eq!(optimizer.get_lr(), 1e-4);
}
#[test]
fn test_sophia_single_step() {
let config = OptimizerConfig {
learning_rate: 0.1,
..Default::default()
};
let mut optimizer = SophiaOptimizer::new(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0, 3.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.1, 0.2, 0.3]]);
let initial = params["w"].clone();
optimizer.step(&mut params, &grads).expect("unwrap");
assert!(params["w"][[0, 0]] < initial[[0, 0]]);
assert!(params["w"][[0, 1]] < initial[[0, 1]]);
assert!(params["w"][[0, 2]] < initial[[0, 2]]);
}
#[test]
fn test_sophia_convergence() {
let config = OptimizerConfig {
learning_rate: 0.1,
..Default::default()
};
let mut optimizer = SophiaOptimizer::new(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[5.0], [-3.0], [2.0]]);
for _ in 0..50 {
let mut grads = HashMap::new();
grads.insert("w".to_string(), ¶ms["w"] * 2.0); optimizer.step(&mut params, &grads).expect("unwrap");
}
for &p in params["w"].iter() {
assert!(p.abs() < 0.5);
}
}
#[test]
fn test_sophia_2d_parameters() {
let config = OptimizerConfig {
learning_rate: 0.01,
..Default::default()
};
let mut optimizer = SophiaOptimizer::new(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.1, 0.1, 0.1], [-0.1, -0.1, -0.1]]);
let initial_shape = params["w"].shape().to_vec();
optimizer.step(&mut params, &grads).expect("unwrap");
assert_eq!(params["w"].shape(), &initial_shape[..]);
}
#[test]
fn test_sophia_reset_and_state_dict() {
let config = OptimizerConfig::default();
let mut optimizer = SophiaOptimizer::new(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.1, 0.2]]);
optimizer.step(&mut params, &grads).expect("unwrap");
assert!(!optimizer.m.is_empty());
assert_eq!(optimizer.t, 1);
let state = optimizer.state_dict();
assert!(state.contains_key("t"));
assert!(state.contains_key("m_w"));
assert!(state.contains_key("h_w"));
}
#[test]
fn test_sophia_hessian_update_frequency() {
let config = SophiaConfig {
hessian_update_freq: 5,
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.1, 0.2]]);
optimizer.step(&mut params, &grads).expect("unwrap");
assert_eq!(optimizer.steps_since_hessian_update, 1);
for _ in 0..4 {
optimizer.step(&mut params, &grads).expect("unwrap");
}
assert_eq!(optimizer.steps_since_hessian_update, 0);
assert!(optimizer.h.contains_key("w"));
}
#[test]
fn test_sophia_weight_decay() {
let config = SophiaConfig {
base: OptimizerConfig {
learning_rate: 0.1,
weight_decay: 0.01,
..Default::default()
},
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0, 3.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.0, 0.0, 0.0]]);
let initial = params["w"].clone();
optimizer.step(&mut params, &grads).expect("unwrap");
assert!(params["w"][[0, 0]] < initial[[0, 0]]);
assert!(params["w"][[0, 1]] < initial[[0, 1]]);
assert!(params["w"][[0, 2]] < initial[[0, 2]]);
}
#[test]
fn test_sophia_gradient_clipping_value() {
let config = SophiaConfig {
base: OptimizerConfig {
learning_rate: 0.1,
grad_clip: Some(0.5),
grad_clip_mode: GradClipMode::Value,
..Default::default()
},
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[1.0, -2.0]]);
let initial = params["w"].clone();
optimizer.step(&mut params, &grads).expect("unwrap");
let update_mag = (initial[[0, 0]] - params["w"][[0, 0]]).abs();
assert!(update_mag < 0.2);
}
#[test]
fn test_sophia_gradient_clipping_norm() {
let config = SophiaConfig {
base: OptimizerConfig {
learning_rate: 0.1,
grad_clip: Some(1.0),
grad_clip_mode: GradClipMode::Norm,
..Default::default()
},
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0, 3.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[10.0, 10.0, 10.0]]);
let initial = params["w"].clone();
optimizer.step(&mut params, &grads).expect("unwrap");
let total_update: f64 = initial
.iter()
.zip(params["w"].iter())
.map(|(&p, &u)| (p - u).powi(2))
.sum::<f64>()
.sqrt();
assert!(total_update < 1.0);
}
#[test]
fn test_sophia_learning_rate_getter_setter() {
let config = OptimizerConfig::default();
let mut optimizer = SophiaOptimizer::new(config);
optimizer.set_lr(0.001);
assert_relative_eq!(optimizer.get_lr(), 0.001);
optimizer.set_lr(0.1);
assert_relative_eq!(optimizer.get_lr(), 0.1);
}
#[test]
fn test_sophia_variant_gnb() {
let config = SophiaConfig {
variant: SophiaVariant::GaussNewtonBartlett,
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.5, 0.5]]);
let initial = params["w"].clone();
optimizer.step(&mut params, &grads).expect("unwrap");
assert!(params["w"][[0, 0]] < initial[[0, 0]]);
}
#[test]
fn test_sophia_variant_hutchinson() {
let config = SophiaConfig {
variant: SophiaVariant::Hutchinson,
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.5, 0.5]]);
let initial = params["w"].clone();
optimizer.step(&mut params, &grads).expect("unwrap");
assert!(params["w"][[0, 0]] < initial[[0, 0]]);
}
#[test]
fn test_sophia_update_clipping() {
let config = SophiaConfig {
base: OptimizerConfig {
learning_rate: 0.1,
..Default::default()
},
rho: 0.001,
..Default::default()
};
let mut optimizer = SophiaOptimizer::with_sophia_config(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[10.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[100.0]]);
let initial = params["w"][[0, 0]];
optimizer.step(&mut params, &grads).expect("unwrap");
let update_size = (initial - params["w"][[0, 0]]).abs();
assert!(update_size <= 0.12);
}
#[test]
fn test_sophia_load_state_dict() {
let config = OptimizerConfig::default();
let mut optimizer1 = SophiaOptimizer::new(config.clone());
let mut optimizer2 = SophiaOptimizer::new(config);
let mut params = HashMap::new();
params.insert("w".to_string(), array![[1.0, 2.0]]);
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.1, 0.2]]);
for _ in 0..5 {
optimizer1.step(&mut params, &grads).expect("unwrap");
}
let state = optimizer1.state_dict();
optimizer2.load_state_dict(state);
assert_eq!(optimizer2.t, optimizer1.t);
assert_eq!(
optimizer2.steps_since_hessian_update,
optimizer1.steps_since_hessian_update
);
}
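// A minimal round-trip sketch for the per-parameter moments: since
// `load_state_dict` only restores `m_*`/`h_*` entries whose shapes are
// already known, the receiving optimizer takes one step first.
#[test]
fn test_sophia_state_dict_moment_round_trip() {
let config = OptimizerConfig::default();
let mut optimizer1 = SophiaOptimizer::new(config.clone());
let mut optimizer2 = SophiaOptimizer::new(config);
let mut params1 = HashMap::new();
params1.insert("w".to_string(), array![[1.0, 2.0]]);
let mut params2 = params1.clone();
let mut grads = HashMap::new();
grads.insert("w".to_string(), array![[0.1, 0.2]]);
for _ in 0..3 {
optimizer1.step(&mut params1, &grads).expect("unwrap");
}
// One step so optimizer2 knows the parameter shapes.
optimizer2.step(&mut params2, &grads).expect("unwrap");
optimizer2.load_state_dict(optimizer1.state_dict());
assert_eq!(optimizer2.t, optimizer1.t);
assert_eq!(optimizer2.m["w"], optimizer1.m["w"]);
assert_eq!(optimizer2.h["w"], optimizer1.h["w"]);
}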
}