use tch::nn::VarStore;
use tch::Tensor;
mod adamw;
pub use adamw::{AdamW, AdamWConfig};
mod grad;
pub use grad::ZeroGrad;
mod grad_scale;
pub use grad_scale::GradScaler;
/// Common interface for optimizers that update the variables held in a
/// `tch` [`VarStore`].
///
/// Configuration is resolved per variable: both stepping methods take a
/// `config_fun` closure mapping a `&str` key to a [`Self::Config`] value.
/// NOTE(review): the `&str` is presumably the variable's name/path inside
/// the `VarStore` (allowing e.g. per-layer learning rates) — confirm
/// against an implementation such as `AdamW` in this module.
pub trait Optimizer {
/// Per-variable optimizer configuration (e.g. `AdamWConfig` for `AdamW`).
type Config;
/// Runs a backward pass on `loss` and then applies an update step,
/// using `config_fun` to obtain the configuration for each variable.
/// NOTE(review): likely equivalent to computing gradients from `loss`
/// followed by [`Optimizer::step`] — confirm in the implementations.
fn backward_step<F>(&mut self, loss: &Tensor, config_fun: F)
where
F: Fn(&str) -> Self::Config;
/// Applies a single update step using already-computed gradients,
/// resolving each variable's configuration via `config_fun`.
fn step<F>(&mut self, config_fun: F)
where
F: Fn(&str) -> Self::Config;
/// Returns the [`VarStore`] whose variables this optimizer updates.
fn var_store(&self) -> &VarStore;
}