use optimizer::prelude::*;
use optimizer_derive::Categorical;

/// Activation functions to search over; deriving `Categorical` lets the
/// enum be sampled with `EnumParam` below.
#[derive(Clone, Debug, Categorical)]
enum Activation {
    Relu,
    Sigmoid,
    Tanh,
    Gelu,
}
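
// A minimal sketch (hypothetical, not the crate's verified API) of what
// `derive(Categorical)` presumably generates so the variants can be
// enumerated; kept commented out since the real expansion may differ:
//
// impl Categorical for Activation {
//     fn variants() -> Vec<Self> {
//         vec![Self::Relu, Self::Sigmoid, Self::Tanh, Self::Gelu]
//     }
// }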

fn main() {
    // Minimize the objective value returned by each trial.
    let study: Study<f64> = Study::new(Direction::Minimize);

    // Define the search space: a log-scaled learning rate, an integer layer
    // count, a categorical optimizer name, a boolean flag, and an enum.
    let lr = FloatParam::new(1e-5, 1e-1).log_scale().name("lr");
    let n_layers = IntParam::new(1, 5).name("n_layers");
    let optimizer = CategoricalParam::new(vec!["sgd", "adam", "rmsprop"]).name("optimizer");
    let use_dropout = BoolParam::new().name("use_dropout");
    let activation = EnumParam::<Activation>::new().name("activation");
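
    // Each handle is used twice: `suggest` samples a value inside a trial,
    // and the same handle later keys `best.get(&handle)` on the study.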
    study
        .optimize(30, |trial: &mut optimizer::Trial| {
            // Sample a concrete value for each parameter in this trial.
            let lr_val = lr.suggest(trial)?;
            let layers = n_layers.suggest(trial)?;
            let opt = optimizer.suggest(trial)?;
            let dropout = use_dropout.suggest(trial)?;
            let act = activation.suggest(trial)?;

            // Toy objective: a synthetic loss that rewards adam, dropout,
            // and the Gelu/Relu activations.
            let loss = lr_val * layers as f64
                + if opt == "adam" { -0.05 } else { 0.0 }
                + if dropout { -0.02 } else { 0.0 }
                + match act {
                    Activation::Gelu => -0.03,
                    Activation::Relu => -0.01,
                    _ => 0.0,
                };

            Ok::<_, Error>(loss)
        })
        .unwrap();
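
    // `optimize` returns a `Result` (hence the `unwrap`); presumably it
    // surfaces the first failed trial's error.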

    // Read back the winning configuration through the same handles.
    let best = study.best_trial().unwrap();
    println!("Best trial #{} — loss = {:.6}", best.id, best.value);
    println!("  lr          = {:.6}", best.get(&lr).unwrap());
    println!("  n_layers    = {}", best.get(&n_layers).unwrap());
    println!("  optimizer   = {}", best.get(&optimizer).unwrap());
    println!("  use_dropout = {}", best.get(&use_dropout).unwrap());
    println!("  activation  = {:?}", best.get(&activation).unwrap());
}