extern crate nalgebra as na;
extern crate rand;
extern crate visual_odometry_rs as vors;
use na::DVector;
use rand::{distributions::Uniform, rngs::StdRng, SeedableRng};
use std::{f32, process::exit};
use vors::math::optimizer::{self, Continue, State as _};
/// Entry point: run the example and report any failure on stderr
/// with a non-zero exit code.
fn main() {
    match run() {
        Ok(()) => {}
        Err(error) => {
            eprintln!("{}", error);
            exit(1);
        }
    }
}
/// Generate noisy observations of y = exp(-a * x) for a known ground-truth
/// `a`, then recover `a` with a Levenberg-Marquardt optimization starting
/// from the initial estimate a = 0.
fn run() -> Result<(), String> {
    // Ground truth parameter and regular sampling of the x domain.
    let a_truth = 1.5;
    let nb_data: usize = 100;
    let x_domain = linspace(-5.0, 3.0, nb_data);

    // Deterministic noise: fixed seed, uniform samples in [-1, 1).
    let seed = [0; 32];
    let mut rng: StdRng = SeedableRng::from_seed(seed);
    let distribution = Uniform::from(-1.0..1.0);
    let noise = DVec::from_distribution(nb_data, &distribution, &mut rng);

    // Observations = exact model values + scaled noise.
    let y_data_noisy = f(a_truth, &x_domain) + 0.1 * noise;
    let noisy_observations = Obs {
        x: x_domain,
        y: y_data_noisy,
    };

    // Iteratively solve for `a`, starting from the model a = 0.
    let (final_state, nb_iteration) = LMOptimizerState::iterative_solve(&noisy_observations, 0.0)?;
    println!("After {} iterations:", nb_iteration);
    println!("Ground truth: a = {}", a_truth);
    println!("Computed: a = {}", final_state.eval_data.model);
    Ok(())
}
/// Convenient alias for a dynamically sized vector of f32.
type DVec = DVector<f32>;

/// Evenly spaced samples from `start` to `end` inclusive.
///
/// Returns an empty vector for `nb == 0` and `[start]` for `nb == 1`.
/// (The interpolation formula divides by `nb - 1`, which would be a
/// division by zero yielding NaN for `nb == 1` without the guard.)
fn linspace(start: f32, end: f32, nb: usize) -> DVec {
    if nb <= 1 {
        return DVec::from_element(nb, start);
    }
    let last = (nb as f32) - 1.0;
    DVec::from_fn(nb, |i, _| {
        // Affine interpolation: `end` weighted by i, `start` by (nb - 1 - i).
        (i as f32 * end + (nb - 1 - i) as f32 * start) / last
    })
}
/// The model function: element-wise exp(-a * x) over the given domain.
fn f(a: f32, x_domain: &DVec) -> DVec {
    x_domain.map(|x| f32::exp(-a * x))
}
/// Observed data: sampled x positions and their noisy y measurements.
struct Obs {
    /// Sampled points of the x domain.
    x: DVec,
    /// Noisy observations, y ≈ exp(-a * x) plus noise.
    y: DVec,
}
/// State of the Levenberg-Marquardt optimizer.
struct LMOptimizerState {
    /// Levenberg-Marquardt damping coefficient (grows on rejected steps,
    /// shrinks on accepted ones).
    lm_coef: f32,
    /// Evaluation data for the currently accepted model estimate.
    eval_data: EvalData,
}
/// Quantities evaluated at a given model estimate.
struct EvalData {
    /// Current estimate of the model parameter `a`.
    model: f32,
    /// Mean squared residual at this model.
    energy: f32,
    /// Gradient of the residuals (jacobian . residuals).
    gradient: f32,
    /// Gauss-Newton approximation of the hessian (jacobian . jacobian).
    hessian: f32,
}
/// Either a full evaluation (`Ok`) when the candidate is accepted,
/// or just its energy (`Err`) when the step is rejected because
/// the energy increased.
type EvalState = Result<EvalData, f32>;
impl LMOptimizerState {
    /// Energy (mean of squared residuals) of a candidate model, returned
    /// together with the residual vector and the model image of the x
    /// domain so they can be reused by `compute_eval_data`.
    fn eval_energy(obs: &Obs, model: f32) -> (f32, DVec, DVec) {
        let predicted = f(model, &obs.x);
        let residuals = &predicted - &obs.y;
        let sum_of_squares: f32 = residuals.iter().map(|r| r * r).sum();
        let mean_energy = sum_of_squares / residuals.len() as f32;
        (mean_energy, residuals, predicted)
    }

    /// Build the full evaluation data (gradient and Gauss-Newton hessian
    /// approximation) from the precomputed energy, residuals, and model
    /// values produced by `eval_energy`.
    fn compute_eval_data(obs: &Obs, model: f32, pre: (f32, DVec, DVec)) -> EvalData {
        let (energy, residuals, predicted) = pre;
        // Derivative of exp(-a * x) with respect to a is -x * exp(-a * x).
        let jacobian = -1.0 * predicted.component_mul(&obs.x);
        EvalData {
            model,
            energy,
            gradient: jacobian.dot(&residuals),
            hessian: jacobian.dot(&jacobian),
        }
    }
}
impl optimizer::State<Obs, EvalState, f32, String> for LMOptimizerState {
    /// Initialize the optimizer state: fixed starting damping coefficient
    /// and a full evaluation of the initial model.
    fn init(obs: &Obs, model: f32) -> Self {
        Self {
            lm_coef: 0.1,
            eval_data: Self::compute_eval_data(obs, model, Self::eval_energy(obs, model)),
        }
    }

    /// One damped Gauss-Newton step: a' = a - g / ((1 + lambda) * H).
    fn step(&self) -> Result<f32, String> {
        let hessian = (1.0 + self.lm_coef) * self.eval_data.hessian;
        Ok(self.eval_data.model - self.eval_data.gradient / hessian)
    }

    /// Evaluate the candidate model: reject it (`Err` carrying its energy)
    /// if the energy increased, otherwise return the full evaluation data.
    fn eval(&self, obs: &Obs, model: f32) -> EvalState {
        let pre = Self::eval_energy(obs, model);
        let energy = pre.0;
        let old_energy = self.eval_data.energy;
        if energy > old_energy {
            Err(energy)
        } else {
            Ok(Self::compute_eval_data(obs, model, pre))
        }
    }

    /// Decide whether to keep iterating, from the evaluation outcome and
    /// the iteration count (hard cap at 20 iterations).
    fn stop_criterion(self, nb_iter: usize, eval_state: EvalState) -> (Self, Continue) {
        let too_many_iterations = nb_iter >= 20;
        match (eval_state, too_many_iterations) {
            // Step rejected and iteration budget exhausted: stop as is.
            (Err(_), true) => (self, Continue::Stop),
            // Step accepted but budget exhausted: keep the improvement, stop.
            (Ok(eval_data), true) => {
                println!("a = {}, energy = {}", eval_data.model, eval_data.energy);
                let mut kept_state = self;
                kept_state.eval_data = eval_data;
                (kept_state, Continue::Stop)
            }
            // Step rejected (energy increased): raise damping and retry.
            // The `Err` payload is the rejected energy (see `eval`), not a
            // model value — the previous binding name `model` was misleading.
            (Err(energy), false) => {
                let mut kept_state = self;
                kept_state.lm_coef *= 10.0;
                println!("\t back from {}, lm_coef = {}", energy, kept_state.lm_coef);
                (kept_state, Continue::Forward)
            }
            // Step accepted: lower damping, keep the improvement, and
            // continue only while the energy gain stays significant.
            (Ok(eval_data), false) => {
                println!("a = {}, energy = {}", eval_data.model, eval_data.energy);
                let delta_energy = self.eval_data.energy - eval_data.energy;
                let mut kept_state = self;
                kept_state.lm_coef *= 0.1;
                kept_state.eval_data = eval_data;
                let continuation = if delta_energy > 0.01 {
                    Continue::Forward
                } else {
                    Continue::Stop
                };
                (kept_state, continuation)
            }
        }
    }
}