/// Default threshold on the inner product s'y below which an L-BFGS update
/// is rejected (forwarded to `lbfgs::Lbfgs::with_sy_epsilon` in `PANOCCache::new`).
const DEFAULT_SY_EPSILON: f64 = 1e-10;
/// Default epsilon of the cautious-BFGS (C-BFGS) update condition
/// (forwarded to `lbfgs::Lbfgs::with_cbfgs_epsilon`).
const DEFAULT_CBFGS_EPSILON: f64 = 1e-8;
/// Default alpha exponent of the C-BFGS update condition
/// (forwarded to `lbfgs::Lbfgs::with_cbfgs_alpha`).
const DEFAULT_CBFGS_ALPHA: f64 = 1.0;
/// Cache of workspace buffers and per-iteration state for the PANOC solver.
///
/// All vectors are allocated once in [`PANOCCache::new`] (sized by the problem
/// dimension) and reused across iterations and runs; `reset` reinitializes the
/// scalar state between runs.
#[derive(Debug)]
pub struct PANOCCache {
    // Limited-memory BFGS buffer used to produce quasi-Newton directions.
    pub(crate) lbfgs: lbfgs::Lbfgs,
    // Gradient at the current iterate (presumably the cost gradient
    // ∇f(u) — confirm against the solver that fills it).
    pub(crate) gradient_u: Vec<f64>,
    // Copy of the gradient from the previous iterate; allocated only when
    // the AKKT termination criterion is enabled via `set_akkt_tolerance`,
    // and refreshed by `cache_previous_gradient`.
    pub(crate) gradient_u_previous: Option<Vec<f64>>,
    // Workspace: half-step iterate (NOTE(review): likely the projected
    // gradient step of PANOC — confirm with the solver code).
    pub(crate) u_half_step: Vec<f64>,
    // Workspace: gradient-step vector (NOTE(review): likely u - gamma*∇f(u)).
    pub(crate) gradient_step: Vec<f64>,
    // Workspace: direction computed by the L-BFGS buffer.
    pub(crate) direction_lbfgs: Vec<f64>,
    // Workspace: candidate next iterate.
    pub(crate) u_plus: Vec<f64>,
    // Right-hand side of the line-search condition (set by the solver).
    pub(crate) rhs_ls: f64,
    // Left-hand side of the line-search condition (set by the solver).
    pub(crate) lhs_ls: f64,
    // Gamma-scaled fixed-point residual vector; enters the AKKT residual
    // together with `gamma` and the gradient difference (see `akkt_residual`).
    pub(crate) gamma_fpr: Vec<f64>,
    // Step-size parameter; scales the gradient difference in `akkt_residual`.
    pub(crate) gamma: f64,
    // Tolerance on `norm_gamma_fpr` used by `fpr_exit_condition`.
    pub(crate) tolerance: f64,
    // Norm of `gamma_fpr`; initialized to +inf so the exit condition
    // cannot fire before a residual has been computed.
    pub(crate) norm_gamma_fpr: f64,
    // Line-search parameter, initialized/reset to 1.0.
    pub(crate) tau: f64,
    // Estimate of the gradient's Lipschitz constant (set by the solver).
    pub(crate) lipschitz_constant: f64,
    // Algorithm parameter sigma (set by the solver; semantics defined there).
    pub(crate) sigma: f64,
    // Cost value at the current iterate.
    pub(crate) cost_value: f64,
    // Iteration counter (0-based; checked by `cache_previous_gradient`).
    pub(crate) iteration: usize,
    // Optional tolerance of the AKKT termination criterion; `None` means
    // the AKKT check is disabled and only the FPR check applies.
    pub(crate) akkt_tolerance: Option<f64>,
}
impl PANOCCache {
    /// Constructs a new cache for a problem of dimension `problem_size`, with
    /// exit tolerance `tolerance` on the norm of the gamma-scaled fixed-point
    /// residual, and an L-BFGS buffer of length `lbfgs_memory_size`.
    ///
    /// All workspace vectors are allocated here, once, so the solver loop is
    /// allocation-free.
    ///
    /// # Panics
    ///
    /// Panics if `tolerance` is not positive.
    pub fn new(problem_size: usize, tolerance: f64, lbfgs_memory_size: usize) -> PANOCCache {
        assert!(tolerance > 0., "tolerance must be positive");
        PANOCCache {
            gradient_u: vec![0.0; problem_size],
            // Allocated lazily by `set_akkt_tolerance`, only if the AKKT
            // termination criterion is activated.
            gradient_u_previous: None,
            u_half_step: vec![0.0; problem_size],
            gamma_fpr: vec![0.0; problem_size],
            direction_lbfgs: vec![0.0; problem_size],
            gradient_step: vec![0.0; problem_size],
            u_plus: vec![0.0; problem_size],
            gamma: 0.0,
            tolerance,
            // +inf sentinel: no residual computed yet, so `fpr_exit_condition`
            // cannot fire before the first iteration.
            norm_gamma_fpr: std::f64::INFINITY,
            lbfgs: lbfgs::Lbfgs::new(problem_size, lbfgs_memory_size)
                .with_cbfgs_alpha(DEFAULT_CBFGS_ALPHA)
                .with_cbfgs_epsilon(DEFAULT_CBFGS_EPSILON)
                .with_sy_epsilon(DEFAULT_SY_EPSILON),
            lhs_ls: 0.0,
            rhs_ls: 0.0,
            tau: 1.0,
            lipschitz_constant: 0.0,
            sigma: 0.0,
            cost_value: 0.0,
            iteration: 0,
            akkt_tolerance: None,
        }
    }

    /// Activates the AKKT termination criterion with the given tolerance and
    /// allocates the buffer that will hold the previous iterate's gradient.
    ///
    /// # Panics
    ///
    /// Panics if `akkt_tolerance` is not positive.
    pub fn set_akkt_tolerance(&mut self, akkt_tolerance: f64) {
        assert!(akkt_tolerance > 0.0, "akkt_tolerance must be positive");
        self.akkt_tolerance = Some(akkt_tolerance);
        // `gradient_step` has length `problem_size`, so this sizes the
        // previous-gradient buffer to the problem dimension.
        self.gradient_u_previous = Some(vec![0.0; self.gradient_step.len()]);
    }

    /// Copies the current gradient into the previous-gradient buffer.
    ///
    /// A no-op on iteration 0 (there is no previous gradient yet) or when the
    /// AKKT criterion is disabled (`gradient_u_previous` is `None`).
    pub fn cache_previous_gradient(&mut self) {
        if self.iteration >= 1 {
            if let Some(df_previous) = &mut self.gradient_u_previous {
                df_previous.copy_from_slice(&self.gradient_u);
            }
        }
    }

    /// Euclidean norm of the AKKT residual vector,
    /// `gamma_fpr + gamma * (gradient_u - gradient_u_previous)`.
    ///
    /// Returns 0.0 when the AKKT criterion is disabled (no previous-gradient
    /// buffer has been allocated).
    fn akkt_residual(&self) -> f64 {
        match &self.gradient_u_previous {
            Some(df_previous) => self
                .gamma_fpr
                .iter()
                .zip(self.gradient_u.iter())
                .zip(df_previous.iter())
                .map(|((&gamma_fpr_i, &df_i), &dfp_i)| {
                    (gamma_fpr_i + self.gamma * (df_i - dfp_i)).powi(2)
                })
                .sum::<f64>()
                .sqrt(),
            None => 0.0,
        }
    }

    /// Whether the norm of the gamma-scaled fixed-point residual is below
    /// the configured tolerance.
    fn fpr_exit_condition(&self) -> bool {
        self.norm_gamma_fpr < self.tolerance
    }

    /// Whether the AKKT residual is below the AKKT tolerance; trivially true
    /// when the AKKT criterion is disabled.
    fn akkt_exit_condition(&self) -> bool {
        self.akkt_tolerance
            .map_or(true, |akkt_tol| self.akkt_residual() < akkt_tol)
    }

    /// Overall termination test: the FPR condition must hold, and, if
    /// enabled, the AKKT condition as well.
    pub fn exit_condition(&self) -> bool {
        self.fpr_exit_condition() && self.akkt_exit_condition()
    }

    /// Resets the cache to its freshly-constructed state so it can be reused
    /// for a new solver run (allocations and configured tolerances are kept).
    pub fn reset(&mut self) {
        self.lbfgs.reset();
        self.lhs_ls = 0.0;
        self.rhs_ls = 0.0;
        self.tau = 1.0;
        self.lipschitz_constant = 0.0;
        self.sigma = 0.0;
        self.cost_value = 0.0;
        self.iteration = 0;
        self.gamma = 0.0;
        // Restore the same +inf sentinel `new()` uses; otherwise a reused
        // cache keeps the previous run's residual norm and `exit_condition`
        // can report convergence at iteration 0 of the new run.
        self.norm_gamma_fpr = std::f64::INFINITY;
    }

    /// Builder-style configuration of the C-BFGS parameters (`alpha`,
    /// `epsilon`) and the s'y acceptance threshold of the L-BFGS buffer.
    pub fn with_cbfgs_parameters(mut self, alpha: f64, epsilon: f64, sy_epsilon: f64) -> Self {
        self.lbfgs = self
            .lbfgs
            .with_cbfgs_alpha(alpha)
            .with_cbfgs_epsilon(epsilon)
            .with_sy_epsilon(sy_epsilon);
        self
    }
}