use super::{ConvergenceStatus, OptimizationResult, Optimizer};
use crate::primitives::{Matrix, Vector};
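
/// Alternating Direction Method of Multipliers (ADMM), scaled dual form.
///
/// Targets separable constrained problems of the form
/// `minimize f(x) + g(z)  subject to  A x + B z = c`,
/// alternating caller-supplied x- and z-subproblem solves with a running
/// scaled dual variable `u`. Optionally rescales the penalty parameter
/// `rho` to keep the primal and dual residuals balanced (residual
/// balancing, Boyd et al., Sec. 3.4.1).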
#[derive(Debug, Clone)]
pub struct ADMM {
    /// Maximum number of iterations before giving up.
    max_iter: usize,
    /// Penalty parameter of the augmented Lagrangian.
    rho: f32,
    /// Tolerance applied to both the primal and the dual residual.
    tol: f32,
    /// Whether to rescale `rho` based on the residual balance.
    adaptive_rho: bool,
    /// Factor by which `rho` is multiplied when it is increased.
    rho_increase: f32,
    /// Factor by which `rho` is divided when it is decreased.
    rho_decrease: f32,
}
impl ADMM {
    /// Returns the maximum number of iterations.
    #[must_use]
    pub fn max_iter(&self) -> usize {
        self.max_iter
    }

    /// Returns the configured penalty parameter `rho`.
    #[must_use]
    pub fn rho(&self) -> f32 {
        self.rho
    }

    /// Returns the convergence tolerance.
    #[must_use]
    pub fn tol(&self) -> f32 {
        self.tol
    }

    /// Returns whether adaptive `rho` rescaling is enabled.
    #[must_use]
    pub fn adaptive_rho(&self) -> bool {
        self.adaptive_rho
    }

    /// Returns the factor by which `rho` is increased.
    #[must_use]
    pub fn rho_increase(&self) -> f32 {
        self.rho_increase
    }

    /// Returns the factor by which `rho` is decreased.
    #[must_use]
    pub fn rho_decrease(&self) -> f32 {
        self.rho_decrease
    }
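
    /// Creates a solver with the given iteration budget, initial penalty
    /// parameter `rho`, and residual tolerance `tol`. Adaptive `rho`
    /// rescaling is disabled by default, with increase/decrease factors
    /// of 2.0.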
    #[must_use]
    pub fn new(max_iter: usize, rho: f32, tol: f32) -> Self {
        Self {
            max_iter,
            rho,
            tol,
            adaptive_rho: false,
            rho_increase: 2.0,
            rho_decrease: 2.0,
        }
    }
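
    /// Enables or disables adaptive `rho` rescaling (builder style).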
    #[must_use]
    pub fn with_adaptive_rho(mut self, adaptive: bool) -> Self {
        self.adaptive_rho = adaptive;
        self
    }
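
    /// Sets the multiplicative factors used when adaptive rescaling
    /// increases or decreases `rho` (builder style).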
    #[must_use]
    pub fn with_rho_factors(mut self, increase: f32, decrease: f32) -> Self {
        self.rho_increase = increase;
        self.rho_decrease = decrease;
        self
    }
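
    /// Runs scaled-form ADMM on the separable constrained problem
    /// `minimize f(x) + g(z)  subject to  A x + B z = c`.
    ///
    /// Each iteration calls `x_minimizer(&z, &u, c, rho)` for the x-update,
    /// `z_minimizer(&(A x), &u, c, rho)` for the z-update, then applies the
    /// scaled dual update `u += A x + B z - c`. The closures are expected to
    /// solve the proximal-style subproblems
    /// `argmin_x f(x) + (rho / 2) * ||A x + B z - c + u||^2` and
    /// `argmin_z g(z) + (rho / 2) * ||A x + B z - c + u||^2`; anything they
    /// need beyond their arguments (including `A` and `B` themselves) must
    /// be captured. Iteration stops once the primal residual
    /// `||A x + B z - c||` and the dual residual `rho * ||A^T B (z - z_old)||`
    /// both drop below `tol`.
    ///
    /// A minimal sketch for the consensus constraint `x - z = 0` (so `A = I`,
    /// `B = -I`, `c = 0`) with `f = g = 0`. `Matrix::identity` and
    /// `Matrix::mul_scalar` are assumed helpers here and may need adapting
    /// to the actual `primitives` API:
    ///
    /// ```ignore
    /// let mut admm = ADMM::new(100, 1.0, 1e-6);
    /// let a = Matrix::identity(2); // hypothetical constructor: A = I
    /// let b = a.mul_scalar(-1.0); // hypothetical helper: B = -I
    /// let c = Vector::zeros(2);
    /// let result = admm.minimize_consensus(
    ///     |z, u, _c, _rho| z - u,   // x-step: argmin_x ||x - z + u||^2
    ///     |ax, u, _c, _rho| ax + u, // z-step: argmin_z ||ax - z + u||^2
    ///     &a,
    ///     &b,
    ///     &c,
    ///     Vector::zeros(2),
    ///     Vector::zeros(2),
    /// );
    /// ```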
    #[allow(clippy::too_many_arguments)]
    pub fn minimize_consensus<F, G>(
        &mut self,
        x_minimizer: F,
        z_minimizer: G,
        a: &Matrix<f32>,
        b_mat: &Matrix<f32>,
        c: &Vector<f32>,
        x0: Vector<f32>,
        z0: Vector<f32>,
    ) -> OptimizationResult
    where
        F: Fn(&Vector<f32>, &Vector<f32>, &Vector<f32>, f32) -> Vector<f32>,
        G: Fn(&Vector<f32>, &Vector<f32>, &Vector<f32>, f32) -> Vector<f32>,
    {
        let start_time = std::time::Instant::now();
        let mut x = x0;
        let mut z = z0;
        // Scaled dual variable, living in the constraint space of `c`.
        let mut u = Vector::zeros(c.len());
        let mut rho = self.rho;
        let mut z_old = z.clone();
        // Most recent residuals, reported even when the iteration budget is
        // exhausted before convergence.
        let mut primal_res = f32::INFINITY;
        let mut dual_res = f32::INFINITY;
        for iter in 0..self.max_iter {
            // x-update: caller-supplied proximal step for f.
            x = x_minimizer(&z, &u, c, rho);
            let ax = a.matvec(&x).expect("Matrix-vector multiplication");
            // z-update: caller-supplied proximal step for g, given fresh A x.
            z = z_minimizer(&ax, &u, c, rho);
            let bz = b_mat.matvec(&z).expect("Matrix-vector multiplication");
            // Scaled dual update: u += A x + B z - c.
            let residual = &(&ax + &bz) - c;
            u = &u + &residual;
            primal_res = residual.norm();
            // Dual residual s = rho * A^T B (z - z_old) (Boyd et al., Sec. 3.3).
            let z_diff = &z - &z_old;
            let b_z_diff = b_mat.matvec(&z_diff).expect("Matrix-vector multiplication");
            let at_b_z_diff = a
                .transpose()
                .matvec(&b_z_diff)
                .expect("Matrix-vector multiplication");
            dual_res = rho * at_b_z_diff.norm();
            if primal_res < self.tol && dual_res < self.tol {
                return OptimizationResult {
                    solution: x,
                    // The solver only sees proximal steps, never f or g
                    // themselves, so the objective is left to the caller.
                    objective_value: 0.0,
                    iterations: iter + 1,
                    status: ConvergenceStatus::Converged,
                    gradient_norm: dual_res,
                    constraint_violation: primal_res,
                    elapsed_time: start_time.elapsed(),
                };
            }
            // Residual balancing (Boyd et al., Sec. 3.4.1), checked every 10
            // iterations: when one residual dominates the other by 10x,
            // rescale rho, and rescale the *scaled* dual u = y / rho
            // inversely so the underlying multiplier y is preserved.
            if self.adaptive_rho && iter % 10 == 0 {
                if primal_res > 10.0 * dual_res {
                    rho *= self.rho_increase;
                    u = u.mul_scalar(1.0 / self.rho_increase);
                } else if dual_res > 10.0 * primal_res {
                    rho /= self.rho_decrease;
                    u = u.mul_scalar(self.rho_decrease);
                }
            }
            z_old = z.clone();
        }
        // Iteration budget exhausted: report the last residuals rather than
        // zeros so callers can see how far from feasibility the run ended.
        OptimizationResult {
            solution: x,
            objective_value: 0.0,
            iterations: self.max_iter,
            status: ConvergenceStatus::MaxIterations,
            gradient_norm: dual_res,
            constraint_violation: primal_res,
            elapsed_time: start_time.elapsed(),
        }
    }
}
impl Optimizer for ADMM {
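    /// ADMM is not a gradient-based optimizer; per-sample `step` updates are
    /// unsupported by design and panic when called.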
    fn step(&mut self, _params: &mut Vector<f32>, _gradients: &Vector<f32>) {
        panic!(
            "ADMM does not support stochastic updates (step). Use minimize_consensus() with x-minimizer and z-minimizer functions."
        )
    }

    fn reset(&mut self) {
        // All per-solve state (x, z, u, and the local copy of rho) lives in
        // minimize_consensus, so there is nothing to reset.
    }
}
#[cfg(test)]
#[path = "admm_tests.rs"]
mod tests;