use scirs2_core::ndarray::{Array1, Array2};
/// Convenience alias for results produced by the decomposition routines in this module.
pub type DecompositionResult<T> = Result<T, DecompositionError>;
/// Errors that can arise while building or running a decomposition.
#[derive(Debug, Clone)]
pub enum DecompositionError {
/// The problem structure does not match what the algorithm expects.
IncompatibleStructure(String),
/// An iterative method exhausted its iteration budget before meeting tolerance.
ConvergenceFailed { iterations: usize, residual: f32 },
/// The supplied block partition is malformed (out-of-range or duplicated indices).
InvalidBlocks(String),
/// A numerical problem was detected.
NumericalIssue(String),
}
impl std::fmt::Display for DecompositionError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::IncompatibleStructure(msg) => write!(f, "Incompatible structure: {}", msg),
Self::ConvergenceFailed {
iterations,
residual,
} => {
write!(
f,
"Failed to converge after {} iterations (residual: {})",
iterations, residual
)
}
Self::InvalidBlocks(msg) => write!(f, "Invalid blocks: {}", msg),
Self::NumericalIssue(msg) => write!(f, "Numerical issue: {}", msg),
}
}
}
impl std::error::Error for DecompositionError {}
/// A named group of variable indices treated as one unit by block methods.
#[derive(Debug, Clone)]
pub struct Block {
    /// Human-readable label for the block.
    pub name: String,
    /// Indices (into the full variable vector) belonging to this block.
    pub indices: Vec<usize>,
}

impl Block {
    /// Builds a block from a label and its index set.
    pub fn new(name: impl Into<String>, indices: Vec<usize>) -> Self {
        let name = name.into();
        Self { name, indices }
    }

    /// Number of variables in the block.
    pub fn size(&self) -> usize {
        self.indices.len()
    }
}
/// Tunable parameters for consensus ADMM.
#[derive(Debug, Clone)]
pub struct ADMMConfig {
    /// Penalty parameter rho.
    pub rho: f32,
    /// Iteration budget.
    pub max_iterations: usize,
    /// Tolerance on the primal residual.
    pub primal_tol: f32,
    /// Tolerance on the dual residual.
    pub dual_tol: f32,
    /// Whether `update_rho` is allowed to rebalance rho between iterations.
    pub adaptive_rho: bool,
    /// Multiplicative factor used when rho is adapted.
    pub rho_update_factor: f32,
}

impl Default for ADMMConfig {
    /// rho = 1, 1000 iterations, 1e-4 tolerances, adaptive rho with factor 2.
    fn default() -> Self {
        ADMMConfig {
            rho: 1.0,
            max_iterations: 1000,
            primal_tol: 1e-4,
            dual_tol: 1e-4,
            adaptive_rho: true,
            rho_update_factor: 2.0,
        }
    }
}
/// Consensus ADMM state: each block keeps a local copy of the shared variable,
/// plus a global consensus variable and one scaled dual vector per block.
pub struct ConsensusADMM {
/// Algorithm parameters (rho may be adapted between iterations).
config: ADMMConfig,
/// Number of local copies / blocks.
num_blocks: usize,
/// Length of the decision vector.
dimension: usize,
/// Local primal variables x_i, one per block.
local_vars: Vec<Array1<f32>>,
/// Global consensus variable z.
global_var: Array1<f32>,
/// Scaled dual variables u_i, one per block.
dual_vars: Vec<Array1<f32>>,
}
impl ConsensusADMM {
    /// Creates a solver with `num_blocks` local copies of a `dimension`-length
    /// variable, all initialized to zero.
    pub fn new(num_blocks: usize, dimension: usize, config: ADMMConfig) -> Self {
        Self {
            config,
            num_blocks,
            dimension,
            local_vars: vec![Array1::zeros(dimension); num_blocks],
            global_var: Array1::zeros(dimension),
            dual_vars: vec![Array1::zeros(dimension); num_blocks],
        }
    }

    /// Sets the global variable and every local copy to `x0`.
    ///
    /// # Panics
    /// Panics if `x0.len() != dimension`.
    pub fn initialize(&mut self, x0: &Array1<f32>) {
        assert_eq!(x0.len(), self.dimension, "Initial point dimension mismatch");
        self.global_var = x0.clone();
        for local in &mut self.local_vars {
            *local = x0.clone();
        }
    }

    /// Runs one ADMM iteration.
    ///
    /// `local_update_fn(i, x_i, z - u_i, rho)` must return the updated local
    /// variable for block `i`.
    ///
    /// Returns `(primal_residual, dual_residual)` as Euclidean norms over all
    /// blocks; the dual residual is scaled by rho.
    pub fn iterate<F>(&mut self, local_update_fn: F) -> (f32, f32)
    where
        F: Fn(usize, &Array1<f32>, &Array1<f32>, f32) -> Array1<f32>,
    {
        // x-update: each block solves its local subproblem around z - u_i.
        for i in 0..self.num_blocks {
            let z_minus_u = &self.global_var - &self.dual_vars[i];
            self.local_vars[i] =
                local_update_fn(i, &self.local_vars[i], &z_minus_u, self.config.rho);
        }
        // z-update: the consensus step is the average of x_i + u_i.
        let mut z_new = Array1::zeros(self.dimension);
        for i in 0..self.num_blocks {
            z_new += &(&self.local_vars[i] + &self.dual_vars[i]);
        }
        z_new /= self.num_blocks as f32;
        // The dual-residual term ||z_new - z_old||^2 is identical for every
        // block, so compute it once and multiply by the block count instead of
        // re-summing it inside the per-block loop (it was loop-invariant).
        let z_diff = &z_new - &self.global_var;
        let z_diff_sq: f32 = z_diff.iter().map(|&x| x * x).sum();
        let dual_residual = z_diff_sq * self.num_blocks as f32;
        // u-update and primal residual accumulation.
        let mut primal_residual = 0.0f32;
        for i in 0..self.num_blocks {
            let xi_minus_z = &self.local_vars[i] - &z_new;
            self.dual_vars[i] = &self.dual_vars[i] + &xi_minus_z;
            primal_residual += xi_minus_z.iter().map(|&x| x * x).sum::<f32>();
        }
        self.global_var = z_new;
        (
            primal_residual.sqrt(),
            dual_residual.sqrt() * self.config.rho,
        )
    }

    /// The current consensus variable z.
    pub fn solution(&self) -> &Array1<f32> {
        &self.global_var
    }

    /// The local copy x_i for `block_id`, if it exists.
    pub fn local_solution(&self, block_id: usize) -> Option<&Array1<f32>> {
        self.local_vars.get(block_id)
    }

    /// True when both residuals are below their configured tolerances.
    pub fn has_converged(&self, primal_res: f32, dual_res: f32) -> bool {
        primal_res < self.config.primal_tol && dual_res < self.config.dual_tol
    }

    /// Residual-balancing rho update: grow rho when the primal residual
    /// dominates the dual residual by 10x, shrink it in the opposite case.
    /// No-op when `adaptive_rho` is disabled.
    pub fn update_rho(&mut self, primal_res: f32, dual_res: f32) {
        if !self.config.adaptive_rho {
            return;
        }
        if primal_res > 10.0 * dual_res {
            self.config.rho *= self.config.rho_update_factor;
        } else if dual_res > 10.0 * primal_res {
            self.config.rho /= self.config.rho_update_factor;
        }
    }
}
/// Cyclic block-coordinate descent over a disjoint partition of variables.
pub struct BlockCoordinateDescent {
/// Variable blocks; construction validates they are in-range and disjoint.
blocks: Vec<Block>,
/// Total number of variables.
dimension: usize,
/// Current full-length iterate.
current_solution: Array1<f32>,
/// Iteration budget (stored for callers; not read by the methods visible here).
max_iterations: usize,
/// Convergence tolerance (stored for callers; not read by the methods visible here).
tolerance: f32,
}
impl BlockCoordinateDescent {
    /// Validates the partition: every index must lie in `0..dimension` and
    /// appear in at most one block.
    ///
    /// # Errors
    /// Returns `InvalidBlocks` on an out-of-range or duplicated index.
    pub fn new(blocks: Vec<Block>, dimension: usize) -> DecompositionResult<Self> {
        let mut seen = vec![false; dimension];
        for blk in &blocks {
            for &idx in &blk.indices {
                if idx >= dimension {
                    return Err(DecompositionError::InvalidBlocks(format!(
                        "Index {} exceeds dimension {}",
                        idx, dimension
                    )));
                }
                if seen[idx] {
                    return Err(DecompositionError::InvalidBlocks(format!(
                        "Index {} appears in multiple blocks",
                        idx
                    )));
                }
                seen[idx] = true;
            }
        }
        Ok(Self {
            blocks,
            dimension,
            current_solution: Array1::zeros(dimension),
            max_iterations: 1000,
            tolerance: 1e-4,
        })
    }

    /// Builder: override the iteration budget.
    pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
        self.max_iterations = max_iterations;
        self
    }

    /// Builder: override the convergence tolerance.
    pub fn with_tolerance(mut self, tolerance: f32) -> Self {
        self.tolerance = tolerance;
        self
    }

    /// Starts the iterate at `x0`.
    ///
    /// # Panics
    /// Panics if `x0.len() != dimension`.
    pub fn initialize(&mut self, x0: &Array1<f32>) {
        assert_eq!(x0.len(), self.dimension);
        self.current_solution = x0.clone();
    }

    /// Re-optimizes one block: `block_update_fn(current, indices)` must return
    /// new values for exactly the block's indices, in order. Returns the
    /// Euclidean norm of the change over that block.
    ///
    /// # Panics
    /// Panics when `block_id` is out of range or the returned vector's length
    /// differs from the block size.
    pub fn update_block<F>(&mut self, block_id: usize, block_update_fn: F) -> f32
    where
        F: Fn(&Array1<f32>, &[usize]) -> Array1<f32>,
    {
        let block = &self.blocks[block_id];
        let block_solution = block_update_fn(&self.current_solution, &block.indices);
        assert_eq!(
            block_solution.len(),
            block.indices.len(),
            "Block solution size mismatch"
        );
        let mut delta_sq = 0.0f32;
        for (pos, &idx) in block.indices.iter().enumerate() {
            let previous = self.current_solution[idx];
            let updated = block_solution[pos];
            self.current_solution[idx] = updated;
            delta_sq += (updated - previous).powi(2);
        }
        delta_sq.sqrt()
    }

    /// The current full-length iterate.
    pub fn solution(&self) -> &Array1<f32> {
        &self.current_solution
    }

    /// Number of blocks in the partition.
    pub fn num_blocks(&self) -> usize {
        self.blocks.len()
    }

    /// The block with the given id, if any.
    pub fn block(&self, block_id: usize) -> Option<&Block> {
        self.blocks.get(block_id)
    }
}
/// Dual decomposition with projected subgradient updates on the multipliers.
pub struct DualDecomposition {
/// Number of subproblems the primal is split into.
num_subproblems: usize,
/// Coupling matrix; entry [i, j] weights subproblem j in coupling constraint i.
coupling_matrix: Array2<f32>,
/// Lagrange multipliers, one per coupling constraint (kept nonnegative).
dual_vars: Array1<f32>,
/// Subgradient step size.
step_size: f32,
/// Iteration budget (stored for callers; not read by the methods visible here).
max_iterations: usize,
}
impl DualDecomposition {
    /// Creates a decomposition with one dual variable per row of
    /// `coupling_matrix`, step size 0.1, and a 1000-iteration budget.
    pub fn new(num_subproblems: usize, coupling_matrix: Array2<f32>) -> Self {
        let dual_vars = Array1::zeros(coupling_matrix.nrows());
        Self {
            num_subproblems,
            coupling_matrix,
            dual_vars,
            step_size: 0.1,
            max_iterations: 1000,
        }
    }

    /// Builder: override the subgradient step size.
    pub fn with_step_size(mut self, step_size: f32) -> Self {
        self.step_size = step_size;
        self
    }

    /// Builder: override the iteration budget.
    pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
        self.max_iterations = max_iterations;
        self
    }

    /// Projected subgradient step: lambda <- max(0, lambda + step * violation).
    ///
    /// # Panics
    /// Panics if `constraint_violations` does not match the dual dimension.
    pub fn update_duals(&mut self, constraint_violations: &Array1<f32>) {
        assert_eq!(constraint_violations.len(), self.dual_vars.len());
        let step = self.step_size;
        for (lambda, &violation) in self.dual_vars.iter_mut().zip(constraint_violations.iter()) {
            *lambda = (*lambda + step * violation).max(0.0);
        }
    }

    /// Current Lagrange multipliers.
    pub fn dual_variables(&self) -> &Array1<f32> {
        &self.dual_vars
    }

    /// Lagrangian cost of one subproblem: the base cost plus the dual-weighted
    /// coupling terms.
    ///
    /// NOTE(review): only `local_vars[0]` enters the coupling term, so each
    /// subproblem is effectively treated as contributing a single scalar —
    /// confirm this is intentional for multi-variable subproblems.
    ///
    /// # Panics
    /// Panics if `subproblem_id >= num_subproblems`.
    pub fn augmented_cost(
        &self,
        subproblem_id: usize,
        base_cost: f32,
        local_vars: &Array1<f32>,
    ) -> f32 {
        assert!(subproblem_id < self.num_subproblems);
        let mut total = base_cost;
        // The first-element lookup is loop-invariant, so hoist it.
        if let Some(&first_var) = local_vars.get(0) {
            for (row, &lambda) in self.dual_vars.iter().enumerate() {
                total += lambda * self.coupling_matrix[[row, subproblem_id]] * first_var;
            }
        }
        total
    }
}
/// Multi-level (e.g. coarse/fine) decomposition container.
// Debug/Clone derives added for consistency with the other public types here.
#[derive(Debug, Clone)]
pub struct HierarchicalDecomposition {
    /// Levels in the order they were added.
    levels: Vec<DecompositionLevel>,
}

/// One level of a hierarchical decomposition.
#[derive(Debug, Clone)]
pub struct DecompositionLevel {
    /// Level label, e.g. "fine" or "coarse".
    pub name: String,
    /// Subproblems that make up this level.
    pub subproblems: Vec<Subproblem>,
}

/// A single subproblem within a decomposition level.
#[derive(Debug, Clone)]
pub struct Subproblem {
    /// Identifier for this subproblem.
    pub id: usize,
    /// Variable indices owned by this subproblem.
    pub variables: Vec<usize>,
    /// Ids of related child subproblems.
    pub children: Vec<usize>,
}
impl HierarchicalDecomposition {
    /// Creates an empty hierarchy.
    pub fn new() -> Self {
        Self { levels: Vec::new() }
    }

    /// Appends a level to the hierarchy.
    pub fn add_level(&mut self, level: DecompositionLevel) {
        self.levels.push(level);
    }

    /// Number of levels added so far.
    pub fn num_levels(&self) -> usize {
        self.levels.len()
    }

    /// The level with the given index, if any.
    pub fn level(&self, level_id: usize) -> Option<&DecompositionLevel> {
        self.levels.get(level_id)
    }

    /// Builds a two-level hierarchy ("fine" first, then "coarse") from two
    /// block partitions. Parent/child links are left empty.
    ///
    /// Currently always returns `Ok`; the `Result` return type is kept so
    /// partition validation can be added without breaking callers.
    pub fn from_blocks(
        coarse_blocks: Vec<Block>,
        fine_blocks: Vec<Block>,
    ) -> DecompositionResult<Self> {
        let mut decomp = Self::new();
        // Both levels were built with identical inline code; share one helper.
        decomp.add_level(Self::level_from_blocks("fine", fine_blocks));
        decomp.add_level(Self::level_from_blocks("coarse", coarse_blocks));
        Ok(decomp)
    }

    /// Converts a flat block list into a named level with empty child links.
    fn level_from_blocks(name: &str, blocks: Vec<Block>) -> DecompositionLevel {
        let subproblems = blocks
            .into_iter()
            .enumerate()
            .map(|(id, block)| Subproblem {
                id,
                variables: block.indices,
                children: Vec::new(),
            })
            .collect();
        DecompositionLevel {
            name: name.to_string(),
            subproblems,
        }
    }
}
/// The default hierarchy is empty, matching [`HierarchicalDecomposition::new`].
impl Default for HierarchicalDecomposition {
fn default() -> Self {
Self::new()
}
}
pub mod block_utils {
    //! Helpers for constructing common block partitions.
    use super::*;

    /// Splits `0..dimension` into consecutive blocks of `block_size`
    /// (the final block may be shorter).
    pub fn uniform_blocks(dimension: usize, block_size: usize) -> Vec<Block> {
        (0..dimension.div_ceil(block_size))
            .map(|i| {
                let lo = i * block_size;
                let hi = (lo + block_size).min(dimension);
                Block::new(format!("block_{}", i), (lo..hi).collect())
            })
            .collect()
    }

    /// Wraps explicit index groups in `Block`s with generated names.
    pub fn from_index_groups(groups: Vec<Vec<usize>>) -> Vec<Block> {
        groups
            .into_iter()
            .enumerate()
            .map(|(i, indices)| Block::new(format!("block_{}", i), indices))
            .collect()
    }

    /// Builds blocks of `block_size` advancing by `block_size - overlap`, so
    /// consecutive blocks share `overlap` indices. Blocks near the end are
    /// truncated at `dimension`.
    ///
    /// # Panics
    /// Panics unless `overlap < block_size`.
    pub fn overlapping_blocks(dimension: usize, block_size: usize, overlap: usize) -> Vec<Block> {
        assert!(overlap < block_size, "Overlap must be less than block size");
        let stride = block_size - overlap;
        let mut blocks = Vec::new();
        let mut lo = 0;
        while lo < dimension {
            let hi = (lo + block_size).min(dimension);
            let indices: Vec<usize> = (lo..hi).collect();
            blocks.push(Block::new(format!("block_{}", blocks.len()), indices));
            lo += stride;
        }
        blocks
    }
}
/// Benders decomposition state: a master problem over `num_master_vars`
/// variables plus a subproblem whose value is approximated by cuts.
pub struct BendersDecomposition {
/// Number of master-problem variables.
num_master_vars: usize,
/// Number of subproblem variables.
num_sub_vars: usize,
/// Accumulated cuts (oldest evicted once `config.max_cuts` is reached).
cuts: Vec<BendersCut>,
/// Algorithm parameters.
config: BendersConfig,
/// Best lower bound found so far.
lower_bound: f32,
/// Best upper bound found so far.
upper_bound: f32,
}
/// Parameters controlling the Benders iteration loop.
#[derive(Debug, Clone)]
pub struct BendersConfig {
    /// Maximum number of master/subproblem rounds.
    pub max_iterations: usize,
    /// Bound-gap threshold for convergence.
    pub tolerance: f32,
    /// Upper limit on stored cuts; oldest cuts are dropped first.
    pub max_cuts: usize,
    /// Multi-cut mode flag (stored; not read by the methods visible here).
    pub multi_cut: bool,
}

impl Default for BendersConfig {
    /// 100 iterations, 1e-4 gap tolerance, up to 1000 cuts, single-cut mode.
    fn default() -> Self {
        BendersConfig {
            max_iterations: 100,
            tolerance: 1e-4,
            max_cuts: 1000,
            multi_cut: false,
        }
    }
}
/// A cut generated from a subproblem solve.
#[derive(Debug, Clone)]
pub enum BendersCut {
/// Lower-bounds the subproblem value: theta >= constant + coefficients . x
/// (this is how `solve_master` evaluates it).
Optimality {
/// Dual solution of the subproblem that produced this cut.
dual: Array1<f32>,
/// Constant term of the cut.
constant: f32,
/// Coefficients on the master variables.
coefficients: Array1<f32>,
},
/// Cut excluding master solutions whose subproblem is infeasible.
Feasibility {
/// Dual ray associated with the infeasible subproblem.
ray: Array1<f32>,
/// Constant term of the cut.
constant: f32,
/// Coefficients on the master variables.
coefficients: Array1<f32>,
},
}
impl BendersDecomposition {
    /// Creates a decomposition with no cuts and unbounded initial bounds.
    pub fn new(num_master_vars: usize, num_sub_vars: usize) -> Self {
        Self {
            num_master_vars,
            num_sub_vars,
            cuts: Vec::new(),
            config: BendersConfig::default(),
            lower_bound: f32::NEG_INFINITY,
            upper_bound: f32::INFINITY,
        }
    }

    /// Builder: replace the default configuration.
    pub fn with_config(mut self, config: BendersConfig) -> Self {
        self.config = config;
        self
    }

    /// Drops the oldest cut when the pool is at capacity (FIFO eviction).
    /// Shared by both `add_*_cut` methods, which previously duplicated it.
    fn evict_if_full(&mut self) {
        if self.cuts.len() >= self.config.max_cuts {
            self.cuts.remove(0);
        }
    }

    /// Records an optimality cut: theta >= constant + coefficients . x.
    pub fn add_optimality_cut(
        &mut self,
        dual: Array1<f32>,
        constant: f32,
        coefficients: Array1<f32>,
    ) {
        self.evict_if_full();
        self.cuts.push(BendersCut::Optimality {
            dual,
            constant,
            coefficients,
        });
    }

    /// Records a feasibility cut generated from a dual ray.
    pub fn add_feasibility_cut(
        &mut self,
        ray: Array1<f32>,
        constant: f32,
        coefficients: Array1<f32>,
    ) {
        self.evict_if_full();
        self.cuts.push(BendersCut::Feasibility {
            ray,
            constant,
            coefficients,
        });
    }

    /// Evaluates the cut-relaxed master problem.
    ///
    /// NOTE(review): this is a placeholder, not an LP solve — `x` starts at
    /// zero, optimality cuts only raise `theta`, and a violated feasibility
    /// cut nudges every component of `x` by a fixed 0.1. Replace with a real
    /// master solver for production use.
    ///
    /// Returns the candidate master solution and the implied lower bound
    /// `objective . x + theta` (NEG_INFINITY while no optimality cut exists).
    ///
    /// # Errors
    /// `IncompatibleStructure` when there are no master variables.
    pub fn solve_master(&self, objective: &Array1<f32>) -> DecompositionResult<(Array1<f32>, f32)> {
        if self.num_master_vars == 0 {
            return Err(DecompositionError::IncompatibleStructure(
                "No master variables".to_string(),
            ));
        }
        let mut x = Array1::zeros(self.num_master_vars);
        let mut theta = f32::NEG_INFINITY;
        for cut in &self.cuts {
            match cut {
                BendersCut::Optimality {
                    constant,
                    coefficients,
                    ..
                } => {
                    // theta must sit above every optimality cut evaluated at x.
                    let cut_value = constant + coefficients.dot(&x);
                    theta = theta.max(cut_value);
                }
                BendersCut::Feasibility {
                    constant,
                    coefficients,
                    ..
                } => {
                    let cut_value = constant + coefficients.dot(&x);
                    if cut_value < 0.0 {
                        x = x.mapv(|v| v + 0.1);
                    }
                }
            }
        }
        let lower_bound = objective.dot(&x) + theta;
        Ok((x, lower_bound))
    }

    /// Solves the subproblem for a fixed master solution.
    ///
    /// NOTE(review): stubbed — returns y = 0.5 everywhere, unit duals, and
    /// always reports feasibility; the master solution and `rhs` are not yet
    /// used. (A dead `_adjusted_rhs` computation whose `if`/`else` branches
    /// were identical has been removed.)
    ///
    /// # Errors
    /// `IncompatibleStructure` when the coupling matrix's column count differs
    /// from `num_sub_vars`.
    pub fn solve_subproblem(
        &self,
        _master_solution: &Array1<f32>,
        sub_objective: &Array1<f32>,
        coupling_matrix: &Array2<f32>,
        _rhs: &Array1<f32>,
    ) -> DecompositionResult<(f32, Array1<f32>, bool)> {
        let (n_constraints, n_vars) = coupling_matrix.dim();
        if n_vars != self.num_sub_vars {
            return Err(DecompositionError::IncompatibleStructure(
                "Subproblem dimension mismatch".to_string(),
            ));
        }
        let y = Array1::from_elem(n_vars, 0.5);
        let dual = Array1::from_elem(n_constraints, 1.0);
        let obj_value = sub_objective.dot(&y);
        Ok((obj_value, dual, true))
    }

    /// Runs the Benders loop until the bound gap drops below tolerance or the
    /// iteration budget is exhausted.
    ///
    /// # Errors
    /// `ConvergenceFailed` when `max_iterations` is exceeded, plus any error
    /// propagated from the master or subproblem solves.
    pub fn iterate(
        &mut self,
        master_obj: &Array1<f32>,
        sub_obj: &Array1<f32>,
        coupling: &Array2<f32>,
        rhs: &Array1<f32>,
    ) -> DecompositionResult<BendersIterationResult> {
        let mut iteration = 0;
        loop {
            iteration += 1;
            if iteration > self.config.max_iterations {
                return Err(DecompositionError::ConvergenceFailed {
                    iterations: iteration,
                    residual: self.upper_bound - self.lower_bound,
                });
            }
            // Master step: candidate x plus a lower bound implied by the cuts.
            let (master_sol, lower_bound) = self.solve_master(master_obj)?;
            self.lower_bound = lower_bound;
            // Subproblem step: evaluate the candidate.
            let (sub_obj_value, dual, is_feasible) =
                self.solve_subproblem(&master_sol, sub_obj, coupling, rhs)?;
            let total_obj = master_obj.dot(&master_sol) + sub_obj_value;
            self.upper_bound = self.upper_bound.min(total_obj);
            let gap = self.upper_bound - self.lower_bound;
            if gap < self.config.tolerance {
                return Ok(BendersIterationResult {
                    master_solution: master_sol,
                    // NOTE(review): the placeholder subproblem solver does not
                    // expose its primal solution, so zeros are returned here.
                    sub_solution: Array1::zeros(self.num_sub_vars),
                    lower_bound: self.lower_bound,
                    upper_bound: self.upper_bound,
                    iterations: iteration,
                    converged: true,
                });
            }
            // Not converged: refine the master with a new cut.
            // NOTE(review): cut coefficients are zero placeholders, so cuts do
            // not yet depend on the master variables.
            let coefficients = Array1::zeros(self.num_master_vars);
            if is_feasible {
                self.add_optimality_cut(dual, sub_obj_value, coefficients);
            } else {
                self.add_feasibility_cut(dual, 0.0, coefficients);
            }
        }
    }

    /// Current (lower, upper) bounds.
    pub fn bounds(&self) -> (f32, f32) {
        (self.lower_bound, self.upper_bound)
    }

    /// Number of stored cuts.
    pub fn num_cuts(&self) -> usize {
        self.cuts.len()
    }

    /// All stored cuts, oldest first.
    pub fn cuts(&self) -> &[BendersCut] {
        &self.cuts
    }

    /// Clears cuts and resets bounds to their initial unbounded values.
    pub fn reset(&mut self) {
        self.cuts.clear();
        self.lower_bound = f32::NEG_INFINITY;
        self.upper_bound = f32::INFINITY;
    }
}
/// Outcome of a successful Benders `iterate` call.
#[derive(Debug, Clone)]
pub struct BendersIterationResult {
    /// Final master-variable values.
    pub master_solution: Array1<f32>,
    /// Final subproblem-variable values.
    pub sub_solution: Array1<f32>,
    /// Best lower bound reached.
    pub lower_bound: f32,
    /// Best upper bound reached.
    pub upper_bound: f32,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Whether the bound gap closed within tolerance.
    pub converged: bool,
}

impl BendersIterationResult {
    /// Absolute bound gap: upper bound minus lower bound.
    pub fn gap(&self) -> f32 {
        self.upper_bound - self.lower_bound
    }

    /// Gap relative to |upper bound|; falls back to the absolute gap when the
    /// upper bound is numerically zero, to avoid dividing by ~0.
    pub fn relative_gap(&self) -> f32 {
        let absolute = self.gap();
        let scale = self.upper_bound.abs();
        if scale < 1e-10 {
            absolute
        } else {
            absolute / scale
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_benders_decomposition() {
        let mut benders = BendersDecomposition::new(3, 2);
        assert_eq!(benders.num_cuts(), 0);
        benders.add_optimality_cut(
            Array1::from_vec(vec![1.0, 2.0]),
            1.5,
            Array1::from_vec(vec![0.5, 0.3, 0.1]),
        );
        assert_eq!(benders.num_cuts(), 1);
        let (lower, upper) = benders.bounds();
        assert_eq!(lower, f32::NEG_INFINITY);
        assert_eq!(upper, f32::INFINITY);
    }

    #[test]
    fn test_benders_config() {
        let custom = BendersConfig {
            max_iterations: 50,
            tolerance: 1e-5,
            max_cuts: 500,
            multi_cut: true,
        };
        let benders = BendersDecomposition::new(2, 3).with_config(custom);
        assert_eq!(benders.config.max_iterations, 50);
    }

    #[test]
    fn test_consensus_admm_initialization() {
        let mut admm = ConsensusADMM::new(3, 5, ADMMConfig::default());
        let start = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        admm.initialize(&start);
        assert_eq!(admm.solution(), &start);
    }

    #[test]
    fn test_block_coordinate_descent() {
        let partition = vec![
            Block::new("b1", vec![0, 1]),
            Block::new("b2", vec![2, 3]),
            Block::new("b3", vec![4]),
        ];
        let bcd = BlockCoordinateDescent::new(partition, 5).unwrap();
        assert_eq!(bcd.num_blocks(), 3);
        assert_eq!(bcd.block(0).unwrap().size(), 2);
    }

    #[test]
    fn test_invalid_blocks() {
        // Index 10 is out of range for dimension 5, so construction must fail.
        let result = BlockCoordinateDescent::new(vec![Block::new("b1", vec![0, 1, 10])], 5);
        assert!(result.is_err());
    }

    #[test]
    fn test_uniform_blocks() {
        let blocks = block_utils::uniform_blocks(10, 3);
        // 10 = 3 + 3 + 3 + 1: the last block holds the remainder.
        assert_eq!(blocks.len(), 4);
        assert_eq!(blocks[0].size(), 3);
        assert_eq!(blocks[3].size(), 1);
    }

    #[test]
    fn test_overlapping_blocks() {
        let blocks = block_utils::overlapping_blocks(10, 4, 1);
        assert!(blocks.len() > 3);
        // Consecutive blocks share their boundary index.
        assert_eq!(
            blocks[0].indices.last().unwrap(),
            blocks[1].indices.first().unwrap()
        );
    }

    #[test]
    fn test_dual_decomposition() {
        let coupling =
            Array2::from_shape_vec((2, 3), vec![1.0, 0.5, 0.0, 0.0, 0.5, 1.0]).unwrap();
        let dual_decomp = DualDecomposition::new(3, coupling);
        // One dual variable per coupling-constraint row.
        assert_eq!(dual_decomp.dual_variables().len(), 2);
    }

    #[test]
    fn test_hierarchical_decomposition() {
        let mut hier = HierarchicalDecomposition::new();
        assert_eq!(hier.num_levels(), 0);
        hier.add_level(DecompositionLevel {
            name: "test".to_string(),
            subproblems: vec![],
        });
        assert_eq!(hier.num_levels(), 1);
    }
}