1use scirs2_core::ndarray::{Array1, Array2};
15
/// Convenience alias for results produced by the decomposition routines in this module.
pub type DecompositionResult<T> = Result<T, DecompositionError>;
18
/// Errors produced by the decomposition algorithms in this module.
#[derive(Debug, Clone)]
pub enum DecompositionError {
    /// The problem structure does not match what the algorithm expects.
    IncompatibleStructure(String),
    /// The iteration limit was reached before the residual fell below tolerance.
    ConvergenceFailed { iterations: usize, residual: f32 },
    /// Block specification is invalid (e.g. out-of-range or duplicated indices).
    InvalidBlocks(String),
    /// A numerical problem was detected during the computation.
    NumericalIssue(String),
}

impl std::fmt::Display for DecompositionError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Render each variant into a single message string, then emit it once.
        let message = match self {
            Self::IncompatibleStructure(msg) => format!("Incompatible structure: {}", msg),
            Self::ConvergenceFailed {
                iterations,
                residual,
            } => format!(
                "Failed to converge after {} iterations (residual: {})",
                iterations, residual
            ),
            Self::InvalidBlocks(msg) => format!("Invalid blocks: {}", msg),
            Self::NumericalIssue(msg) => format!("Numerical issue: {}", msg),
        };
        f.write_str(&message)
    }
}

impl std::error::Error for DecompositionError {}
53
/// A named group of variable indices treated as one unit by block methods.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Block {
    /// Human-readable label for the block.
    pub name: String,
    /// Indices of the variables belonging to this block.
    pub indices: Vec<usize>,
}

impl Block {
    /// Creates a block with the given name and variable indices.
    pub fn new(name: impl Into<String>, indices: Vec<usize>) -> Self {
        Self {
            name: name.into(),
            indices,
        }
    }

    /// Number of variables in the block.
    pub fn size(&self) -> usize {
        self.indices.len()
    }

    /// Whether the block contains no variables (complements `size`).
    pub fn is_empty(&self) -> bool {
        self.indices.is_empty()
    }
}
77
/// Tuning parameters for the consensus ADMM solver.
#[derive(Debug, Clone)]
pub struct ADMMConfig {
    /// Penalty parameter rho of the augmented Lagrangian.
    pub rho: f32,
    /// Maximum number of ADMM iterations.
    pub max_iterations: usize,
    /// Primal residual tolerance.
    pub primal_tol: f32,
    /// Dual residual tolerance.
    pub dual_tol: f32,
    /// Whether rho is adjusted from the residual balance during the run.
    pub adaptive_rho: bool,
    /// Multiplicative factor used when adapting rho.
    pub rho_update_factor: f32,
}

impl Default for ADMMConfig {
    /// Conservative defaults: rho = 1, 1000 iterations, 1e-4 tolerances,
    /// adaptive rho enabled with an update factor of 2.
    fn default() -> Self {
        ADMMConfig {
            rho_update_factor: 2.0,
            adaptive_rho: true,
            dual_tol: 1e-4,
            primal_tol: 1e-4,
            max_iterations: 1000,
            rho: 1.0,
        }
    }
}
107
/// Consensus ADMM state: per-block local copies of the decision variable, a
/// global consensus variable, and scaled dual variables (the local update
/// receives `z - u_i` and the dual update is `u_i += x_i - z`).
pub struct ConsensusADMM {
    /// Penalty/tolerance configuration; `rho` may change when adaptive rho is on.
    config: ADMMConfig,
    /// Number of local blocks/agents.
    num_blocks: usize,
    /// Dimension of the decision variable.
    dimension: usize,
    /// Local copies x_i of the decision variable, one per block.
    local_vars: Vec<Array1<f32>>,
    /// Global consensus variable z.
    global_var: Array1<f32>,
    /// Scaled dual variables u_i, one per block.
    dual_vars: Vec<Array1<f32>>,
}
129
130impl ConsensusADMM {
131 pub fn new(num_blocks: usize, dimension: usize, config: ADMMConfig) -> Self {
133 let local_vars = vec![Array1::zeros(dimension); num_blocks];
134 let global_var = Array1::zeros(dimension);
135 let dual_vars = vec![Array1::zeros(dimension); num_blocks];
136
137 Self {
138 config,
139 num_blocks,
140 dimension,
141 local_vars,
142 global_var,
143 dual_vars,
144 }
145 }
146
147 pub fn initialize(&mut self, x0: &Array1<f32>) {
149 assert_eq!(x0.len(), self.dimension, "Initial point dimension mismatch");
150 self.global_var = x0.clone();
151 for i in 0..self.num_blocks {
152 self.local_vars[i] = x0.clone();
153 }
154 }
155
156 pub fn iterate<F>(&mut self, local_update_fn: F) -> (f32, f32)
163 where
164 F: Fn(usize, &Array1<f32>, &Array1<f32>, f32) -> Array1<f32>,
165 {
166 for i in 0..self.num_blocks {
168 let z_minus_u = &self.global_var - &self.dual_vars[i];
169 self.local_vars[i] =
170 local_update_fn(i, &self.local_vars[i], &z_minus_u, self.config.rho);
171 }
172
173 let mut z_new = Array1::zeros(self.dimension);
175 for i in 0..self.num_blocks {
176 z_new += &(&self.local_vars[i] + &self.dual_vars[i]);
177 }
178 z_new /= self.num_blocks as f32;
179
180 let mut primal_residual = 0.0f32;
182 let mut dual_residual = 0.0f32;
183
184 let z_diff = &z_new - &self.global_var;
185 for i in 0..self.num_blocks {
186 let xi_minus_z = &self.local_vars[i] - &z_new;
187 self.dual_vars[i] = &self.dual_vars[i] + &xi_minus_z;
188
189 primal_residual += xi_minus_z.iter().map(|&x| x * x).sum::<f32>();
191 dual_residual += z_diff.iter().map(|&x| x * x).sum::<f32>();
192 }
193
194 self.global_var = z_new;
195
196 (
197 primal_residual.sqrt(),
198 dual_residual.sqrt() * self.config.rho,
199 )
200 }
201
202 pub fn solution(&self) -> &Array1<f32> {
204 &self.global_var
205 }
206
207 pub fn local_solution(&self, block_id: usize) -> Option<&Array1<f32>> {
209 self.local_vars.get(block_id)
210 }
211
212 pub fn has_converged(&self, primal_res: f32, dual_res: f32) -> bool {
214 primal_res < self.config.primal_tol && dual_res < self.config.dual_tol
215 }
216
217 pub fn update_rho(&mut self, primal_res: f32, dual_res: f32) {
219 if !self.config.adaptive_rho {
220 return;
221 }
222
223 if primal_res > 10.0 * dual_res {
224 self.config.rho *= self.config.rho_update_factor;
225 } else if dual_res > 10.0 * primal_res {
226 self.config.rho /= self.config.rho_update_factor;
227 }
228 }
229}
230
/// Block coordinate descent state: updates one variable block at a time while
/// holding the others fixed.
pub struct BlockCoordinateDescent {
    /// Disjoint variable blocks (validated by `new`).
    blocks: Vec<Block>,
    /// Total number of variables.
    dimension: usize,
    /// Current full solution vector.
    current_solution: Array1<f32>,
    /// Iteration cap (builder-configurable; not enforced by the methods visible here).
    max_iterations: usize,
    /// Convergence tolerance (builder-configurable; not enforced by the methods visible here).
    tolerance: f32,
}
242
243impl BlockCoordinateDescent {
244 pub fn new(blocks: Vec<Block>, dimension: usize) -> DecompositionResult<Self> {
246 let mut covered = vec![false; dimension];
248 for block in &blocks {
249 for &idx in &block.indices {
250 if idx >= dimension {
251 return Err(DecompositionError::InvalidBlocks(format!(
252 "Index {} exceeds dimension {}",
253 idx, dimension
254 )));
255 }
256 if covered[idx] {
257 return Err(DecompositionError::InvalidBlocks(format!(
258 "Index {} appears in multiple blocks",
259 idx
260 )));
261 }
262 covered[idx] = true;
263 }
264 }
265
266 Ok(Self {
267 blocks,
268 dimension,
269 current_solution: Array1::zeros(dimension),
270 max_iterations: 1000,
271 tolerance: 1e-4,
272 })
273 }
274
275 pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
277 self.max_iterations = max_iterations;
278 self
279 }
280
281 pub fn with_tolerance(mut self, tolerance: f32) -> Self {
283 self.tolerance = tolerance;
284 self
285 }
286
287 pub fn initialize(&mut self, x0: &Array1<f32>) {
289 assert_eq!(x0.len(), self.dimension);
290 self.current_solution = x0.clone();
291 }
292
293 pub fn update_block<F>(&mut self, block_id: usize, block_update_fn: F) -> f32
298 where
299 F: Fn(&Array1<f32>, &[usize]) -> Array1<f32>,
300 {
301 let block = &self.blocks[block_id];
302 let block_solution = block_update_fn(&self.current_solution, &block.indices);
303
304 assert_eq!(
305 block_solution.len(),
306 block.indices.len(),
307 "Block solution size mismatch"
308 );
309
310 let mut change = 0.0f32;
312 for (i, &idx) in block.indices.iter().enumerate() {
313 let old_val = self.current_solution[idx];
314 let new_val = block_solution[i];
315 self.current_solution[idx] = new_val;
316 change += (new_val - old_val).powi(2);
317 }
318
319 change.sqrt()
320 }
321
322 pub fn solution(&self) -> &Array1<f32> {
324 &self.current_solution
325 }
326
327 pub fn num_blocks(&self) -> usize {
329 self.blocks.len()
330 }
331
332 pub fn block(&self, block_id: usize) -> Option<&Block> {
334 self.blocks.get(block_id)
335 }
336}
337
/// Dual decomposition coordinator: maintains Lagrange multipliers for the
/// coupling constraints shared across independent subproblems.
pub struct DualDecomposition {
    /// Number of subproblems (columns of the coupling matrix).
    num_subproblems: usize,
    /// Coupling-constraint matrix: rows are constraints, columns are subproblems.
    coupling_matrix: Array2<f32>,
    /// Lagrange multipliers, one per coupling constraint; kept non-negative by `update_duals`.
    dual_vars: Array1<f32>,
    /// Subgradient step size for the dual update.
    step_size: f32,
    /// Iteration cap (builder-configurable; not enforced by the methods visible here).
    max_iterations: usize,
}
349
350impl DualDecomposition {
351 pub fn new(num_subproblems: usize, coupling_matrix: Array2<f32>) -> Self {
357 let num_coupling = coupling_matrix.nrows();
358 Self {
359 num_subproblems,
360 coupling_matrix,
361 dual_vars: Array1::zeros(num_coupling),
362 step_size: 0.1,
363 max_iterations: 1000,
364 }
365 }
366
367 pub fn with_step_size(mut self, step_size: f32) -> Self {
369 self.step_size = step_size;
370 self
371 }
372
373 pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
375 self.max_iterations = max_iterations;
376 self
377 }
378
379 pub fn update_duals(&mut self, constraint_violations: &Array1<f32>) {
381 assert_eq!(constraint_violations.len(), self.dual_vars.len());
382
383 for i in 0..self.dual_vars.len() {
385 self.dual_vars[i] =
386 (self.dual_vars[i] + self.step_size * constraint_violations[i]).max(0.0);
387 }
388 }
389
390 pub fn dual_variables(&self) -> &Array1<f32> {
392 &self.dual_vars
393 }
394
395 pub fn augmented_cost(
397 &self,
398 subproblem_id: usize,
399 base_cost: f32,
400 local_vars: &Array1<f32>,
401 ) -> f32 {
402 assert!(subproblem_id < self.num_subproblems);
403
404 let mut augmented = base_cost;
406 for (i, &dual) in self.dual_vars.iter().enumerate() {
407 let coupling_coeff = self.coupling_matrix[[i, subproblem_id]];
408 if let Some(&var_val) = local_vars.get(0) {
409 augmented += dual * coupling_coeff * var_val;
410 }
411 }
412
413 augmented
414 }
415}
416
/// Multi-level decomposition: an ordered list of levels (e.g. fine-to-coarse).
pub struct HierarchicalDecomposition {
    /// Levels in insertion order; see `add_level` and `from_blocks`.
    levels: Vec<DecompositionLevel>,
}
424
/// One level of a hierarchical decomposition: a named set of subproblems.
#[derive(Debug, Clone)]
pub struct DecompositionLevel {
    /// Level label (e.g. "fine", "coarse").
    pub name: String,
    /// Subproblems that make up this level.
    pub subproblems: Vec<Subproblem>,
}

/// A single subproblem inside a decomposition level.
#[derive(Debug, Clone)]
pub struct Subproblem {
    /// Identifier of the subproblem within its level.
    pub id: usize,
    /// Indices of the variables this subproblem owns.
    pub variables: Vec<usize>,
    /// Presumably ids of related subproblems on a finer level — left empty by
    /// `from_blocks`; confirm intended linkage before relying on it.
    pub children: Vec<usize>,
}
444
445impl HierarchicalDecomposition {
446 pub fn new() -> Self {
448 Self { levels: Vec::new() }
449 }
450
451 pub fn add_level(&mut self, level: DecompositionLevel) {
453 self.levels.push(level);
454 }
455
456 pub fn num_levels(&self) -> usize {
458 self.levels.len()
459 }
460
461 pub fn level(&self, level_id: usize) -> Option<&DecompositionLevel> {
463 self.levels.get(level_id)
464 }
465
466 pub fn from_blocks(
468 coarse_blocks: Vec<Block>,
469 fine_blocks: Vec<Block>,
470 ) -> DecompositionResult<Self> {
471 let mut decomp = Self::new();
472
473 let fine_subproblems: Vec<Subproblem> = fine_blocks
475 .into_iter()
476 .enumerate()
477 .map(|(id, block)| Subproblem {
478 id,
479 variables: block.indices,
480 children: Vec::new(),
481 })
482 .collect();
483
484 let fine_level = DecompositionLevel {
485 name: "fine".to_string(),
486 subproblems: fine_subproblems,
487 };
488
489 let coarse_subproblems: Vec<Subproblem> = coarse_blocks
491 .into_iter()
492 .enumerate()
493 .map(|(id, block)| Subproblem {
494 id,
495 variables: block.indices,
496 children: Vec::new(), })
498 .collect();
499
500 let coarse_level = DecompositionLevel {
501 name: "coarse".to_string(),
502 subproblems: coarse_subproblems,
503 };
504
505 decomp.add_level(fine_level);
506 decomp.add_level(coarse_level);
507
508 Ok(decomp)
509 }
510}
511
impl Default for HierarchicalDecomposition {
    /// Equivalent to [`HierarchicalDecomposition::new`]: an empty hierarchy.
    fn default() -> Self {
        Self::new()
    }
}
517
518pub mod block_utils {
520 use super::*;
521
522 pub fn uniform_blocks(dimension: usize, block_size: usize) -> Vec<Block> {
524 let num_blocks = dimension.div_ceil(block_size);
525 let mut blocks = Vec::new();
526
527 for i in 0..num_blocks {
528 let start = i * block_size;
529 let end = (start + block_size).min(dimension);
530 let indices: Vec<usize> = (start..end).collect();
531 blocks.push(Block::new(format!("block_{}", i), indices));
532 }
533
534 blocks
535 }
536
537 pub fn from_index_groups(groups: Vec<Vec<usize>>) -> Vec<Block> {
539 groups
540 .into_iter()
541 .enumerate()
542 .map(|(i, indices)| Block::new(format!("block_{}", i), indices))
543 .collect()
544 }
545
546 pub fn overlapping_blocks(dimension: usize, block_size: usize, overlap: usize) -> Vec<Block> {
548 assert!(overlap < block_size, "Overlap must be less than block size");
549
550 let stride = block_size - overlap;
551 let mut blocks = Vec::new();
552 let mut start = 0;
553
554 while start < dimension {
555 let end = (start + block_size).min(dimension);
556 let indices: Vec<usize> = (start..end).collect();
557 blocks.push(Block::new(format!("block_{}", blocks.len()), indices));
558 start += stride;
559 }
560
561 blocks
562 }
563}
564
/// Benders decomposition driver: maintains the cut pool, configuration, and
/// the running lower/upper bounds.
///
/// NOTE(review): the master/subproblem solves in the `impl` are simplified
/// placeholders rather than full LP solvers.
pub struct BendersDecomposition {
    /// Number of master-problem variables.
    num_master_vars: usize,
    /// Number of subproblem variables.
    num_sub_vars: usize,
    /// Accumulated optimality/feasibility cuts (FIFO-capped at `config.max_cuts`).
    cuts: Vec<BendersCut>,
    /// Algorithm configuration.
    config: BendersConfig,
    /// Best lower bound found so far (starts at -inf).
    lower_bound: f32,
    /// Best upper bound found so far (starts at +inf).
    upper_bound: f32,
}
602
/// Configuration for the Benders decomposition loop.
#[derive(Debug, Clone)]
pub struct BendersConfig {
    /// Maximum number of master/subproblem iterations.
    pub max_iterations: usize,
    /// Bound-gap tolerance used to declare convergence.
    pub tolerance: f32,
    /// Cap on the number of stored cuts (oldest evicted first).
    pub max_cuts: usize,
    /// Multi-cut mode flag (not consulted by the code visible here — confirm
    /// intended use before relying on it).
    pub multi_cut: bool,
}

impl Default for BendersConfig {
    /// Defaults: 100 iterations, 1e-4 gap tolerance, up to 1000 cuts,
    /// single-cut mode.
    fn default() -> Self {
        BendersConfig {
            multi_cut: false,
            max_cuts: 1000,
            tolerance: 1e-4,
            max_iterations: 100,
        }
    }
}
626
/// A cut added to the Benders master problem.
#[derive(Debug, Clone)]
pub enum BendersCut {
    /// Optimality cut: bounds the subproblem cost from below,
    /// `theta >= constant + coefficients . x` (see `solve_master`).
    Optimality {
        /// Subproblem dual solution that generated this cut.
        dual: Array1<f32>,
        /// Constant term of the cut.
        constant: f32,
        /// Coefficients on the master variables.
        coefficients: Array1<f32>,
    },
    /// Feasibility cut: excludes master solutions whose subproblem is
    /// infeasible; violated when `constant + coefficients . x < 0`.
    Feasibility {
        /// Dual ray certifying subproblem infeasibility.
        ray: Array1<f32>,
        /// Constant term of the cut.
        constant: f32,
        /// Coefficients on the master variables.
        coefficients: Array1<f32>,
    },
}
651
impl BendersDecomposition {
    /// Creates an empty decomposition with no cuts and infinite bounds.
    pub fn new(num_master_vars: usize, num_sub_vars: usize) -> Self {
        Self {
            num_master_vars,
            num_sub_vars,
            cuts: Vec::new(),
            config: BendersConfig::default(),
            lower_bound: f32::NEG_INFINITY,
            upper_bound: f32::INFINITY,
        }
    }

    /// Builder: replaces the configuration.
    pub fn with_config(mut self, config: BendersConfig) -> Self {
        self.config = config;
        self
    }

    /// Adds an optimality cut `theta >= constant + coefficients . x`.
    /// When the cut pool is full, the oldest cut is evicted first.
    pub fn add_optimality_cut(
        &mut self,
        dual: Array1<f32>,
        constant: f32,
        coefficients: Array1<f32>,
    ) {
        if self.cuts.len() >= self.config.max_cuts {
            // FIFO eviction; O(n) on Vec, acceptable at the configured cut counts.
            self.cuts.remove(0);
        }

        self.cuts.push(BendersCut::Optimality {
            dual,
            constant,
            coefficients,
        });
    }

    /// Adds a feasibility cut; same FIFO eviction policy as optimality cuts.
    pub fn add_feasibility_cut(
        &mut self,
        ray: Array1<f32>,
        constant: f32,
        coefficients: Array1<f32>,
    ) {
        if self.cuts.len() >= self.config.max_cuts {
            self.cuts.remove(0);
        }

        self.cuts.push(BendersCut::Feasibility {
            ray,
            constant,
            coefficients,
        });
    }

    /// Evaluates the relaxed master problem for the given objective and
    /// returns `(x, lower_bound)`.
    ///
    /// NOTE(review): this is a placeholder, not an LP solve — `x` starts at
    /// zero, is shifted uniformly by +0.1 per violated feasibility cut, and
    /// `theta` is the max of the optimality cuts at that `x`. With no
    /// optimality cuts, `theta` (and thus the bound) stays at `-inf`.
    ///
    /// # Errors
    /// Returns `IncompatibleStructure` when there are no master variables.
    pub fn solve_master(&self, objective: &Array1<f32>) -> DecompositionResult<(Array1<f32>, f32)> {
        if self.num_master_vars == 0 {
            return Err(DecompositionError::IncompatibleStructure(
                "No master variables".to_string(),
            ));
        }

        // Candidate master solution; only moved by feasibility-cut violations below.
        let mut x = Array1::zeros(self.num_master_vars);

        // theta approximates the subproblem cost from below via the optimality cuts.
        let mut theta = f32::NEG_INFINITY;
        for cut in &self.cuts {
            match cut {
                BendersCut::Optimality {
                    constant,
                    coefficients,
                    ..
                } => {
                    let cut_value = constant + coefficients.dot(&x);
                    theta = theta.max(cut_value);
                }
                BendersCut::Feasibility {
                    constant,
                    coefficients,
                    ..
                } => {
                    let cut_value = constant + coefficients.dot(&x);
                    // Violated feasibility cut: heuristic uniform shift of x.
                    if cut_value < 0.0 {
                        x = x.mapv(|v| v + 0.1);
                    }
                }
            }
        }

        // Lower bound = master objective at x plus the cut-approximated subproblem cost.
        let lower_bound = objective.dot(&x) + theta;
        Ok((x, lower_bound))
    }

    /// Evaluates the subproblem for a fixed master solution; returns
    /// `(objective_value, duals, is_feasible)`.
    ///
    /// NOTE(review): placeholder — returns a constant candidate (all 0.5),
    /// unit duals, and always reports feasibility; `rhs` is not actually
    /// adjusted by the master solution yet.
    ///
    /// # Errors
    /// Returns `IncompatibleStructure` if the coupling matrix's column count
    /// does not match `num_sub_vars`.
    pub fn solve_subproblem(
        &self,
        master_solution: &Array1<f32>,
        sub_objective: &Array1<f32>,
        coupling_matrix: &Array2<f32>,
        rhs: &Array1<f32>,
    ) -> DecompositionResult<(f32, Array1<f32>, bool)> {
        let (n_constraints, n_vars) = coupling_matrix.dim();

        if n_vars != self.num_sub_vars {
            return Err(DecompositionError::IncompatibleStructure(
                "Subproblem dimension mismatch".to_string(),
            ));
        }

        // Placeholder: both branches clone rhs unchanged (master coupling TODO).
        let _adjusted_rhs = if !master_solution.is_empty() {
            rhs.clone()
        } else {
            rhs.clone()
        };

        // Fixed candidate solution and duals; no actual optimization is performed.
        let y = Array1::from_elem(n_vars, 0.5);
        let dual = Array1::from_elem(n_constraints, 1.0);
        let obj_value = sub_objective.dot(&y);

        Ok((obj_value, dual, true))
    }

    /// Runs the Benders loop: alternately solves master and subproblem,
    /// tightening bounds and adding cuts until the gap closes.
    ///
    /// # Errors
    /// Returns `ConvergenceFailed` when `max_iterations` is exhausted before
    /// the gap drops below `tolerance`, or propagates solver errors.
    pub fn iterate(
        &mut self,
        master_obj: &Array1<f32>,
        sub_obj: &Array1<f32>,
        coupling: &Array2<f32>,
        rhs: &Array1<f32>,
    ) -> DecompositionResult<BendersIterationResult> {
        let mut iteration = 0;

        loop {
            iteration += 1;

            if iteration > self.config.max_iterations {
                return Err(DecompositionError::ConvergenceFailed {
                    iterations: iteration,
                    residual: self.upper_bound - self.lower_bound,
                });
            }

            // Master step: candidate x and a lower bound from the cut model.
            let (master_sol, lower_bound) = self.solve_master(master_obj)?;
            self.lower_bound = lower_bound;

            // Subproblem step at the candidate master solution.
            let (sub_obj_value, dual, is_feasible) =
                self.solve_subproblem(&master_sol, sub_obj, coupling, rhs)?;

            // A feasible pair yields a valid upper bound; keep the best seen.
            let master_obj_value = master_obj.dot(&master_sol);
            let total_obj = master_obj_value + sub_obj_value;
            self.upper_bound = self.upper_bound.min(total_obj);

            // Converged when the bound gap falls below tolerance.
            let gap = self.upper_bound - self.lower_bound;
            if gap < self.config.tolerance {
                return Ok(BendersIterationResult {
                    master_solution: master_sol,
                    // NOTE(review): the subproblem solution is not propagated
                    // yet; a zero vector is returned as a placeholder.
                    sub_solution: Array1::zeros(self.num_sub_vars),
                    lower_bound: self.lower_bound,
                    upper_bound: self.upper_bound,
                    iterations: iteration,
                    converged: true,
                });
            }

            // Not converged: add a cut. Coefficients are zero placeholders;
            // real Benders cuts would be derived from the duals and coupling.
            if !is_feasible {
                let coefficients = Array1::zeros(self.num_master_vars);
                self.add_feasibility_cut(dual.clone(), 0.0, coefficients);
            } else {
                let coefficients = Array1::zeros(self.num_master_vars);
                let constant = sub_obj_value;
                self.add_optimality_cut(dual, constant, coefficients);
            }
        }
    }

    /// Returns `(lower_bound, upper_bound)`.
    pub fn bounds(&self) -> (f32, f32) {
        (self.lower_bound, self.upper_bound)
    }

    /// Number of cuts currently in the pool.
    pub fn num_cuts(&self) -> usize {
        self.cuts.len()
    }

    /// Borrows the cut pool.
    pub fn cuts(&self) -> &[BendersCut] {
        &self.cuts
    }

    /// Clears all cuts and resets the bounds to (-inf, +inf).
    pub fn reset(&mut self) {
        self.cuts.clear();
        self.lower_bound = f32::NEG_INFINITY;
        self.upper_bound = f32::INFINITY;
    }
}
882
/// Outcome of a Benders `iterate` run.
#[derive(Debug, Clone)]
pub struct BendersIterationResult {
    /// Master-problem solution at termination.
    pub master_solution: Array1<f32>,
    /// Subproblem solution (currently a zero-vector placeholder from `iterate`).
    pub sub_solution: Array1<f32>,
    /// Best lower bound found.
    pub lower_bound: f32,
    /// Best upper bound found.
    pub upper_bound: f32,
    /// Number of iterations performed.
    pub iterations: usize,
    /// Whether the bound gap closed below tolerance.
    pub converged: bool,
}
899
900impl BendersIterationResult {
901 pub fn gap(&self) -> f32 {
903 self.upper_bound - self.lower_bound
904 }
905
906 pub fn relative_gap(&self) -> f32 {
908 if self.upper_bound.abs() < 1e-10 {
909 self.gap()
910 } else {
911 self.gap() / self.upper_bound.abs()
912 }
913 }
914}
915
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh Benders instance has no cuts and infinite bounds; adding a cut
    // grows the pool without touching the bounds.
    #[test]
    fn test_benders_decomposition() {
        let mut benders = BendersDecomposition::new(3, 2);
        assert_eq!(benders.num_cuts(), 0);

        let dual = Array1::from_vec(vec![1.0, 2.0]);
        let coeffs = Array1::from_vec(vec![0.5, 0.3, 0.1]);
        benders.add_optimality_cut(dual, 1.5, coeffs);

        assert_eq!(benders.num_cuts(), 1);

        let (lb, ub) = benders.bounds();
        assert_eq!(lb, f32::NEG_INFINITY);
        assert_eq!(ub, f32::INFINITY);
    }

    // `with_config` installs the caller-provided configuration.
    #[test]
    fn test_benders_config() {
        let config = BendersConfig {
            max_iterations: 50,
            tolerance: 1e-5,
            max_cuts: 500,
            multi_cut: true,
        };

        let benders = BendersDecomposition::new(2, 3).with_config(config);
        assert_eq!(benders.config.max_iterations, 50);
    }

    // `initialize` copies the starting point into the global variable.
    #[test]
    fn test_consensus_admm_initialization() {
        let config = ADMMConfig::default();
        let mut admm = ConsensusADMM::new(3, 5, config);

        let x0 = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        admm.initialize(&x0);

        assert_eq!(admm.solution(), &x0);
    }

    // Valid disjoint blocks construct a BCD solver with the expected layout.
    #[test]
    fn test_block_coordinate_descent() {
        let blocks = vec![
            Block::new("b1", vec![0, 1]),
            Block::new("b2", vec![2, 3]),
            Block::new("b3", vec![4]),
        ];

        let bcd = BlockCoordinateDescent::new(blocks, 5).unwrap();
        assert_eq!(bcd.num_blocks(), 3);
        assert_eq!(bcd.block(0).unwrap().size(), 2);
    }

    // An out-of-range index (10 >= dimension 5) is rejected.
    #[test]
    fn test_invalid_blocks() {
        let blocks = vec![Block::new("b1", vec![0, 1, 10])];
        let result = BlockCoordinateDescent::new(blocks, 5);
        assert!(result.is_err());
    }

    // 10 indices at block size 3 -> four blocks, the last holding one leftover.
    #[test]
    fn test_uniform_blocks() {
        let blocks = block_utils::uniform_blocks(10, 3);
        assert_eq!(blocks.len(), 4);
        assert_eq!(blocks[0].size(), 3);
        assert_eq!(blocks[3].size(), 1);
    }

    // Consecutive overlapping blocks share their boundary index.
    #[test]
    fn test_overlapping_blocks() {
        let blocks = block_utils::overlapping_blocks(10, 4, 1);
        assert!(blocks.len() > 3);

        let b0_last = blocks[0].indices.last().unwrap();
        let b1_first = blocks[1].indices.first().unwrap();
        assert_eq!(b0_last, b1_first);
    }

    // One multiplier is created per coupling-constraint row.
    #[test]
    fn test_dual_decomposition() {
        let coupling = Array2::from_shape_vec((2, 3), vec![1.0, 0.5, 0.0, 0.0, 0.5, 1.0]).unwrap();
        let dual_decomp = DualDecomposition::new(3, coupling);

        assert_eq!(dual_decomp.dual_variables().len(), 2);
    }

    // Levels can be appended one at a time.
    #[test]
    fn test_hierarchical_decomposition() {
        let mut hier = HierarchicalDecomposition::new();
        assert_eq!(hier.num_levels(), 0);

        let level = DecompositionLevel {
            name: "test".to_string(),
            subproblems: vec![],
        };
        hier.add_level(level);
        assert_eq!(hier.num_levels(), 1);
    }
}