kizzasi_logic/
decomposition.rs

//! Constraint decomposition for large-scale optimization problems
//!
//! This module provides algorithms for decomposing large constraint satisfaction
//! and optimization problems into smaller, more manageable subproblems that can be
//! solved efficiently, potentially in parallel.
//!
//! # Key Concepts
//!
//! - **Consensus ADMM**: Decomposes problems with separable objectives and coupled constraints
//! - **Dual Decomposition**: Exploits separability in the constraint structure
//! - **Block Decomposition**: Divides variables into blocks for coordinate descent
//! - **Hierarchical Decomposition**: Multi-level decomposition for very large problems

use scirs2_core::ndarray::{Array1, Array2};

/// Result type for decomposition operations
pub type DecompositionResult<T> = Result<T, DecompositionError>;

/// Errors that can occur during constraint decomposition
#[derive(Debug, Clone)]
pub enum DecompositionError {
    /// The problem structure is not compatible with the decomposition method
    IncompatibleStructure(String),
    /// Convergence failed after maximum iterations
    ConvergenceFailed { iterations: usize, residual: f32 },
    /// Invalid block structure
    InvalidBlocks(String),
    /// Numerical issues during decomposition
    NumericalIssue(String),
}

impl std::fmt::Display for DecompositionError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::IncompatibleStructure(msg) => write!(f, "Incompatible structure: {}", msg),
            Self::ConvergenceFailed {
                iterations,
                residual,
            } => {
                write!(
                    f,
                    "Failed to converge after {} iterations (residual: {})",
                    iterations, residual
                )
            }
            Self::InvalidBlocks(msg) => write!(f, "Invalid blocks: {}", msg),
            Self::NumericalIssue(msg) => write!(f, "Numerical issue: {}", msg),
        }
    }
}

impl std::error::Error for DecompositionError {}

/// Block specification for block-wise decomposition
#[derive(Debug, Clone)]
pub struct Block {
    /// Name of this block
    pub name: String,
    /// Indices of variables in this block
    pub indices: Vec<usize>,
}

impl Block {
    /// Create a new block
    pub fn new(name: impl Into<String>, indices: Vec<usize>) -> Self {
        Self {
            name: name.into(),
            indices,
        }
    }

    /// Number of variables in this block
    pub fn size(&self) -> usize {
        self.indices.len()
    }
}

/// Configuration for ADMM-based consensus optimization
#[derive(Debug, Clone)]
pub struct ADMMConfig {
    /// Penalty parameter ρ for augmented Lagrangian
    pub rho: f32,
    /// Maximum number of iterations
    pub max_iterations: usize,
    /// Convergence tolerance for primal residual
    pub primal_tol: f32,
    /// Convergence tolerance for dual residual
    pub dual_tol: f32,
    /// Whether to use adaptive ρ
    pub adaptive_rho: bool,
    /// Scaling factor for adaptive ρ updates
    pub rho_update_factor: f32,
}

impl Default for ADMMConfig {
    fn default() -> Self {
        Self {
            rho: 1.0,
            max_iterations: 1000,
            primal_tol: 1e-4,
            dual_tol: 1e-4,
            adaptive_rho: true,
            rho_update_factor: 2.0,
        }
    }
}

/// Consensus ADMM solver for decomposed optimization
///
/// Solves problems of the form:
/// ```text
/// minimize   Σᵢ fᵢ(xᵢ)
/// subject to xᵢ = z  for all i
///            z ∈ C
/// ```
///
/// where each fᵢ is one term of the separable objective and C is a shared
/// constraint set.
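///
/// # Example
///
/// A minimal sketch (the targets `aᵢ` below are made-up data): each block
/// minimizes fᵢ(x) = ½‖x − aᵢ‖², whose proximal step has the closed form
/// xᵢ = (aᵢ + ρ·v)/(1 + ρ) with v = z − uᵢ, so the consensus variable
/// converges to the mean of the targets.
///
/// ```ignore
/// use scirs2_core::ndarray::Array1;
///
/// let targets = vec![
///     Array1::from_vec(vec![1.0_f32, 0.0]),
///     Array1::from_vec(vec![0.0_f32, 1.0]),
/// ];
/// let mut admm = ConsensusADMM::new(2, 2, ADMMConfig::default());
///
/// for _ in 0..100 {
///     let (p, d) = admm.iterate(|i, _xi, v, rho| {
///         // Closed-form prox of ½‖x − aᵢ‖²
///         (&targets[i] + &(v * rho)) / (1.0 + rho)
///     });
///     if admm.has_converged(p, d) {
///         break;
///     }
/// }
/// // admm.solution() ≈ (0.5, 0.5), the average of the targets
/// ```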
pub struct ConsensusADMM {
    config: ADMMConfig,
    num_blocks: usize,
    dimension: usize,
    /// Local variables xᵢ for each block
    local_vars: Vec<Array1<f32>>,
    /// Global consensus variable z
    global_var: Array1<f32>,
    /// Dual variables (scaled Lagrange multipliers)
    dual_vars: Vec<Array1<f32>>,
}

impl ConsensusADMM {
    /// Create a new consensus ADMM solver
    pub fn new(num_blocks: usize, dimension: usize, config: ADMMConfig) -> Self {
        let local_vars = vec![Array1::zeros(dimension); num_blocks];
        let global_var = Array1::zeros(dimension);
        let dual_vars = vec![Array1::zeros(dimension); num_blocks];

        Self {
            config,
            num_blocks,
            dimension,
            local_vars,
            global_var,
            dual_vars,
        }
    }

    /// Initialize from a starting point
    pub fn initialize(&mut self, x0: &Array1<f32>) {
        assert_eq!(x0.len(), self.dimension, "Initial point dimension mismatch");
        self.global_var = x0.clone();
        for i in 0..self.num_blocks {
            self.local_vars[i] = x0.clone();
        }
    }

    /// Perform one ADMM iteration with custom local updates
    ///
    /// `local_update_fn` receives `(block_index, current_xᵢ, z − uᵢ, ρ)` and
    /// should return the solution of the local proximal step:
    /// ```text
    /// xᵢ := argmin fᵢ(xᵢ) + (ρ/2)‖xᵢ - z + uᵢ‖²
    /// ```
    ///
    /// Returns the (primal, dual) residual norms for this iteration.
    pub fn iterate<F>(&mut self, local_update_fn: F) -> (f32, f32)
    where
        F: Fn(usize, &Array1<f32>, &Array1<f32>, f32) -> Array1<f32>,
    {
        // Step 1: Update local variables xᵢ (can be done in parallel)
        for i in 0..self.num_blocks {
            let z_minus_u = &self.global_var - &self.dual_vars[i];
            self.local_vars[i] =
                local_update_fn(i, &self.local_vars[i], &z_minus_u, self.config.rho);
        }

        // Step 2: Update global variable z (averaging; projection onto C would go here)
        let mut z_new = Array1::zeros(self.dimension);
        for i in 0..self.num_blocks {
            z_new += &(&self.local_vars[i] + &self.dual_vars[i]);
        }
        z_new /= self.num_blocks as f32;

        // Dual residual: s = ρ(z_new − z_old) stacked across all N blocks,
        // so ‖s‖² = N·ρ²·‖z_new − z_old‖². Computed once instead of per block.
        let z_diff = &z_new - &self.global_var;
        let dual_residual = self.num_blocks as f32 * z_diff.iter().map(|&x| x * x).sum::<f32>();

        // Step 3: Update dual variables uᵢ and accumulate the primal residual
        // ‖r‖² = Σᵢ ‖xᵢ − z_new‖²
        let mut primal_residual = 0.0f32;
        for i in 0..self.num_blocks {
            let xi_minus_z = &self.local_vars[i] - &z_new;
            self.dual_vars[i] = &self.dual_vars[i] + &xi_minus_z;
            primal_residual += xi_minus_z.iter().map(|&x| x * x).sum::<f32>();
        }

        self.global_var = z_new;

        (
            primal_residual.sqrt(),
            dual_residual.sqrt() * self.config.rho,
        )
    }

    /// Get the current consensus solution
    pub fn solution(&self) -> &Array1<f32> {
        &self.global_var
    }

    /// Get local variable for a specific block
    pub fn local_solution(&self, block_id: usize) -> Option<&Array1<f32>> {
        self.local_vars.get(block_id)
    }

    /// Check convergence based on residuals
    pub fn has_converged(&self, primal_res: f32, dual_res: f32) -> bool {
        primal_res < self.config.primal_tol && dual_res < self.config.dual_tol
    }

    /// Update ρ adaptively based on the residual-balancing heuristic
    ///
    /// Because the duals are stored in scaled form (u = y/ρ), they are
    /// rescaled whenever ρ changes so the unscaled multipliers y = ρu stay fixed.
    pub fn update_rho(&mut self, primal_res: f32, dual_res: f32) {
        if !self.config.adaptive_rho {
            return;
        }

        if primal_res > 10.0 * dual_res {
            self.config.rho *= self.config.rho_update_factor;
            for u in &mut self.dual_vars {
                *u /= self.config.rho_update_factor;
            }
        } else if dual_res > 10.0 * primal_res {
            self.config.rho /= self.config.rho_update_factor;
            for u in &mut self.dual_vars {
                *u *= self.config.rho_update_factor;
            }
        }
    }
}

/// Block coordinate descent for structured constraints
///
/// Solves optimization problems by iteratively optimizing over blocks of variables
/// while keeping other blocks fixed.
pub struct BlockCoordinateDescent {
    blocks: Vec<Block>,
    dimension: usize,
    current_solution: Array1<f32>,
    max_iterations: usize,
    tolerance: f32,
}

impl BlockCoordinateDescent {
    /// Create a new block coordinate descent solver
    pub fn new(blocks: Vec<Block>, dimension: usize) -> DecompositionResult<Self> {
        // Validate blocks
        let mut covered = vec![false; dimension];
        for block in &blocks {
            for &idx in &block.indices {
                if idx >= dimension {
                    return Err(DecompositionError::InvalidBlocks(format!(
                        "Index {} exceeds dimension {}",
                        idx, dimension
                    )));
                }
                if covered[idx] {
                    return Err(DecompositionError::InvalidBlocks(format!(
                        "Index {} appears in multiple blocks",
                        idx
                    )));
                }
                covered[idx] = true;
            }
        }

        Ok(Self {
            blocks,
            dimension,
            current_solution: Array1::zeros(dimension),
            max_iterations: 1000,
            tolerance: 1e-4,
        })
    }

    /// Set maximum iterations
    pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
        self.max_iterations = max_iterations;
        self
    }

    /// Set convergence tolerance
    pub fn with_tolerance(mut self, tolerance: f32) -> Self {
        self.tolerance = tolerance;
        self
    }

    /// Initialize from a starting point
    pub fn initialize(&mut self, x0: &Array1<f32>) {
        assert_eq!(x0.len(), self.dimension);
        self.current_solution = x0.clone();
    }

    /// Perform one block update
    ///
    /// `block_update_fn` receives the full current solution and the block's
    /// variable indices, and should return optimized values for exactly those
    /// indices while keeping all other variables fixed.
    ///
    /// Returns the Euclidean norm of the change in the block's variables.
    pub fn update_block<F>(&mut self, block_id: usize, block_update_fn: F) -> f32
    where
        F: Fn(&Array1<f32>, &[usize]) -> Array1<f32>,
    {
        let block = &self.blocks[block_id];
        let block_solution = block_update_fn(&self.current_solution, &block.indices);

        assert_eq!(
            block_solution.len(),
            block.indices.len(),
            "Block solution size mismatch"
        );

        // Update the solution for this block
        let mut change = 0.0f32;
        for (i, &idx) in block.indices.iter().enumerate() {
            let old_val = self.current_solution[idx];
            let new_val = block_solution[i];
            self.current_solution[idx] = new_val;
            change += (new_val - old_val).powi(2);
        }

        change.sqrt()
    }

    /// Get current solution
    pub fn solution(&self) -> &Array1<f32> {
        &self.current_solution
    }

    /// Get number of blocks
    pub fn num_blocks(&self) -> usize {
        self.blocks.len()
    }

    /// Get block information
    pub fn block(&self, block_id: usize) -> Option<&Block> {
        self.blocks.get(block_id)
    }
}

/// Dual decomposition for separable constraints
///
/// Exploits problem structure where constraints can be decomposed into
/// independent subproblems coordinated through dual variables.
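///
/// # Example
///
/// A minimal sketch of the dual coordination step (coupling coefficients and
/// violations are made-up data): measure the constraint violations g(x), then
/// take a projected subgradient step on the duals.
///
/// ```ignore
/// use scirs2_core::ndarray::{Array1, Array2};
///
/// // Two coupling constraints over three subproblems
/// let coupling = Array2::from_shape_vec((2, 3), vec![1.0, 0.5, 0.0, 0.0, 0.5, 1.0]).unwrap();
/// let mut dd = DualDecomposition::new(3, coupling).with_step_size(0.05);
///
/// // First constraint violated by 2.0, second satisfied with slack 1.0
/// let violations = Array1::from_vec(vec![2.0_f32, -1.0]);
/// dd.update_duals(&violations);
///
/// assert!((dd.dual_variables()[0] - 0.1).abs() < 1e-6); // λ₁ += α·2.0
/// assert_eq!(dd.dual_variables()[1], 0.0);              // clamped at λ ≥ 0
/// ```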
pub struct DualDecomposition {
    num_subproblems: usize,
    coupling_matrix: Array2<f32>,
    dual_vars: Array1<f32>,
    step_size: f32,
    max_iterations: usize,
}

impl DualDecomposition {
    /// Create a new dual decomposition solver
    ///
    /// # Arguments
    /// * `num_subproblems` - Number of decomposed subproblems
    /// * `coupling_matrix` - Matrix describing how subproblems are coupled
    pub fn new(num_subproblems: usize, coupling_matrix: Array2<f32>) -> Self {
        let num_coupling = coupling_matrix.nrows();
        Self {
            num_subproblems,
            coupling_matrix,
            dual_vars: Array1::zeros(num_coupling),
            step_size: 0.1,
            max_iterations: 1000,
        }
    }

    /// Set the dual step size
    pub fn with_step_size(mut self, step_size: f32) -> Self {
        self.step_size = step_size;
        self
    }

    /// Set maximum iterations
    pub fn with_max_iterations(mut self, max_iterations: usize) -> Self {
        self.max_iterations = max_iterations;
        self
    }

    /// Update dual variables using the projected subgradient method
    pub fn update_duals(&mut self, constraint_violations: &Array1<f32>) {
        assert_eq!(constraint_violations.len(), self.dual_vars.len());

        // Dual ascent: λ := max(0, λ + α·g(x)), where g(x) is the constraint
        // violation; the projection onto λ ≥ 0 assumes inequality constraints.
        for i in 0..self.dual_vars.len() {
            self.dual_vars[i] =
                (self.dual_vars[i] + self.step_size * constraint_violations[i]).max(0.0);
        }
    }

    /// Get current dual variables
    pub fn dual_variables(&self) -> &Array1<f32> {
        &self.dual_vars
    }

    /// Compute the Lagrangian-augmented cost for subproblem `subproblem_id`
    pub fn augmented_cost(
        &self,
        subproblem_id: usize,
        base_cost: f32,
        local_vars: &Array1<f32>,
    ) -> f32 {
        assert!(subproblem_id < self.num_subproblems);

        // Add the dual contribution: f(x) + λᵀAx. Simplification: each
        // subproblem couples through a single scalar, so only the first
        // component of its local variables enters the coupling term.
        let mut augmented = base_cost;
        for (i, &dual) in self.dual_vars.iter().enumerate() {
            let coupling_coeff = self.coupling_matrix[[i, subproblem_id]];
            if let Some(&var_val) = local_vars.first() {
                augmented += dual * coupling_coeff * var_val;
            }
        }

        augmented
    }
}

/// Hierarchical decomposition for very large-scale problems
///
/// Organizes constraints into a hierarchy where higher-level constraints
/// coordinate lower-level subproblems.
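///
/// # Example
///
/// A minimal sketch building a two-level hierarchy from coarse and fine blocks
/// (sizes are made-up):
///
/// ```ignore
/// let coarse = block_utils::uniform_blocks(8, 4); // 2 coarse blocks
/// let fine = block_utils::uniform_blocks(8, 2);   // 4 fine blocks
/// let hier = HierarchicalDecomposition::from_blocks(coarse, fine).unwrap();
///
/// assert_eq!(hier.num_levels(), 2);
/// assert_eq!(hier.level(0).unwrap().name, "fine"); // bottom level first
/// assert_eq!(hier.level(1).unwrap().subproblems.len(), 2);
/// ```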
pub struct HierarchicalDecomposition {
    levels: Vec<DecompositionLevel>,
}

/// A single level in the hierarchical decomposition
#[derive(Clone)]
pub struct DecompositionLevel {
    /// Name of this level
    pub name: String,
    /// Subproblems at this level
    pub subproblems: Vec<Subproblem>,
}

/// A subproblem in hierarchical decomposition
#[derive(Clone)]
pub struct Subproblem {
    /// Unique identifier
    pub id: usize,
    /// Variable indices controlled by this subproblem
    pub variables: Vec<usize>,
    /// Child subproblems (at the next level down)
    pub children: Vec<usize>,
}

impl HierarchicalDecomposition {
    /// Create a new hierarchical decomposition
    pub fn new() -> Self {
        Self { levels: Vec::new() }
    }

    /// Add a level to the hierarchy
    pub fn add_level(&mut self, level: DecompositionLevel) {
        self.levels.push(level);
    }

    /// Get number of levels
    pub fn num_levels(&self) -> usize {
        self.levels.len()
    }

    /// Get level by index
    pub fn level(&self, level_id: usize) -> Option<&DecompositionLevel> {
        self.levels.get(level_id)
    }

    /// Create a two-level decomposition from blocks
    pub fn from_blocks(
        coarse_blocks: Vec<Block>,
        fine_blocks: Vec<Block>,
    ) -> DecompositionResult<Self> {
        let mut decomp = Self::new();

        // Create fine level (bottom)
        let fine_subproblems: Vec<Subproblem> = fine_blocks
            .into_iter()
            .enumerate()
            .map(|(id, block)| Subproblem {
                id,
                variables: block.indices,
                children: Vec::new(),
            })
            .collect();

        let fine_level = DecompositionLevel {
            name: "fine".to_string(),
            subproblems: fine_subproblems,
        };

        // Create coarse level (top)
        let coarse_subproblems: Vec<Subproblem> = coarse_blocks
            .into_iter()
            .enumerate()
            .map(|(id, block)| Subproblem {
                id,
                variables: block.indices,
                children: Vec::new(), // Could link to fine subproblems
            })
            .collect();

        let coarse_level = DecompositionLevel {
            name: "coarse".to_string(),
            subproblems: coarse_subproblems,
        };

        decomp.add_level(fine_level);
        decomp.add_level(coarse_level);

        Ok(decomp)
    }
}

impl Default for HierarchicalDecomposition {
    fn default() -> Self {
        Self::new()
    }
}

/// Utility functions for creating block structures
pub mod block_utils {
    use super::*;

    /// Create uniform blocks of equal size
    pub fn uniform_blocks(dimension: usize, block_size: usize) -> Vec<Block> {
        let num_blocks = dimension.div_ceil(block_size);
        let mut blocks = Vec::new();

        for i in 0..num_blocks {
            let start = i * block_size;
            let end = (start + block_size).min(dimension);
            let indices: Vec<usize> = (start..end).collect();
            blocks.push(Block::new(format!("block_{}", i), indices));
        }

        blocks
    }

    /// Create blocks from explicit index groups
    pub fn from_index_groups(groups: Vec<Vec<usize>>) -> Vec<Block> {
        groups
            .into_iter()
            .enumerate()
            .map(|(i, indices)| Block::new(format!("block_{}", i), indices))
            .collect()
    }

    /// Create overlapping blocks with specified overlap
    pub fn overlapping_blocks(dimension: usize, block_size: usize, overlap: usize) -> Vec<Block> {
        assert!(overlap < block_size, "Overlap must be less than block size");

        let stride = block_size - overlap;
        let mut blocks = Vec::new();
        let mut start = 0;

        while start < dimension {
            let end = (start + block_size).min(dimension);
            let indices: Vec<usize> = (start..end).collect();
            blocks.push(Block::new(format!("block_{}", blocks.len()), indices));
            // Stop once the range is fully covered; stepping further would only
            // produce trailing blocks contained entirely in this one.
            if end == dimension {
                break;
            }
            start += stride;
        }

        blocks
    }
}

// ============================================================================
// Benders Decomposition
// ============================================================================

/// Benders Decomposition for mixed-integer programming
///
/// Decomposes problems of the form:
/// ```text
/// min c'x + d'y
/// s.t. Ax + By >= b
///      x binary/integer, y continuous
/// ```
///
/// Into:
/// - Master problem: involves only x (integer variables)
/// - Subproblem: involves only y given fixed x
///
/// # Algorithm
///
/// 1. Solve relaxed master problem → get x*
/// 2. Solve subproblem with fixed x* → get dual variables
/// 3. Add Benders cut to master based on dual values
/// 4. Repeat until convergence
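///
/// # Example
///
/// A minimal sketch of the cutting-plane loop, using the simplified built-in
/// master/subproblem solves below (placeholders, not a real MILP/LP solver;
/// all data is made-up):
///
/// ```ignore
/// use scirs2_core::ndarray::{Array1, Array2};
///
/// let mut benders = BendersDecomposition::new(2, 2);
/// let master_obj = Array1::from_vec(vec![1.0_f32, 1.0]);
/// let sub_obj = Array1::from_vec(vec![2.0_f32, 3.0]);
/// let coupling = Array2::from_elem((2, 2), 1.0_f32);
/// let rhs = Array1::from_vec(vec![1.0_f32, 1.0]);
///
/// let result = benders.iterate(&master_obj, &sub_obj, &coupling, &rhs).unwrap();
/// assert!(result.converged);
/// assert!(result.gap() <= 1e-4);
/// ```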
pub struct BendersDecomposition {
    /// Number of master variables (integer/binary)
    num_master_vars: usize,
    /// Number of subproblem variables (continuous)
    num_sub_vars: usize,
    /// Benders cuts added so far
    cuts: Vec<BendersCut>,
    /// Configuration
    config: BendersConfig,
    /// Current lower bound
    lower_bound: f32,
    /// Current upper bound
    upper_bound: f32,
}

/// Configuration for Benders decomposition
#[derive(Debug, Clone)]
pub struct BendersConfig {
    /// Maximum number of iterations
    pub max_iterations: usize,
    /// Convergence tolerance (gap between bounds)
    pub tolerance: f32,
    /// Maximum number of cuts to keep
    pub max_cuts: usize,
    /// Whether to use multi-cut strategy
    pub multi_cut: bool,
}

impl Default for BendersConfig {
    fn default() -> Self {
        Self {
            max_iterations: 100,
            tolerance: 1e-4,
            max_cuts: 1000,
            multi_cut: false,
        }
    }
}

/// Benders cut: either optimality or feasibility
#[derive(Debug, Clone)]
pub enum BendersCut {
    /// Optimality cut: θ >= constant + coefficients'x,
    /// derived from the subproblem duals π via π'(b - Ax)
    Optimality {
        /// Dual variables (multipliers) from the subproblem
        dual: Array1<f32>,
        /// Constant term
        constant: f32,
        /// Coefficient vector for the master variables
        coefficients: Array1<f32>,
    },
    /// Feasibility cut: ensures the subproblem is feasible,
    /// π'(b - Ax) >= 0 where π is an extreme ray
    Feasibility {
        /// Extreme ray
        ray: Array1<f32>,
        /// Constant term
        constant: f32,
        /// Coefficient vector for the master variables
        coefficients: Array1<f32>,
    },
}

impl BendersDecomposition {
    /// Create a new Benders decomposition
    pub fn new(num_master_vars: usize, num_sub_vars: usize) -> Self {
        Self {
            num_master_vars,
            num_sub_vars,
            cuts: Vec::new(),
            config: BendersConfig::default(),
            lower_bound: f32::NEG_INFINITY,
            upper_bound: f32::INFINITY,
        }
    }

    /// Set configuration
    pub fn with_config(mut self, config: BendersConfig) -> Self {
        self.config = config;
        self
    }

    /// Add an optimality cut
    pub fn add_optimality_cut(
        &mut self,
        dual: Array1<f32>,
        constant: f32,
        coefficients: Array1<f32>,
    ) {
        if self.cuts.len() >= self.config.max_cuts {
            // Remove oldest cut if at limit
            self.cuts.remove(0);
        }

        self.cuts.push(BendersCut::Optimality {
            dual,
            constant,
            coefficients,
        });
    }

    /// Add a feasibility cut
    pub fn add_feasibility_cut(
        &mut self,
        ray: Array1<f32>,
        constant: f32,
        coefficients: Array1<f32>,
    ) {
        if self.cuts.len() >= self.config.max_cuts {
            self.cuts.remove(0);
        }

        self.cuts.push(BendersCut::Feasibility {
            ray,
            constant,
            coefficients,
        });
    }

    /// Solve master problem (to be implemented with external solver)
    ///
    /// Returns the master solution and lower bound
    pub fn solve_master(&self, objective: &Array1<f32>) -> DecompositionResult<(Array1<f32>, f32)> {
        // Simplified master problem: minimize c'x + θ
        // subject to Benders cuts
        //
        // In practice, this would call an MILP solver.
        // For now, we return a simple relaxation.

        if self.num_master_vars == 0 {
            return Err(DecompositionError::IncompatibleStructure(
                "No master variables".to_string(),
            ));
        }

        // Simple heuristic: start from the all-zero point
        let mut x = Array1::zeros(self.num_master_vars);

        // Estimate θ from the cuts; with no cuts yet, θ (and hence the
        // lower bound) is unbounded below.
        let mut theta = f32::NEG_INFINITY;
        for cut in &self.cuts {
            match cut {
                BendersCut::Optimality {
                    constant,
                    coefficients,
                    ..
                } => {
                    let cut_value = constant + coefficients.dot(&x);
                    theta = theta.max(cut_value);
                }
                BendersCut::Feasibility {
                    constant,
                    coefficients,
                    ..
                } => {
                    // Ensure the feasibility cut is satisfied
                    let cut_value = constant + coefficients.dot(&x);
                    if cut_value < 0.0 {
                        // Nudge x toward feasibility (simplified)
                        x = x.mapv(|v| v + 0.1);
                    }
                }
            }
        }

        let lower_bound = objective.dot(&x) + theta;
        Ok((x, lower_bound))
    }

    /// Solve subproblem given master solution
    ///
    /// Returns (objective value, dual variables, feasible flag)
    pub fn solve_subproblem(
        &self,
        _master_solution: &Array1<f32>,
        sub_objective: &Array1<f32>,
        coupling_matrix: &Array2<f32>,
        rhs: &Array1<f32>,
    ) -> DecompositionResult<(f32, Array1<f32>, bool)> {
        // Solve: min d'y
        //        s.t. By >= b - Ax*
        //
        // where x* is the fixed master solution

        let (n_constraints, n_vars) = coupling_matrix.dim();

        if n_vars != self.num_sub_vars {
            return Err(DecompositionError::IncompatibleStructure(
                "Subproblem dimension mismatch".to_string(),
            ));
        }

        // The adjusted RHS would be b - Ax*; this simplified solve does not
        // apply the master coupling term, so the RHS is used as-is.
        let _adjusted_rhs = rhs.clone();

        // Simplified LP solve (in practice, use OSQP or another LP solver).
        // For now, return a fixed feasible point with unit dual values.
        let y = Array1::from_elem(n_vars, 0.5);
        let dual = Array1::from_elem(n_constraints, 1.0);
        let obj_value = sub_objective.dot(&y);

        Ok((obj_value, dual, true))
    }

    /// Main Benders iteration loop
    pub fn iterate(
        &mut self,
        master_obj: &Array1<f32>,
        sub_obj: &Array1<f32>,
        coupling: &Array2<f32>,
        rhs: &Array1<f32>,
    ) -> DecompositionResult<BendersIterationResult> {
        let mut iteration = 0;

        loop {
            iteration += 1;

            if iteration > self.config.max_iterations {
                return Err(DecompositionError::ConvergenceFailed {
                    iterations: iteration,
                    residual: self.upper_bound - self.lower_bound,
                });
            }

            // Step 1: Solve master problem
            let (master_sol, lower_bound) = self.solve_master(master_obj)?;
            self.lower_bound = lower_bound;

            // Step 2: Solve subproblem
            let (sub_obj_value, dual, is_feasible) =
                self.solve_subproblem(&master_sol, sub_obj, coupling, rhs)?;

            // Step 3: Update upper bound; only a feasible subproblem yields a
            // valid primal bound
            if is_feasible {
                let master_obj_value = master_obj.dot(&master_sol);
                self.upper_bound = self.upper_bound.min(master_obj_value + sub_obj_value);
            }

            // Step 4: Check convergence
            let gap = self.upper_bound - self.lower_bound;
            if gap < self.config.tolerance {
                return Ok(BendersIterationResult {
                    master_solution: master_sol,
                    sub_solution: Array1::zeros(self.num_sub_vars), // Would come from the subproblem
                    lower_bound: self.lower_bound,
                    upper_bound: self.upper_bound,
                    iterations: iteration,
                    converged: true,
                });
            }

            // Step 5: Generate and add a cut
            if !is_feasible {
                // Add feasibility cut (using the extreme ray)
                let coefficients = Array1::zeros(self.num_master_vars);
                self.add_feasibility_cut(dual.clone(), 0.0, coefficients);
            } else {
                // Add optimality cut
                let coefficients = Array1::zeros(self.num_master_vars);
                let constant = sub_obj_value;
                self.add_optimality_cut(dual, constant, coefficients);
            }
        }
    }

    /// Get current bounds
    pub fn bounds(&self) -> (f32, f32) {
        (self.lower_bound, self.upper_bound)
    }

    /// Number of cuts generated
    pub fn num_cuts(&self) -> usize {
        self.cuts.len()
    }

    /// Get all cuts
    pub fn cuts(&self) -> &[BendersCut] {
        &self.cuts
    }

    /// Reset the decomposition
    pub fn reset(&mut self) {
        self.cuts.clear();
        self.lower_bound = f32::NEG_INFINITY;
        self.upper_bound = f32::INFINITY;
    }
}

/// Result of Benders iteration
#[derive(Debug, Clone)]
pub struct BendersIterationResult {
    /// Master problem solution
    pub master_solution: Array1<f32>,
    /// Subproblem solution
    pub sub_solution: Array1<f32>,
    /// Lower bound on optimal value
    pub lower_bound: f32,
    /// Upper bound on optimal value
    pub upper_bound: f32,
    /// Number of iterations
    pub iterations: usize,
    /// Whether algorithm converged
    pub converged: bool,
}

impl BendersIterationResult {
    /// Get optimality gap
    pub fn gap(&self) -> f32 {
        self.upper_bound - self.lower_bound
    }

    /// Get relative gap
    pub fn relative_gap(&self) -> f32 {
        if self.upper_bound.abs() < 1e-10 {
            self.gap()
        } else {
            self.gap() / self.upper_bound.abs()
        }
    }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_benders_decomposition() {
        let mut benders = BendersDecomposition::new(3, 2);
        assert_eq!(benders.num_cuts(), 0);

        // Add an optimality cut
        let dual = Array1::from_vec(vec![1.0, 2.0]);
        let coeffs = Array1::from_vec(vec![0.5, 0.3, 0.1]);
        benders.add_optimality_cut(dual, 1.5, coeffs);

        assert_eq!(benders.num_cuts(), 1);

        let (lb, ub) = benders.bounds();
        assert_eq!(lb, f32::NEG_INFINITY);
        assert_eq!(ub, f32::INFINITY);
    }

    #[test]
    fn test_benders_config() {
        let config = BendersConfig {
            max_iterations: 50,
            tolerance: 1e-5,
            max_cuts: 500,
            multi_cut: true,
        };

        let benders = BendersDecomposition::new(2, 3).with_config(config);
        assert_eq!(benders.config.max_iterations, 50);
    }

    #[test]
    fn test_consensus_admm_initialization() {
        let config = ADMMConfig::default();
        let mut admm = ConsensusADMM::new(3, 5, config);

        let x0 = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        admm.initialize(&x0);

        assert_eq!(admm.solution(), &x0);
    }

    #[test]
    fn test_block_coordinate_descent() {
        let blocks = vec![
            Block::new("b1", vec![0, 1]),
            Block::new("b2", vec![2, 3]),
            Block::new("b3", vec![4]),
        ];

        let bcd = BlockCoordinateDescent::new(blocks, 5).unwrap();
        assert_eq!(bcd.num_blocks(), 3);
        assert_eq!(bcd.block(0).unwrap().size(), 2);
    }

    #[test]
    fn test_invalid_blocks() {
        // Block with index out of bounds
        let blocks = vec![Block::new("b1", vec![0, 1, 10])];
        let result = BlockCoordinateDescent::new(blocks, 5);
        assert!(result.is_err());
    }

    #[test]
    fn test_uniform_blocks() {
        let blocks = block_utils::uniform_blocks(10, 3);
        assert_eq!(blocks.len(), 4); // ceil(10/3) = 4 blocks
        assert_eq!(blocks[0].size(), 3);
        assert_eq!(blocks[3].size(), 1); // Last block has only 1 element
    }

    #[test]
    fn test_overlapping_blocks() {
        let blocks = block_utils::overlapping_blocks(10, 4, 1);
        // Block size 4 with overlap 1 (stride 3): [0..4), [3..7), [6..10)
        assert_eq!(blocks.len(), 3);

        // Check that consecutive blocks overlap
        let b0_last = blocks[0].indices.last().unwrap();
        let b1_first = blocks[1].indices.first().unwrap();
        assert_eq!(b0_last, b1_first);
    }

    #[test]
    fn test_dual_decomposition() {
        let coupling = Array2::from_shape_vec((2, 3), vec![1.0, 0.5, 0.0, 0.0, 0.5, 1.0]).unwrap();
        let dual_decomp = DualDecomposition::new(3, coupling);

        assert_eq!(dual_decomp.dual_variables().len(), 2);
    }

    #[test]
    fn test_hierarchical_decomposition() {
        let mut hier = HierarchicalDecomposition::new();
        assert_eq!(hier.num_levels(), 0);

        let level = DecompositionLevel {
            name: "test".to_string(),
            subproblems: vec![],
        };
        hier.add_level(level);
        assert_eq!(hier.num_levels(), 1);
    }
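
    #[test]
    fn test_consensus_admm_converges_to_mean() {
        // Sanity check with made-up targets: fᵢ(x) = ½‖x − aᵢ‖² has the
        // closed-form prox x = (aᵢ + ρ·v)/(1 + ρ), and the consensus
        // solution is the mean of the aᵢ.
        let targets = vec![
            Array1::from_vec(vec![1.0_f32, 0.0]),
            Array1::from_vec(vec![0.0_f32, 1.0]),
        ];
        let mut admm = ConsensusADMM::new(2, 2, ADMMConfig::default());

        for _ in 0..200 {
            let (p, d) = admm.iterate(|i, _xi, v, rho| (&targets[i] + &(v * rho)) / (1.0 + rho));
            if admm.has_converged(p, d) {
                break;
            }
        }

        for &z in admm.solution().iter() {
            assert!((z - 0.5).abs() < 1e-2);
        }
    }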
}